repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/unittest/test_ir_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test type nodes in the IR"""
import tvm
def check_json_roundtrip(node):
    """Serialize *node* to JSON and back, asserting structural equality."""
    serialized = tvm.ir.save_json(node)
    restored = tvm.ir.load_json(serialized)
    assert tvm.ir.structural_equal(restored, node, map_free_vars=True)
def test_prim_type():
    """PrimType construction preserves the dtype string."""
    prim = tvm.ir.PrimType("int32")
    assert isinstance(prim, tvm.ir.PrimType)
    assert prim.dtype == "int32"
def test_tensor_type_bad_constructor():
    """TensorType must reject invalid shape/dtype arguments.

    The original version silently passed even when no exception was raised;
    the ``else`` branch now fails the test explicitly in that case.
    """
    try:
        tvm.ir.TensorType("xx", "xx")
    except tvm.error.TVMError:
        pass
    else:
        raise AssertionError("TensorType should raise TVMError on invalid arguments")
def test_tensor_type():
    """TensorType stores shape/dtype, prints, and survives a JSON roundtrip."""
    shape = tvm.runtime.convert([1, 2, 3])
    dtype = "float32"
    tt = tvm.ir.TensorType(shape, dtype)
    assert tt.dtype == dtype
    assert tt.shape == shape
    # Identity comparison is the idiomatic (and reliable) None check.
    assert tt.span is None
    str(tt)
    check_json_roundtrip(tt)
def test_type_param():
    """TypeVar records its kind and supports printing and JSON roundtrip."""
    type_var = tvm.ir.TypeVar("name", tvm.ir.TypeKind.Type)
    assert type_var.kind == tvm.ir.TypeKind.Type
    # assert type_var.span  # TODO allow us to set span
    str(type_var)
    check_json_roundtrip(type_var)
def test_func_type():
    """FuncType exposes its components and roundtrips through JSON."""
    type_params = tvm.runtime.convert([])
    type_constraints = tvm.runtime.convert([])  # TODO: fill me in
    arg_types = tvm.runtime.convert([])
    ret_type = tvm.ir.TensorType((1, 2, 3), "float32")
    tf = tvm.ir.FuncType(arg_types, ret_type, type_params, type_constraints)
    assert tf.type_params == type_params
    assert tf.type_constraints == type_constraints
    assert tf.arg_types == arg_types
    assert tf.ret_type == ret_type
    # Identity comparison is the idiomatic (and reliable) None check.
    assert tf.span is None
    # TODO make sure we can set span
    str(tf)
    check_json_roundtrip(tf)
def test_tuple_type():
    """TupleType keeps its field list and roundtrips through JSON."""
    type_var = tvm.ir.TypeVar("tp", tvm.ir.TypeKind.Type)
    func_ty = tvm.ir.FuncType([], tvm.ir.TupleType([]), [], [])
    tensor_ty = tvm.ir.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    fields = tvm.runtime.convert([type_var, func_ty, tensor_ty])
    tup_ty = tvm.ir.TupleType(fields)
    assert tup_ty.fields == fields
    str(tup_ty)
    check_json_roundtrip(tup_ty)
def test_type_relation():
    """TypeRelation wires up its args, input count, relation func, and attrs."""
    type_var = tvm.ir.TypeVar("tp", tvm.ir.TypeKind.Type)
    func_ty = tvm.ir.FuncType([], None, [], [])
    tensor_ty = tvm.ir.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    args = tvm.runtime.convert([type_var, func_ty, tensor_ty])
    num_inputs = 2
    broadcast_rel = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
    attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    relation = tvm.ir.TypeRelation(broadcast_rel, args, num_inputs, attrs)
    assert relation.args == args
    assert relation.num_inputs == num_inputs
    str(relation)
    check_json_roundtrip(relation)
if __name__ == "__main__":
    # Run every test in this file; test_prim_type was previously missing here.
    test_prim_type()
    test_tensor_type_bad_constructor()
    test_tensor_type()
    test_type_param()
    test_func_type()
    test_tuple_type()
    test_type_relation()
| 3,443 | 30.027027 | 76 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_verify_memory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import pytest
from tvm import te
import tvm.testing
# The following DLDeviceType/TVMDeviceExtType values
# are originally defined in dlpack.h and c_runtime_api.h.
# Targets on which every computation must be bound to GPU threads.
gpu_devices = ["cuda", "opencl", "metal", "vulkan"]
# Targets for which VerifyMemory accepts unbound computations.
other_devices = ["llvm", "ext_dev"]
# All computations are bound.
# So VerifyMemory pass is expected to succeed.
#
@tvm.testing.uses_gpu
def test_verify_memory_all_bind():
    """VerifyMemory passes on every device when all computations are thread-bound."""
    n = te.var("n")
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    # B is bound to threads.
    s = te.create_schedule(B.op)
    bx, tx = s[B].split(B.op.axis[0], factor=64)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))
    mod = tvm.lower(s, [A, B])
    for dev_type in gpu_devices + other_devices:
        if tvm.testing.device_enabled(dev_type):
            # Attach the target attribute so VerifyMemory knows the device class.
            binded_mod = tvm.tir.transform.Apply(
                lambda f: f.with_attr("target", tvm.target.Target(dev_type))
            )(mod)
            tvm.tir.transform.VerifyMemory()(binded_mod)
# Computations are not bound.
# So VerifyMemory pass fails when device type is GPU.
#
@tvm.testing.uses_gpu
def test_verify_memory_not_bind():
    """VerifyMemory rejects unbound computations on GPU targets but accepts them elsewhere."""
    n = te.var("n")
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    # B is not bound to threads.
    s = te.create_schedule(B.op)
    mod = tvm.lower(s, [A, B])
    for dev_type in gpu_devices:
        if tvm.testing.device_enabled(dev_type):
            binded_mod = tvm.tir.transform.Apply(
                lambda f: f.with_attr("target", tvm.target.Target(dev_type))
            )(mod)
            # An unbound computation on a GPU target must fail verification.
            with pytest.raises(RuntimeError):
                tvm.tir.transform.VerifyMemory()(binded_mod)
    for dev_type in other_devices:
        if tvm.testing.device_enabled(dev_type):
            binded_mod = tvm.tir.transform.Apply(
                lambda f: f.with_attr("target", tvm.target.Target(dev_type))
            )(mod)
            tvm.tir.transform.VerifyMemory()(binded_mod)
# Computations are partially bound.
# So VerifyMemory pass fails when device type is GPU.
#
@tvm.testing.uses_gpu
def test_verify_memory_partially_bind():
    """Partially bound computations fail verification on GPU targets, pass elsewhere."""
    n = te.var("n")
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda i: A[i] + 1.0, name="B")
    C = te.compute(B.shape, lambda i: B[i] + 2.0, name="C")
    D = te.compute(C.shape, lambda i: C[i] + 2.0, name="D")
    # C is bound to threads, but B and D are not.
    s = te.create_schedule([B.op, C.op, D.op])
    bx, tx = s[C].split(C.op.axis[0], factor=64)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    mod = tvm.lower(s, [A, B, C, D])
    for dev_type in gpu_devices:
        if tvm.testing.device_enabled(dev_type):
            binded_mod = tvm.tir.transform.Apply(
                lambda f: f.with_attr("target", tvm.target.Target(dev_type))
            )(mod)
            # The unbound stages (B, D) make GPU verification fail.
            with pytest.raises(RuntimeError):
                tvm.tir.transform.VerifyMemory()(binded_mod)
    for dev_type in other_devices:
        if tvm.testing.device_enabled(dev_type):
            binded_mod = tvm.tir.transform.Apply(
                lambda f: f.with_attr("target", tvm.target.Target(dev_type))
            )(mod)
            tvm.tir.transform.VerifyMemory()(binded_mod)
if __name__ == "__main__":
    # Execute all tests when this file is run directly.
    for _test in (
        test_verify_memory_all_bind,
        test_verify_memory_not_bind,
        test_verify_memory_partially_bind,
    ):
        _test()
| 4,274 | 34.040984 | 76 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_oob.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T
# Static out-of-bounds read: A has shape (2, 3), but A[2, 2] indexes row 2.
@T.prim_func
def bad_load(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")):
    B[0, 0] = A[2, 2]
# Out-of-bounds read through a loop: i reaches 2, but axis 0 of A has extent 2.
@T.prim_func
def bad_load_loop(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")):
    for i in range(3):
        B[i, 0] = A[i, 2]
# Static out-of-bounds write: B has shape (3, 2), but B[0, 3] exceeds axis 1.
@T.prim_func
def bad_store(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")):
    B[0, 3] = A[1, 2]
# Out-of-bounds write through a loop: i reaches 2, but axis 1 of B has extent 2.
@T.prim_func
def bad_store_loop(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")):
    for i in range(3):
        B[0, i] = A[1, i]
# N is an unconstrained symbolic index, so the checker cannot prove an OOB access.
@T.prim_func
def unknown_bounds(A: T.Buffer((2, 3), "float32"), B: T.Buffer((3, 2), "float32")):
    N = T.int32()
    for i in range(3):
        B[0, N] = A[1, i]
def test_oob_load():
    """OOBChecker flags out-of-bounds reads and names the offending buffer."""
    for bad_func in (bad_load, bad_load_loop):
        with pytest.raises(tvm.tir.ScheduleError) as err:
            tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_func))
        assert "buffer A" in err.value.args[0]
def test_oob_store():
    """OOBChecker flags out-of-bounds writes and names the offending buffer."""
    for bad_func in (bad_store, bad_store_loop):
        with pytest.raises(tvm.tir.ScheduleError) as err:
            tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(bad_func))
        assert "buffer B" in err.value.args[0]
def test_unknown_bounds():
    """Accesses with unconstrained symbolic indices must not be flagged."""
    # This should not return an error as we can't probe that N goes out of bounds
    tvm.tir.analysis.OOBChecker()(tvm.IRModule.from_expr(unknown_bounds))
if __name__ == "__main__":
    # `import tvm` alone does not pull in the tvm.testing submodule (the other
    # test files in this suite import it explicitly), so import it here before
    # calling tvm.testing.main().
    import tvm.testing

    tvm.testing.main()
| 2,509 | 30.772152 | 83 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_parser_evaluator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.evaluator"""
import pytest
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core.evaluator import ExprEvaluator
def _calc(expr, extra_vars=None):
    """Parse *expr* as Python source and evaluate its first expression statement."""
    var_table = {} if extra_vars is None else extra_vars
    module_ast = Source(expr).as_ast()
    first_stmt = module_ast.body[0]
    return ExprEvaluator.eval(None, var_table, first_stmt.value)
def test_evaluator_basic():
    """Literals of every basic kind evaluate to themselves."""
    expected = (1, 3.14, True, "str")
    assert _calc("1, 3.14, True, 'str'") == expected
def test_evaluator_op():
    """Binary arithmetic operators are evaluated correctly."""
    result = _calc("1 + 2, 1 - 2, 1 * 2, 1 / 2")
    assert result == (3, -1, 2, 0.5)
def test_evaluator_value_table():
    """Names supplied via the extra-vars table resolve during evaluation."""
    a, b = 1, 2
    result = _calc("a + b, a - b, a * b, a / b", {"a": a, "b": b})
    assert result == (a + b, a - b, a * b, a / b)
def test_evaluator_func_call():
    """Callables supplied via the value table can be invoked in expressions."""

    def arith(a, b):
        return a + b, a - b, a * b, a / b

    assert _calc("func(1, 2)", {"func": arith}) == arith(1, 2)
def test_evaluator_slice():
    """Subscript and slice expressions evaluate like native Python."""
    seq = [1, 2, 3, 4, 5, 6]
    result = _calc("a, a[1:], a[:5], a[1: 5], a[1: 5: 2]", {"a": seq})
    assert result == (seq, seq[1:], seq[:5], seq[1:5], seq[1:5:2])
if __name__ == "__main__":
    # Allow running this file directly outside of pytest.
    tvm.testing.main()
| 2,040 | 30.890625 | 82 | py |
tvm | tvm-main/tests/python/unittest/test_arith_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import tir
def test_simplify_reshape_flattened_index():
    """A flatten/unflatten index roundtrip simplifies back to the flat index."""
    ana = tvm.arith.Analyzer()
    i0 = tir.Var("i0", "int64")
    i1 = tir.Var("i1", "int64")
    ana.bind(i0, tvm.ir.Range(0, 8))
    ana.bind(i1, tvm.ir.Range(0, 3))
    i_flattened = i0 * 3 + i1
    # Decompose the flat index into 12/4/1 strides and recombine; the analyzer
    # should cancel the decomposition given the bound ranges above.
    assert tvm.ir.structural_equal(
        ana.simplify((i_flattened) // 12 * 12 + (i_flattened) % 12 // 4 * 4 + (i_flattened) % 4),
        i_flattened,
    )
def test_simplify_symbolic_comparison():
    """SYMBOLIC_BOUND proof strength is required to prove ceil-div tiling bounds."""
    ana = tvm.arith.Analyzer()
    i0 = tir.Var("i0", "int64")
    i1 = tir.Var("i1", "int64")
    n, m = tvm.tir.SizeVar("n", "int64"), tvm.tir.SizeVar("m", "int64")
    outer = (n + 31) // 32
    ana.bind(i0, tvm.ir.Range(0, outer))
    ana.bind(i1, tvm.ir.Range(0, 32))
    PS = tvm.arith.ProofStrength
    # The default strength cannot prove the tiled bound; SYMBOLIC_BOUND can.
    assert not ana.can_prove(i0 * 32 + i1 < (n + 31) // 32 * 32, PS.DEFAULT)
    assert ana.can_prove(i0 * 32 + i1 < (n + 31) // 32 * 32, PS.SYMBOLIC_BOUND)
    assert ana.can_prove(i0 * 32 + i1 < (n + 31) // 32 * 32 + m, PS.SYMBOLIC_BOUND)
    assert ana.can_prove(i0 * 32 + i1 + 1 <= (n + 31) // 32 * 32, PS.SYMBOLIC_BOUND)
    assert ana.can_prove((n + 31) // 32 * 32 >= i0 * 32 + i1 + 1, PS.SYMBOLIC_BOUND)
    assert ana.can_prove((n + 31) // 32 * 32 >= i0 * 32 + i1, PS.SYMBOLIC_BOUND)
def test_regression_simplify_inf_recursion():
    """Regression: this pattern used to send rewrite_simplify into infinite recursion."""
    ana = tvm.arith.Analyzer()
    cond = tir.Var("cond", "int32")
    res = (tvm.tir.NE(cond, 0).astype("int8") - tvm.tir.NE(cond, 0).astype("int8")).astype(
        "int32"
    ) == 0
    # regression in a previous case
    # try compare and int set recursive call can cause infinite loop
    ana.rewrite_simplify(res)
if __name__ == "__main__":
    # Allow running this file directly outside of pytest.
    tvm.testing.main()
| 2,509 | 34.857143 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_loop_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import numpy
def collect_visit(stmt, f):
    """Apply *f* to every node of *stmt* (post-order) and collect the results."""
    results = []

    def _record(node):
        results.append(f(node))

    tvm.tir.stmt_functor.post_order_visit(stmt, _record)
    return results
def test_basic():
    """Symbolic split: the partitioned main loop loses its if, the tail keeps one."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"]
    assert not any(collect_visit(stmt.body.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
    assert any(collect_visit(stmt.body.body[1], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_const_loop():
    """partition_const_loop removes all conditionals from a constant-extent split."""
    n = 21
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_no_unroll_loop():
    """`no_unroll_loop_with_extent_one` keeps extent-1 loops as For nodes."""
    n = 21
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=4)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(
        config={
            "tir.LoopPartition": {
                "partition_const_loop": True,
                "no_unroll_loop_with_extent_one": True,
            }
        }
    ):
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = tvm.tir.transform.Simplify()(mod)

    # Four For nodes survive because the extent-1 loops were not unrolled.
    stmt = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert sum(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.For))) == 4
def test_multi_loop():
    """A likely-guard in a symbolic triple loop partitions out of the first branch."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 4, "i") as i:
        with ib.for_range(0, n, "j") as j:
            with ib.for_range(0, m, "k") as k:
                with ib.if_scope(ib.likely(i * m + j + k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([n, m], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multi_if():
    """Two sibling likely-guards in the same loop body both partition away."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 4, "i") as i:
        with ib.for_range(0, n, "j") as j:
            with ib.for_range(0, m, "k") as k:
                with ib.if_scope(ib.likely(i * m + j + k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
                with ib.if_scope(ib.likely(i * m + j - k < n)):
                    ib.emit(tvm.tir.Evaluate(m))
                with ib.else_scope():
                    ib.emit(tvm.tir.Evaluate(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_thread_axis():
    """Partitioning also works across a thread-bound axis in shared scope."""
    m = te.size_var("m")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    B = te.compute((m, l), lambda i, j: A[i, j] + 3, name="B")
    s = te.create_schedule(B.op)
    s[B].set_scope("shared")
    num_thread = 16
    xo, xi = s[B].split(B.op.axis[0], 32)
    xi0, xi1 = s[B].split(xi, nparts=num_thread)
    s[B].bind(xi0, te.thread_axis("threadIdx.x"))
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"]
    assert not any(collect_visit(stmt.body.body[0], lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_vectorize():
    """After partitioning, the tail guard no longer references the vectorized index."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    bias = te.size_var("bias", dtype="float32")
    scale = te.size_var("scale", dtype="float32")
    C = te.compute(A.shape, lambda *i: A(*i) + B(*i) * scale + bias, name="C")
    # schedule
    s = te.create_schedule(C.op)
    # create iter var and assign them tags.
    num_thread = 32
    bx, x = s[C].split(C.op.axis[0], factor=num_thread * 4)
    tx, x = s[C].split(x, nparts=num_thread)
    _, x = s[C].split(x, factor=4)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].vectorize(x)
    stmt = tvm.lower(s, [A, B], name="main")["main"]
    body = stmt.body.body.body.body
    # The guard condition must not mention the vector lane variable, and the
    # vectorized branch must contain a Ramp (i.e. stayed vectorized).
    assert x.var.name not in str(body.condition)
    assert any(collect_visit(body.then_case, lambda x: isinstance(x, tvm.tir.Ramp)))
def test_condition():
    """A Select with a likely condition is eliminated from the partitioned head loop."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, tvm.tir.truncdiv(n + 3, 4), "i") as i:
        with ib.for_range(0, 4, "j") as j:
            ib.emit(tvm.tir.Evaluate(tvm.tir.Select(ib.likely(i * 4 + j < n), m, n)))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt[0], lambda x: isinstance(x, tvm.tir.Select)))
def test_condition_EQ():
    """An equality likely-condition splits the loop at the single matching point."""
    ib = tvm.tir.ir_builder.create()
    m = te.size_var("m")
    n = te.size_var("n")
    with ib.for_range(0, 10, "i") as i:
        ib.emit(tvm.tir.Evaluate(tvm.tir.Select(ib.likely(tvm.tir.EQ(i, 5)), m, n)))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt[0], lambda x: isinstance(x, tvm.tir.Select)))
def test_thread_axis2():
    """The partitioned main loop's extent must not depend on threadIdx."""
    n = tvm.runtime.convert(4096)
    m = te.size_var("m")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    C = te.compute(A.shape, lambda i: A[i] + B[i], name="C")
    s = te.create_schedule(C.op)
    num_thread = 32
    bx, x = s[C].split(C.op.axis[0], factor=32)
    tx, x = s[C].split(x, nparts=num_thread)
    _, x = s[C].split(x, factor=m)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    stmt = tvm.lower(s, [A, B], name="main")["main"]
    for_body = stmt.body.body.body.body[0]
    assert "threadIdx" not in str(for_body.extent)
def test_everything_during_deduction():
    """Guards whose deduction covers the whole space must survive as IfThenElse."""
    m = te.size_var("m")
    n = te.size_var("n")
    ib = tvm.tir.ir_builder.create()
    with ib.for_range(0, n, "i") as i:
        with ib.for_range(0, 32, "j") as j:
            with ib.if_scope(ib.likely(tvm.tir.truncdiv(i, j) < m)):
                # this guard will produce everything during deduction
                ib.emit(tvm.tir.Evaluate(m))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, n], stmt))
    mod = tvm.tir.transform.LoopPartition()(mod)
    stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert isinstance(stmt.body.body, tvm.tir.IfThenElse)
def test_single_likely():
    """A single likely-guard on a constant split partitions away entirely."""
    n = 60
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    T = te.compute((n,), lambda i: A[i] + B[i])
    s = te.create_schedule(T.op)
    x = T.op.axis[0]
    xo, xi = s[T].split(x, factor=16)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multi_likely():
    """A constant 2-D tiled schedule partitions away all likely-guards."""
    n = 94
    m = 62
    A = te.placeholder((n, m), name="A")
    B = te.placeholder((n, m), name="B")
    T = te.compute((n, m), lambda i, j: A[i, j] + B[i, j])
    s = te.create_schedule(T.op)
    x, y = T.op.axis
    xo, xi = s[T].split(x, factor=16)
    yo, yi = s[T].split(y, factor=16)
    s[T].reorder(xo, yo, xi, yi)
    # InferBound/ScheduleOps run once, after all schedule transforms.  (The
    # original also computed them before the splits and immediately discarded
    # the result — dead work removed here.)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_oneD_pool():
    """Hand-written 1-D pooling with boundary likely-guards partitions fully."""
    m = te.size_var("m")
    ib = tvm.tir.ir_builder.create()
    # data = te.placeholder((16,), name = 'data')
    data = ib.pointer("float32", name="A")
    out = ib.pointer("float32", name="A")
    # Interior region: 0 < ow < 15.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow > 0)):
                with ib.if_scope(ib.likely(ow < 15)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    # Left boundary: ow == 0 skips the kw == 0 tap.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow < 1)):
                with ib.if_scope(ib.likely(kw > 0)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    # Right boundary: ow == 15 skips the kw == 2 tap.
    with ib.for_range(0, 16, "ow") as ow:
        with ib.for_range(0, 3, "kw") as kw:
            with ib.if_scope(ib.likely(ow > 14)):
                with ib.if_scope(ib.likely(kw < 2)):
                    out[ow] = tvm.te.max(out[ow], data[ow + kw - 1])
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([m, data, out], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_1():
    """Flattened 2-D indexing guarded by a likely condition partitions completely."""
    ib = tvm.tir.ir_builder.create()
    dtype = "float16"
    n = 514
    m = 514
    _A = te.placeholder((n * m,), name="A")
    Ab = tvm.tir.decl_buffer((n * m,), dtype, name="A")
    A = ib.buffer_ptr(Ab)
    _B = te.placeholder((n * m,), name="B")
    Bb = tvm.tir.decl_buffer((n * m,), dtype, name="B")
    B = ib.buffer_ptr(Bb)
    # for i in 0 to n-1:
    with ib.for_range(0, 11, name="i") as i:
        with ib.for_range(0, 160, name="j") as j:
            with ib.if_scope(ib.likely(((i * 160) + j) < 1600)):
                A[(i + 1) * m + j + 1] = (
                    B[(i) * m + j + 1] + B[(i + 1) * m + j + 1] + B[(i + 2) * m + j + 1]
                )
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab, Bb], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_2():
    """Tail-tile pattern (head + tile > length) partitions into separate loops."""
    ib = tvm.tir.ir_builder.create()
    # Renamed from `len`, which shadowed the builtin.
    total_len = 112
    tile = 32
    loop = (total_len + tile - 1) // tile
    with ib.for_range(0, loop, "i") as i:
        head = i * tile
        with ib.if_scope(ib.likely(head + tile > total_len)):
            tail = total_len
            ib.emit(tvm.tir.call_extern("float32", "cce_intrisic", head, tail))
        with ib.else_scope():
            tail = head + tile
            ib.emit(tvm.tir.call_extern("float32", "cce_intrisic", head, tail))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_cce_loop_3():
    """Nested constant loops guarded by a flattened-index likely partition fully."""
    ib = tvm.tir.ir_builder.create()
    loop1 = 4
    loop2 = 9998
    tile = 39991
    with ib.for_range(0, loop2, "i") as i:
        with ib.for_range(0, loop1, "j") as j:
            head1 = i
            head2 = j
            with ib.if_scope(ib.likely(head1 * loop1 + head2 < tile)):
                ib.emit(tvm.tir.call_extern("float16", "cce_intrisic", head1))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_conv_tiling():
    """A tiled conv2d partitions its constant loops without leftover conditionals."""
    HSTR = WSTR = 1
    in_channel = 128
    kernel_height = kernel_width = 3
    out_channel = 64
    batch_size = 1
    in_height = in_width = 64
    out_height = out_width = in_height - kernel_height + 1
    data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data")
    kernel = te.placeholder((kernel_height, kernel_width, in_channel, out_channel), name="kernel")
    ic = te.reduce_axis((0, in_channel), name="ic")
    kh = te.reduce_axis((0, kernel_height), name="kh")
    kw = te.reduce_axis((0, kernel_width), name="kw")
    conv = te.compute(
        (batch_size, out_channel, out_height, out_width),
        lambda n, oc, oh, ow: te.sum(
            data[n, ic, oh * HSTR + kh, ow * WSTR + kw] * kernel[kh, kw, ic, oc], axis=[ic, kh, kw]
        ),
        name="conv2d",
    )
    s = te.create_schedule(conv.op)
    n, oc, oh, ow = conv.op.axis
    oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16)
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.LoopPartition()(mod)
        stmt = tvm.tir.transform.Simplify()(mod)["main"].body
    assert not any(collect_visit(stmt, lambda x: isinstance(x, tvm.tir.IfThenElse)))
def test_multilevel_splitting_with_indivisble_factors():
    """Two-level indivisible split still partitions/unrolls the relu correctly."""
    from tvm import topi

    A = te.placeholder((130,), dtype="float32")
    B = topi.nn.relu(A)
    s = te.create_schedule(B.op)
    (y,) = s[B].op.axis
    (yo, yi) = s[B].split(y, factor=8)
    (yoo, yoi) = s[B].split(yo, factor=16)
    s[B].reorder(yoo, yoi, yi)
    s[B].unroll(yi)

    ## But this does the right thing.
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        lowered_body = tvm.lower(s, [A, B], name="x")["x"].body

        def visit_stmt(op):
            return isinstance(op, tvm.tir.Max)

        # Ten unrolled Max ops survive in the partitioned body.
        num_max = collect_visit(lowered_body, visit_stmt)
        assert num_max.count(True) == 10
def test_double_splitting_with_indivisible_factors():
    """Indivisible double split partitions cleanly and stays numerically correct."""
    m = 48
    dtype = "float32"
    A = te.placeholder((m,), name="A", dtype=dtype)
    C = te.compute((m,), lambda i: A[i], name="C")
    D = te.compute((m,), lambda i: C[i], name="D")
    s = te.create_schedule(D.op)
    co, ci = s[C].split(C.op.axis[0], factor=10)
    do, di = s[D].split(D.op.axis[0], 32)
    s[C].compute_at(s[D], do)
    target = "llvm"
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        f = tvm.lower(s, [A, C, D], name="fadd1", simple_mode=False)
        func = tvm.build(f, target=target)

    # No conditionals should remain in the partitioned top-level produce.
    top_produce = f["fadd1"].body
    assert not any(collect_visit(top_produce, lambda x: isinstance(x, tvm.tir.IfThenElse)))
    # check functional correctness of generated code
    dev = tvm.device(target, 0)
    a = tvm.nd.array(
        numpy.ones(
            m,
        ).astype(dtype),
        dev,
    )
    c = tvm.nd.array(
        numpy.zeros(
            m,
        ).astype(dtype),
        dev,
    )
    d = tvm.nd.array(
        numpy.zeros(
            m,
        ).astype(dtype),
        dev,
    )
    func(a, c, d)
    tvm.testing.assert_allclose(c.numpy(), a.numpy(), rtol=1e-5)
    tvm.testing.assert_allclose(d.numpy(), a.numpy(), rtol=1e-5)
def test_simple_rfactor():
    """LoopPartition changes an rfactor-ed reduction (sanity check that it fires)."""
    K = 16 * 4 + 4
    k = te.reduce_axis((0, K), "k")
    A = te.placeholder((1, K), name="A")
    B = te.compute((1,), lambda b: te.sum(A[b, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, _ = s[B].split(s[B].op.reduce_axis[0], 16)
    BF = s.rfactor(B, ko, 0)
    s.normalize()
    bounds = tvm.te.schedule.InferBound(s)
    stmt1 = tvm.te.schedule.ScheduleOps(s, bounds)
    mod1 = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt1))
    stmt1 = tvm.tir.transform.Simplify()(mod1)["main"].body
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod2 = tvm.tir.transform.LoopPartition()(mod1)
        stmt2 = tvm.tir.transform.Simplify()(mod2)["main"].body
    # make sure loop partition actually did something
    assert not tvm.ir.structural_equal(stmt1.body, stmt2.body)
# Expected IR for test_explicit_partition_hint: the i<16 select has been
# partitioned into two straight-line copy loops.
@T.prim_func
def partitioned_concat(
    A: T.Buffer((16,), "float32"), B: T.Buffer((16,), "float32"), C: T.Buffer((32,), "float32")
) -> None:
    T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
    for i in T.serial(0, 16):
        C[i] = A[i]
    for i in T.serial(0, 16):
        C[i + 16] = B[i + 16]
def test_explicit_partition_hint():
    """The loop_partition_hint pragma forces partitioning of a select-based concat."""
    A = te.placeholder((16,), name="A")
    B = te.placeholder((16,), name="B")
    C = te.compute((32,), lambda i: te.if_then_else(i < 16, A[i], B[i]), name="C")
    s = te.create_schedule(C.op)
    s.normalize()
    s[C].pragma(s[C].op.axis[0], "loop_partition_hint", True)
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}):
        mod = tvm.tir.transform.StorageFlatten(64)(mod)
        mod = tvm.tir.transform.LoopPartition()(mod)
        mod = tvm.tir.transform.Simplify()(mod)
    assert tvm.ir.structural_equal(mod["main"], partitioned_concat)
def partition_from_scheduled_tir(prim_func, pass_cfg):
    """Lower *prim_func* and run LoopPartition plus cleanup passes under *pass_cfg*."""
    with tvm.transform.PassContext(config=pass_cfg):
        mod = IRModule.from_expr(prim_func)
        lowering_pipeline = (
            tvm.tir.transform.LowerOpaqueBlock(),
            tvm.tir.transform.FlattenBuffer(),
            tvm.tir.transform.LoopPartition(),
            tvm.tir.transform.Simplify(),
            tvm.tir.transform.RemoveNoOp(),
        )
        for pass_instance in lowering_pipeline:
            mod = pass_instance(mod)
        return mod
@T.prim_func
def partitioned_concat_3(
    placeholder: T.Buffer((1, 64, 28, 28), "int8"),
    placeholder_1: T.Buffer((1, 32, 28, 28), "int8"),
    placeholder_2: T.Buffer((1, 32, 28, 28), "int8"),
    T_concat: T.Buffer((1, 128, 28, 28), "int8"),
) -> None:
    # Expected result for test_condition_mutually_exclusive: each guarded
    # channel range of concat_func_3 becomes its own unconditional copy loop.
    placeholder_flat = T.Buffer([50176], "int8", data=placeholder.data)
    placeholder_1_flat = T.Buffer([25088], "int8", data=placeholder_1.data)
    placeholder_2_flat = T.Buffer([25088], "int8", data=placeholder_2.data)
    T_concat_flat = T.Buffer([100352], "int8", data=T_concat.data)
    # Channels [0, 64): copy from the first input.
    for i1, i2, i3 in T.grid(64, 28, 28):
        T_concat_flat[i1 * 784 + i2 * 28 + i3] = placeholder_flat[i1 * 784 + i2 * 28 + i3]
    # Channels [64, 96): copy from the second input (offset 64*784 = 50176).
    for i1, i2, i3 in T.grid(32, 28, 28):
        T_concat_flat[i1 * 784 + i2 * 28 + i3 + 50176] = placeholder_1_flat[i1 * 784 + i2 * 28 + i3]
    # Channels [96, 128): copy from the third input (offset 96*784 = 75264).
    for i1, i2, i3 in T.grid(32, 28, 28):
        T_concat_flat[i1 * 784 + i2 * 28 + i3 + 75264] = placeholder_2_flat[i1 * 784 + i2 * 28 + i3]
@T.prim_func
def concat_func_3(
    placeholder: T.Buffer((1, 64, 28, 28), "int8"),
    placeholder_1: T.Buffer((1, 32, 28, 28), "int8"),
    placeholder_2: T.Buffer((1, 32, 28, 28), "int8"),
    T_concat: T.Buffer((1, 128, 28, 28), "int8"),
) -> None:
    # Input for test_condition_mutually_exclusive: a hinted channel loop whose
    # body guards three mutually exclusive channel ranges of a concatenation.
    placeholder_flat = T.Buffer([50176], "int8", data=placeholder.data)
    placeholder_1_flat = T.Buffer([25088], "int8", data=placeholder_1.data)
    placeholder_2_flat = T.Buffer([25088], "int8", data=placeholder_2.data)
    T_concat_flat = T.Buffer([100352], "int8", data=T_concat.data)
    for i1 in T.serial(128, annotations={"pragma_loop_partition_hint": 1}):
        for i2, i3 in T.grid(28, 28):
            # Channels [96, 128) come from the third input.
            if 96 <= i1:
                T_concat_flat[i1 * 784 + i2 * 28 + i3] = placeholder_2_flat[
                    i1 * 784 + i2 * 28 + i3 - 75264
                ]
            # Channels [64, 96) come from the second input.
            if 64 <= i1 and i1 < 96:
                T_concat_flat[i1 * 784 + i2 * 28 + i3] = placeholder_1_flat[
                    i1 * 784 + i2 * 28 + i3 - 50176
                ]
            # Channels [0, 64) come from the first input.
            if i1 < 64:
                T_concat_flat[i1 * 784 + i2 * 28 + i3] = placeholder_flat[i1 * 784 + i2 * 28 + i3]
def test_condition_mutually_exclusive():
    """Mutually exclusive guards inside a hinted loop each become their own loop."""
    cfg = {"tir.LoopPartition": {"partition_const_loop": True}}
    mod = partition_from_scheduled_tir(concat_func_3, cfg)
    assert tvm.ir.structural_equal(mod["main"], partitioned_concat_3)
def test_loop_partition_unroll_hint():
    """Partitioned boundary loops carrying the hint are unrolled by UnrollLoop."""
    # Input: a hinted loop whose boundary iterations need different guards.
    @T.prim_func
    def main(
        A_arg: T.Buffer((1, 3, 224, 224), "int8"), B_arg: T.Buffer((1, 224, 7, 16), "int8")
    ) -> None:
        A = T.Buffer(150528, "int8", data=A_arg.data)
        B = T.Buffer(25088, "int8", data=B_arg.data)
        for ax0 in T.serial(
            112,
            annotations={"pragma_loop_partition_hint": True},
        ):
            for ax1, ax2, ax3 in T.grid(224, 7, 16):
                if 3 <= ax0 * 2 + ax2 and ax0 * 2 + ax2 < 227 and ax3 < 3:
                    B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax0 * 2 + ax2 - 3]
    # Expected: boundary iterations ax0 = 0, 1 and 111 are peeled and unrolled,
    # leaving a middle loop of extent 109 without the ax0-dependent guard.
    @T.prim_func
    def partitioned_main(
        A_arg: T.Buffer((1, 3, 224, 224), "int8"), B_arg: T.Buffer((1, 224, 7, 16), "int8")
    ) -> None:
        A = T.Buffer(150528, dtype="int8", data=A_arg.data)
        B = T.Buffer(25088, dtype="int8", data=B_arg.data)
        # body
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if 3 <= ax2 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 - 3]
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if 1 <= ax2 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 - 1]
        for ax0, ax1, ax2, ax3 in T.grid(109, 224, 7, 16):
            if ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax0 * 2 + ax2 + 1]
        for ax1, ax2, ax3 in T.grid(224, 7, 16):
            if ax2 < 5 and ax3 < 3:
                B[ax1 * 112 + ax2 * 16 + ax3] = A[ax3 * 50176 + ax1 * 224 + ax2 + 219]
    mod = partition_from_scheduled_tir(
        main,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
                "unroll_loop_with_partition_hint_no_interval": True,
            }
        },
    )
    mod = tvm.tir.transform.UnrollLoop()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    mod = tvm.tir.transform.Simplify()(mod)
    assert tvm.ir.structural_equal(mod["main"], partitioned_main)
def test_loop_partition_recursive_unroll_hint():
    """Nested hinted loops are partitioned recursively and unrolled."""
    # Input: both i3_0 and i2_0 carry partition hints; the padding condition
    # depends on both loop variables.
    @T.prim_func
    def main():
        placeholder_0_dm = T.decl_buffer([1, 32, 32, 16], dtype="int8")
        for i3_0 in T.serial(5, annotations={"pragma_loop_partition_hint": 1}):
            for i2_0 in T.serial(2, annotations={"pragma_loop_partition_hint": 1}):
                pad_temp = T.decl_buffer([1, 16, 16, 16], dtype="int8")
                for ax0, ax1, ax2 in T.grid(16, 16, 16):
                    if (
                        6 <= i2_0 * 4 + ax0
                        and i2_0 * 4 + ax0 < 26
                        and 6 <= i3_0 * 4 + ax1
                        and i3_0 * 4 + ax1 < 26
                    ):
                        pad_temp[
                            0,
                            i2_0 * 4 + ax0 - 6 + 6 - i2_0 * 4,
                            i3_0 * 4 + ax1 - 6 + 6 - i3_0 * 4,
                            ax2,
                        ] = placeholder_0_dm[
                            0,
                            i2_0 * 4 + ax0 - 6 - -6,
                            i3_0 * 4 + ax1 - 6 - -6,
                            ax2,
                        ]
    # Expected: the i3_0 loop is split into three regions; the surviving
    # loops become T.unroll and the guards are simplified per region.
    @T.prim_func
    def partitioned_main():
        placeholder_0_dm = T.allocate([16384], "int8", "global")
        placeholder_0_dm_1 = T.Buffer([16384], dtype="int8", data=placeholder_0_dm)
        for i3_0 in T.unroll(2):
            for i2_0 in T.unroll(2):
                pad_temp = T.allocate([4096], "int8", "global")
                pad_temp_1 = T.Buffer([4096], dtype="int8", data=pad_temp)
                for ax0, ax1, ax2 in T.grid(16, 16, 16):
                    if 6 <= i2_0 * 4 + ax0 and 6 <= i3_0 * 4 + ax1:
                        pad_temp_1[ax0 * 256 + ax1 * 16 + ax2] = placeholder_0_dm_1[
                            i2_0 * 2048 + ax0 * 512 + i3_0 * 64 + ax1 * 16 + ax2
                        ]
        for i2_0 in T.unroll(2):
            pad_temp_2 = T.allocate([4096], "int8", "global")
            pad_temp_3 = T.Buffer([4096], dtype="int8", data=pad_temp_2)
            for ax0, ax1, ax2 in T.grid(16, 16, 16):
                if 6 <= i2_0 * 4 + ax0:
                    pad_temp_3[ax0 * 256 + ax1 * 16 + ax2] = placeholder_0_dm_1[
                        i2_0 * 2048 + ax0 * 512 + ax1 * 16 + ax2 + 128
                    ]
        for i3_0 in T.unroll(2):
            for i2_0 in T.unroll(2):
                pad_temp_4 = T.allocate([4096], "int8", "global")
                pad_temp_5 = T.Buffer([4096], dtype="int8", data=pad_temp_4)
                for ax0, ax1, ax2 in T.grid(16, 16, 16):
                    if 6 <= i2_0 * 4 + ax0 and i3_0 * 4 + ax1 < 14:
                        pad_temp_5[ax0 * 256 + ax1 * 16 + ax2] = placeholder_0_dm_1[
                            i2_0 * 2048 + ax0 * 512 + i3_0 * 64 + ax1 * 16 + ax2 + 192
                        ]
    mod = partition_from_scheduled_tir(
        main,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
                "unroll_loop_with_partition_hint_no_interval": True,
            }
        },
    )
    assert tvm.ir.structural_equal(mod["main"], partitioned_main)
def test_loop_partition_keep_loop_annotations():
    """Non-hint annotations ("key": "value") must survive on every split loop."""
    @T.prim_func
    def before(A: T.Buffer(160, "int32"), B: T.Buffer(160, "int32")) -> None:
        for i in T.serial(
            160,
            annotations={"pragma_loop_partition_hint": True, "key": "value"},
        ):
            if i < 10:
                B[i] = A[i] + 1
            elif 10 <= i and i < 150:
                B[i] = A[i] + 2
            else:
                B[i] = A[i] + 3
    # Expected: three loops matching the three branches, the partition hint
    # consumed but the "key" annotation retained on each.
    @T.prim_func
    def after(A: T.Buffer(160, "int32"), B: T.Buffer(160, "int32")) -> None:
        for i in T.serial(10, annotations={"key": "value"}):
            B[i] = A[i] + 1
        for i in T.serial(140, annotations={"key": "value"}):
            B[i + 10] = A[i + 10] + 2
        for i in T.serial(10, annotations={"key": "value"}):
            B[i + 150] = A[i + 150] + 3
    mod = partition_from_scheduled_tir(
        before,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
            }
        },
    )
    assert tvm.ir.structural_equal(mod["main"], after)
def test_loop_partition_with_unit_loop_in_condition():
    """A preserved unit loop (k in [0, 1)) in the conditions must not block partitioning."""
    @T.prim_func
    def before(
        placeholder: T.Buffer((50176,), "int8"),
        placeholder_1: T.Buffer((25088,), "int8"),
        placeholder_2: T.Buffer((25088,), "int8"),
        T_concat: T.Buffer((100352,), "int8"),
    ) -> None:
        for k in range(1, annotations={"preserve_unit_loop": True}):
            for i1 in T.serial(128, annotations={"pragma_loop_partition_hint": 1}):
                for i2, i3 in T.grid(28, 28):
                    if 96 <= k * 128 + i1:
                        # NOTE(review): the destination index uses k * i1 * 784,
                        # which is 0 for k == 0 — the expected IR below mirrors
                        # this (T_concat[i2 * 28 + i3]); confirm it is intended.
                        T_concat[k * i1 * 784 + i2 * 28 + i3] = placeholder_2[
                            i1 * 784 + i2 * 28 + i3 - 75264
                        ]
                    if 64 <= k * 128 + i1 and k * 128 + i1 < 96:
                        T_concat[i1 * 784 + i2 * 28 + i3] = placeholder_1[
                            i1 * 784 + i2 * 28 + i3 - 50176
                        ]
                    if k * 128 + i1 < 64:
                        T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
    # Expected: the unit loop is preserved, and the inner hinted loop is
    # partitioned into the three channel-range copies.
    @T.prim_func
    def after(
        placeholder: T.Buffer(50176, "int8"),
        placeholder_1: T.Buffer(25088, "int8"),
        placeholder_2: T.Buffer(25088, "int8"),
        T_concat: T.Buffer(100352, "int8"),
    ) -> None:
        for _ in T.serial(1, annotations={"preserve_unit_loop": True}):
            for i1, i2, i3 in T.grid(64, 28, 28):
                T_concat[i1 * 784 + i2 * 28 + i3] = placeholder[i1 * 784 + i2 * 28 + i3]
            for i1, i2, i3 in T.grid(32, 28, 28):
                T_concat[i1 * 784 + i2 * 28 + i3 + 50176] = placeholder_1[i1 * 784 + i2 * 28 + i3]
            for i1, i2, i3 in T.grid(32, 28, 28):
                T_concat[i2 * 28 + i3] = placeholder_2[i1 * 784 + i2 * 28 + i3]
    mod = partition_from_scheduled_tir(
        before,
        {
            "tir.LoopPartition": {
                "partition_const_loop": True,
            }
        },
    )
    assert tvm.ir.structural_equal(mod["main"], after)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 31,414 | 36.713085 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_parser_tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.tir"""
import pytest
import tvm.testing
from tvm.script.parser import tir as T
from tvm import ir, tir
def test_tir_buffer_proxy():
    """T.Buffer used outside a prim_func builds a tir.Buffer with the given shape/dtype."""
    for shape, dtype in (((128, 128), "float32"), ((64, 64, 64), "int32")):
        buf = T.Buffer(shape, dtype)
        assert isinstance(buf, tir.Buffer)
        assert list(buf.shape) == list(shape)
        assert buf.dtype == dtype
def test_tir_ptr_proxy():
    """T.handle with an element dtype yields a pointer-typed tir.Var."""
    for elem_dtype, scope in (("int32", "global"), ("float32", "shared")):
        ptr = T.handle(elem_dtype, scope)
        assert isinstance(ptr, tir.Var)
        assert ptr.dtype == "handle"
        annotation = ptr.type_annotation
        assert isinstance(annotation, ir.PointerType)
        assert annotation.element_type == ir.PrimType(elem_dtype)
        assert annotation.storage_scope == scope
def test_tir_func_name():
    """A parsed prim_func keeps the original Python function's __name__."""
    @T.prim_func
    def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    assert matmul.__name__ == "matmul"
def test_tir_macro_decorator_signature():
    """T.macro must work bare and with empty parentheses, and reject
    non-keyword decorator arguments."""

    @T.prim_func
    def evaluate0():
        T.evaluate(0)

    # Ok, no parentheses
    @T.macro
    def func1():
        T.evaluate(0)

    assert func1.hygienic

    @T.prim_func
    def use1():
        func1()

    tvm.ir.assert_structural_equal(use1, evaluate0)

    # Ok, empty parentheses
    @T.macro()
    def func2():
        T.evaluate(0)

    assert func2.hygienic

    @T.prim_func
    def use2():
        func2()

    # BUG FIX: previously compared use1 a second time, so the empty-parentheses
    # macro expansion (use2) was never actually verified.
    tvm.ir.assert_structural_equal(use2, evaluate0)

    with pytest.raises(ValueError):
        # Wrong: non-keyword argument
        @T.macro(True)
        def func3():
            T.evaluate()
def test_tir_macro_signature():
    """Macros support positional, *args, keyword-only, and **kwargs parameters."""
    @T.macro
    def assign(i, *args, t1, **kwargs):
        # args carries the remaining loop vars; kwargs carries the buffers.
        vi, vj, vk = T.axis.remap("SSR", [i, args[0], args[1]])
        kwargs["t3"][vi, vj] = kwargs["t3"][vi, vj] + t1[vi, vk] * kwargs["t2"][vj, vk]
    @T.prim_func
    def matmul_w_macro(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                assign(i, j, k, t1=A, t2=B, t3=C)
    # Reference written without the macro; both must parse to the same IR.
    @T.prim_func
    def matmul_no_macro(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    tvm.ir.assert_structural_equal(matmul_no_macro, matmul_w_macro)
def test_tir_macro_hygienic():
    """A hygienic macro resolves x_value at its definition site (128)."""
    x_value = 128
    @T.macro(hygienic=True)
    def static_capture(A, B):
        B[()] = A[x_value]
    @T.prim_func
    def use_hygienic(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        # The loop variable shadows x_value, but the macro must still read A[128].
        for x_value in T.serial(10):
            static_capture(A, B)
    @T.prim_func
    def expected_hygienic(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        for x_value in range(10):
            B[()] = A[128]
    tvm.ir.assert_structural_equal(use_hygienic, expected_hygienic)
def test_tir_macro_non_hygienic():
    """A non-hygienic macro binds x_value at the expansion site (the loop var)."""
    x_value = 128
    @T.macro(hygienic=False)
    def dynamic_capture(A, B):
        B[()] = A[x_value]
    @T.prim_func
    def use_non_hygienic(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        # Here the expansion must pick up the loop variable, not the outer 128.
        for x_value in T.serial(10):
            dynamic_capture(A, B)
    @T.prim_func
    def expected_non_hygienic(A: T.Buffer((1024,), "int32"), B: T.Buffer((), "int32")) -> None:
        for x_value in range(10):
            B[()] = A[x_value]
    tvm.ir.assert_structural_equal(use_non_hygienic, expected_non_hygienic)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 5,428 | 28.666667 | 95 | py |
tvm | tvm-main/tests/python/unittest/test_datatype_nv_fp8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
import tvm.tir as tir
from tvm import te
from tvm.script import tir as T
try:
from ml_dtypes import float8_e4m3fn as e4m3_float8, float8_e5m2 as e5m2_float8
except ImportError:
e4m3_float8, e5m2_float8 = None, None
def fp8_unary(dtype: str):
    """Build a prim_func exercising elementwise fp8 ops for *dtype*.

    The kernel computes A+B, A-B, A*B, A cast to float32, and a
    float32 -> fp8 round trip of A, over 128-element buffers.
    """
    @T.prim_func
    def func(
        a: T.handle,
        b: T.handle,
        a_add_b: T.handle,
        a_sub_b: T.handle,
        a_mul_b: T.handle,
        a_fp32: T.handle,
        a_roundtrip: T.handle,
    ) -> None:
        A = T.match_buffer(a, [128], dtype=dtype)
        B = T.match_buffer(b, [128], dtype=dtype)
        A_add_B = T.match_buffer(a_add_b, [128], dtype=dtype)
        A_sub_B = T.match_buffer(a_sub_b, [128], dtype=dtype)
        A_mul_B = T.match_buffer(a_mul_b, [128], dtype=dtype)
        A_fp32 = T.match_buffer(a_fp32, [128], dtype="float32")
        A_roundtrip = T.match_buffer(a_roundtrip, [128], dtype=dtype)
        for i in range(128):
            with T.block("fp8_unary"):
                vi = T.axis.spatial(128, i)
                A_add_B[vi] = A[vi] + B[vi]
                A_sub_B[vi] = A[vi] - B[vi]
                A_mul_B[vi] = A[vi] * B[vi]
                A_fp32[vi] = A[vi]
                # Assigning a float32 value into an fp8 buffer: round trip.
                A_roundtrip[vi] = A_fp32[vi]
    return func
# Parametrize the tests over both NV fp8 flavors; the numpy dtype entries are
# None when ml_dtypes is not installed (see the guarded import above).
np_dtype, dtype_str = tvm.testing.parameters(
    (e4m3_float8, "e4m3_float8"), (e5m2_float8, "e5m2_float8")
)
def test_create_nv_fp8_nd_array(np_dtype, dtype_str):
    """An ndarray built from an ml_dtypes fp8 numpy array reports the TVM dtype name."""
    if np_dtype is None:
        # ml_dtypes is not installed; nothing to check.
        return
    host_data = np.random.rand(128, 128).astype(np_dtype)
    device_array = tvm.nd.array(host_data)
    assert device_array.dtype == dtype_str
def test_fp8_unary_op(np_dtype, dtype_str):
    """Build and run the fp8 elementwise kernel on LLVM (smoke test)."""
    kernel = fp8_unary(dtype_str)
    if not tvm.testing.device_enabled("llvm"):
        return
    if np_dtype is None:
        # ml_dtypes is not installed; nothing to check.
        return
    built = tvm.build(kernel, target="llvm")
    host_buffers = [
        np.random.randn(128).astype(np_dtype),  # a
        np.random.randn(128).astype(np_dtype),  # b
        np.zeros(128).astype(np_dtype),  # a + b
        np.zeros(128).astype(np_dtype),  # a - b
        np.zeros(128).astype(np_dtype),  # a * b
        np.zeros(128).astype(np.float32),  # a cast to float32
        np.zeros(128).astype(np_dtype),  # float32 -> fp8 round trip
    ]
    built(*[tvm.nd.array(buf) for buf in host_buffers])
def test_nv_fp8_buffer(np_dtype, dtype_str):
    """decl_buffer must accept the fp8 dtype names."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    buf = tvm.tir.decl_buffer((rows, cols), dtype_str)
    assert buf.dtype == dtype_str
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 3,404 | 31.428571 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_hoist_expression.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.transform import HoistedConditionals, HoistedLetBindings, HoistExpression
class BaseBeforeAfter:
    """Shared harness: run HoistExpression on `before` under the parametrized
    flags and require structural equality with `expected`."""

    hoisted_conditionals = tvm.testing.parameter(HoistedConditionals.All)
    hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.All)

    def test_hoist(self, hoisted_conditionals, hoisted_let_bindings):
        pass_config = {
            "tir.HoistExpression": {
                "hoisted_conditionals": hoisted_conditionals.value,
                "hoisted_let_bindings": hoisted_let_bindings.value,
            }
        }
        input_mod = tvm.IRModule.from_expr(self.before)
        with tvm.transform.PassContext(config=pass_config):
            output_mod = tvm.tir.transform.HoistExpression()(input_mod)
        actual = output_mod["main"]
        try:
            tvm.ir.assert_structural_equal(actual, self.expected)
        except ValueError as err:
            # Include all three functions in the error to ease debugging.
            script = tvm.IRModule(
                {"expected": self.expected, "after": actual, "before": self.before}
            ).script()
            raise ValueError(
                f"Function after simplification did not match expected:\n{script}"
            ) from err
class TestHoistToTop(BaseBeforeAfter):
    """An if depending only on a function argument hoists above the loop."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.IfElseStmt,
        HoistedConditionals.All,
    )
    @T.prim_func
    def before(A: T.Buffer((16,), "float32"), n: T.int32):
        for i in T.serial(16):
            if n != 0:
                A[i] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((16,), "float32"), n: T.int32):
        if n != 0:
            for i in T.serial(16):
                A[i] = 0.0
class TestSuppressHoistIfElse(BaseBeforeAfter):
    """With IfElseStmt hoisting disabled, the if stays inside the loop."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.Never,
        HoistedConditionals.IfElseExpr,
    )
    @T.prim_func
    def before(A: T.Buffer((16,), "float32"), n: T.int32):
        for i in T.serial(16):
            if n != 0:
                A[i] = 0.0
    expected = before
class TestHoistBlockVar(BaseBeforeAfter):
    """A condition on a launched thread var hoists above the inner serial loop."""
    @T.prim_func
    def before(A: T.Buffer((128, 16), "float32"), n: T.int32):
        i = T.env_thread("threadIdx.x")
        T.launch_thread(i, 128)
        for j in T.serial(16):
            if i < 32:
                A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((128, 16), "float32"), n: T.int32):
        i = T.env_thread("threadIdx.x")
        T.launch_thread(i, 128)
        if i < 32:
            for j in T.serial(16):
                A[i, j] = 0.0
class TestSuppressHoistBlockVar(BaseBeforeAfter):
    """With UsingBlockVar disabled, a condition on a thread-bound var stays put."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
    )
    @T.prim_func
    def before(A: T.Buffer((128, 16), "float32"), n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)
        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            if i < 32:
                for j in T.serial(16):
                    A[i, j] = 0.0
    expected = before
class TestHoistAcrossBlockVar(BaseBeforeAfter):
    """A condition independent of the thread var hoists above the thread launch."""
    @T.prim_func
    def before(A: T.Buffer((128, 16), "float32"), n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)
        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            if n == 0:
                for j in T.serial(16):
                    A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((128, 16), "float32"), n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        if n == 0:
            T.launch_thread(thread_x, 128)
            for i in T.thread_binding(0, 128, thread="threadIdx.x"):
                for j in T.serial(16):
                    A[i, j] = 0.0
class TestSuppressHoistAcrossBlockVar(BaseBeforeAfter):
    """With UsingBlockVar disabled, the condition hoists only up to the launch."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.UsingBlockVar
    )
    @T.prim_func
    def before(A: T.Buffer((128, 16), "float32"), n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)
        for i in T.thread_binding(0, 128, thread="threadIdx.x"):
            for j in T.serial(16):
                if n == 0:
                    A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((128, 16), "float32"), n: T.int32):
        thread_x = T.env_thread("threadIdx.x")
        T.launch_thread(thread_x, 128)
        if n == 0:
            for i in T.thread_binding(0, 128, thread="threadIdx.x"):
                for j in T.serial(16):
                    A[i, j] = 0.0
class TestHoistToMiddle(BaseBeforeAfter):
    """A condition on the outer loop var hoists between the two loops."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
class TestHoistWithLet(BaseBeforeAfter):
    """A let-bound condition hoists together with its binding."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                condition = i < 3
                if condition:
                    A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                for j in T.serial(4):
                    A[i, j] = 0.0
class TestHoistDisableLet(BaseBeforeAfter):
    """As TestHoistWithLet, but forbid hoisting of LetStmt
    Because the condition depends on the let binding, it should no
    longer be hoisted.
    """
    # LetStmt hoisting disabled entirely; the condition is pinned in place.
    hoisted_let_bindings = tvm.testing.parameter(HoistedLetBindings.Never)
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                condition = i < 3
                if condition:
                    A[i, j] = 0.0
    expected = before
class TestHoistIfElse(BaseBeforeAfter):
    """Both branches of an if/else are hoisted, duplicating the inner loop."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0
                else:
                    A[i, j] = 1.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
            else:
                for j in T.serial(4):
                    A[i, j] = 1.0
class TestHoistSequentialAssign(BaseBeforeAfter):
    """Hoisting preserves multi-statement branch bodies."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32"), B: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                if i < 3:
                    A[i, j] = 0.0
                    B[i, j] = 0.0
                else:
                    A[i, j] = 1.0
                    B[i, j] = 1.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32"), B: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 3:
                for j in T.serial(4):
                    A[i, j] = 0.0
                    B[i, j] = 0.0
            else:
                for j in T.serial(4):
                    A[i, j] = 1.0
                    B[i, j] = 1.0
class TestHoistMultiIf(BaseBeforeAfter):
    """Nested ifs each hoist independently to their outermost valid loop."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
class TestHoistComplexConditional(BaseBeforeAfter):
    """An `and` of two conditions is split and each part hoisted separately."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j, k in T.grid(4, 4, 4):
            if j < 3 and i < 2:
                A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
class TestSuppressSplittingConditional(BaseBeforeAfter):
    """With BooleanExpression disabled, the combined condition hoists as one unit."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.BooleanExpression
    )
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j, k in T.grid(4, 4, 4):
            if j < 3 and i < 2:
                A[i, j] = 0.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i, j in T.grid(4, 4):
            if j < 3 and i < 2:
                for k in T.serial(4):
                    A[i, j] = 0.0
class TestHoistMultiIfElse(BaseBeforeAfter):
    """Nested if/else chains hoist into all four branch combinations."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0
                        else:
                            A[i, j] = 1.0
                    else:
                        if i < 2:
                            A[i, j] = 2.0
                        else:
                            A[i, j] = 3.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 0.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 2.0
            else:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 1.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 3.0
class TestHoistMultiIfElseDifferentBranches(BaseBeforeAfter):
    """Different inner i-conditions per branch hoist into nested i-conditions."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            for j in T.serial(4):
                for k in T.serial(4):
                    if j < 3:
                        if i < 2:
                            A[i, j] = 0.0
                        else:
                            A[i, j] = 1.0
                    else:
                        if i < 1:
                            A[i, j] = 2.0
                        else:
                            A[i, j] = 3.0
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 2:
                if i < 1:
                    for j in T.serial(4):
                        if j < 3:
                            for k in T.serial(4):
                                A[i, j] = 0.0
                        else:
                            for k in T.serial(4):
                                A[i, j] = 2.0
                else:
                    for j in T.serial(4):
                        if j < 3:
                            for k in T.serial(4):
                                A[i, j] = 0.0
                        else:
                            for k in T.serial(4):
                                A[i, j] = 3.0
            else:
                for j in T.serial(4):
                    if j < 3:
                        for k in T.serial(4):
                            A[i, j] = 1.0
                    else:
                        for k in T.serial(4):
                            A[i, j] = 3.0
class TestHoistIfElseExpr(BaseBeforeAfter):
    """A T.if_then_else expression is hoisted into an if/else statement."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j in T.grid(4, 4):
            A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            if i < 2:
                for j in T.serial(4):
                    A[i, j] = 1.0
            else:
                for j in T.serial(4):
                    A[i, j] = 2.0
class TestSuppressHoistIfElseExpr(TestHoistIfElseExpr):
    """With IfElseExpr disabled, if_then_else expressions stay inline."""
    hoisted_conditionals = tvm.testing.parameter(
        HoistedConditionals.All & ~HoistedConditionals.IfElseExpr
    )
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j in T.grid(4, 4):
            A[i, j] = T.if_then_else(i < 2, 1.0, 2.0, dtype="float32")
    expected = before
class TestHoistLetExpr(BaseBeforeAfter):
    """A Let expression whose bound value is j-invariant hoists to a LetStmt."""
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j in T.grid(4, 4):
            x = T.float32()
            A[i, j] = T.Let(5.0 * x + T.cast(j, "float32"), where={x: T.cast(i + 1, "float32")})
    @T.prim_func
    def expected(A: T.Buffer((4, 4), "float32")):
        for i in T.serial(4):
            x = T.cast(i + 1, "float32")
            for j in T.serial(4):
                A[i, j] = 5.0 * x + T.cast(j, "float32")
class TestSuppressHoistLetExpr(BaseBeforeAfter):
    """With LetExpr hoisting disabled, the Let expression stays inline."""
    hoisted_let_bindings = tvm.testing.parameter(
        HoistedLetBindings.All & ~HoistedLetBindings.LetExpr
    )
    @T.prim_func
    def before(A: T.Buffer((4, 4), "float32")):
        for i, j in T.grid(4, 4):
            x = T.float32()
            A[i, j] = T.Let(5.0 * x + T.cast(j, "float32"), where={x: T.cast(i + 1, "float32")})
    expected = before
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 14,659 | 29.798319 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_usmp_transform_convert_pool_allocations_to_offsets.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm
from tvm import PoolInfoProperties, WorkspacePoolInfo
from tvm.script import tir as T, ir as I
from tvm.target import Target
from tvm.tir import stmt_functor
from tvm.tir.usmp import utils as usmp_utils
def _get_primfuncs_from_module(module):
primfuncs = list()
for gv, primfunc in module.functions.items():
primfuncs.append(primfunc)
return primfuncs
def assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos):
    """Helper to assign poolinfos to allocate nodes in a tir.PrimFunc"""
    def set_poolinfos(stmt):
        # ir_transform post-order callback: rebuild Allocate nodes with the
        # candidate-pool annotation attached; returning None for any other
        # node type leaves it untouched.
        if isinstance(stmt, tvm.tir.Allocate):
            return tvm.tir.Allocate(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                condition=stmt.condition,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
            )
    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos):
    """Attach candidate PoolInfos to every allocate of every PrimFunc in *mod*."""
    annotated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            annotated[gvar] = assign_poolinfos_to_allocates_in_primfunc(func, pool_infos)
    return annotated
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Return a copy of *mod* whose PrimFuncs carry a "target" attribute."""
    with_targets = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if isinstance(func, tvm.tir.PrimFunc):
            with_targets[gvar] = func.with_attr("target", target)
    return with_targets
def _plan_and_convert(tir_mod, pools=None):
    """Run USMP planning on *tir_mod* and convert pool allocations to offsets.

    Assigns a C target and candidate pools to every PrimFunc, extracts buffer
    info from the "__tvm_main__" function, plans memory with the greedy-by-size
    algorithm, and rewrites allocates into pool offsets.
    """
    target = Target("c")
    if pools is None:
        # Default: a single workspace pool shared by all allocates.
        pools = [
            WorkspacePoolInfo(
                "global_workspace",
                [target],
            )
        ]
    tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
    tir_mod = assign_poolinfos_to_allocates_in_irmodule(tir_mod, pools)
    main_func = tir_mod["__tvm_main__"]
    buffer_analysis = tvm.tir.usmp.analysis.extract_buffer_info(main_func, tir_mod)
    buffer_info_map = buffer_analysis.buffer_info_stmts
    # The planning algorithm consumes an array of BufferInfo objects.
    fcreate_array_bi = tvm.get_global_func("tir.usmp.CreateArrayBufferInfo")
    buffer_info_arr = fcreate_array_bi(buffer_info_map)
    fusmp_algo_greedy_by_size = tvm.get_global_func("tir.usmp.algo.greedy_by_size")
    buffer_pool_allocations = fusmp_algo_greedy_by_size(
        buffer_info_arr, buffer_analysis.memory_pressure
    )
    # Map the planned allocations back onto the originating allocate statements.
    fassign_stmt_pool_allocations = tvm.get_global_func("tir.usmp.AssignStmtPoolAllocations")
    pool_allocations = fassign_stmt_pool_allocations(buffer_info_map, buffer_pool_allocations)
    tir_mod_with_offsets = tvm.tir.usmp.transform.convert_pool_allocations_to_offsets(
        pool_allocations, emit_tvmscript_printable=True
    )(tir_mod)
    return tir_mod_with_offsets
# fmt: off
# Unplanned input IRModule: cast/subtract -> conv2d -> max_pool2d chain whose
# intermediaries use plain T.allocate; USMP will map these onto memory pools.
@tvm.script.ir_module
class LinearStructure:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        # Two workspace allocations here (padded input and per-pixel accumulator)
        # are the candidates the planner must place into pools.
        PaddedInput_7_data = T.allocate([157323], "int16", "global")
        PaddedInput_7 = T.Buffer(shape=[157323], dtype="int16", data=PaddedInput_7_data)
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7_data = T.allocate([64], "int32", "global")
            Conv2dOutput_7 = T.Buffer(shape=[64], dtype="int32", data=Conv2dOutput_7_data)
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")

    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2_data = T.allocate([200704], "uint8", "global")
        tensor_2 = T.Buffer(shape=[200704], dtype="uint8", data=tensor_2_data)
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")

    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Inter-operator tensors (sid_*) that USMP will place into pools.
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
# fmt: on
# fmt: off
# Expected output after planning LinearStructure with a size-hinted "fast_memory"
# pool and an unbounded "slow_memory" pool: every function gains pool-handle
# parameters and former allocations become address_of offsets into the pools.
@tvm.script.ir_module
class LinearStructurePlanned:
    @T.prim_func
    def __tvm_main__(input: T.handle, fast_memory_0_var: T.handle("uint8"), slow_memory_1_var: T.handle("uint8"), output: T.handle) -> None:
        fast_memory_0_buffer_var = T.match_buffer(fast_memory_0_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_1_buffer_var = T.match_buffer(slow_memory_1_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # sid allocations are now let-bound offsets into the slow-memory pool.
        sid_9_let: T.handle("int8") = T.address_of(slow_memory_1_buffer_var[1117472], dtype="handle")
        sid_8_let: T.handle("int8") = T.address_of(slow_memory_1_buffer_var[0], dtype="handle")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9_let, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9_let, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8_let, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8_let, output, fast_memory_0_buffer_var.data, slow_memory_1_buffer_var.data, dtype="int32"))

    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle, fast_memory_6_var: T.handle("uint8"), slow_memory_7_var: T.handle("uint8")) -> None:
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8")
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16")
        fast_memory_6_buffer_var = T.match_buffer(fast_memory_6_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_7_buffer_var = T.match_buffer(slow_memory_7_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        # The pooling scratch buffer now lives at offset 0 of the fast-memory pool.
        tensor_2_let = T.Buffer([200704], dtype="uint8")
        with T.LetStmt(T.address_of(fast_memory_6_buffer_var[0], dtype="handle"), var=tensor_2_let.data):
            for ax0_ax1_fused_4, ax2_4 in T.grid(56, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_init] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_2] = T.max(tensor_2_let[ax0_ax1_fused_4 * 3584 + ax2_4 * 64 + ax3_2], T.if_then_else(ax0_ax1_fused_4 * 2 + rv0_rv1_fused_1 // 3 < 112 and ax2_4 * 2 + rv0_rv1_fused_1 % 3 < 112, placeholder_29[ax0_ax1_fused_4 * 14336 + rv0_rv1_fused_1 // 3 * 7168 + ax2_4 * 128 + rv0_rv1_fused_1 % 3 * 64 + ax3_2], T.uint8(0), dtype="uint8"))
            for ax0_ax1_fused_5, ax2_5, ax3_3 in T.grid(56, 56, 64):
                T_cast_7[ax0_ax1_fused_5 * 3584 + ax2_5 * 64 + ax3_3] = T.cast(tensor_2_let[ax0_ax1_fused_5 * 3584 + ax2_5 * 64 + ax3_3], "int16")

    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle, fast_memory_2_var: T.handle("uint8"), slow_memory_3_var: T.handle("uint8")) -> None:
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8")
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16")
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16")
        fast_memory_2_buffer_var = T.match_buffer(fast_memory_2_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_3_buffer_var = T.match_buffer(slow_memory_3_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        for ax0_ax1_fused_1, ax2_1, ax3_inner_1 in T.grid(224, 224, 3):
            T_subtract_1[ax0_ax1_fused_1 * 672 + ax2_1 * 3 + ax3_inner_1] = T.cast(placeholder_4[ax0_ax1_fused_1 * 672 + ax2_1 * 3 + ax3_inner_1], "int16") - placeholder_5[0]

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle, fast_memory_4_var: T.handle("uint8"), slow_memory_5_var: T.handle("uint8")) -> None:
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16")
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16")
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32")
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8")
        fast_memory_4_buffer_var = T.match_buffer(fast_memory_4_var, [200704], dtype="uint8", strides=[1], elem_offset=0, align=16)
        slow_memory_5_buffer_var = T.match_buffer(slow_memory_5_var, [1418528], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        # Padded input is placed in the slow pool; the small accumulator in the fast pool.
        PaddedInput_7_let = T.Buffer([157323], "int16")
        with T.LetStmt(T.address_of(slow_memory_5_buffer_var[802816], dtype="handle"), var=PaddedInput_7_let.data):
            for i0_i1_fused_7, i2_7, i3_7 in T.grid(229, 229, 3):
                PaddedInput_7_let[i0_i1_fused_7 * 687 + i2_7 * 3 + i3_7] = T.if_then_else(2 <= i0_i1_fused_7 and i0_i1_fused_7 < 226 and 2 <= i2_7 and i2_7 < 226, placeholder_65[i0_i1_fused_7 * 672 + i2_7 * 3 + i3_7 - 1350], T.int16(0), dtype="int16")
            for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
                Conv2dOutput_7_let = T.Buffer([64], "int32")
                with T.LetStmt(T.address_of(fast_memory_4_buffer_var[0], dtype="handle"), var=Conv2dOutput_7_let.data):
                    for ff_3 in T.serial(0, 64):
                        Conv2dOutput_7_let[ff_3] = 0
                        for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                            Conv2dOutput_7_let[ff_3] = Conv2dOutput_7_let[ff_3] + T.cast(PaddedInput_7_let[ax0_ax1_fused_ax2_fused_7 // 112 * 1374 + ry_2 * 687 + ax0_ax1_fused_ax2_fused_7 % 112 * 6 + rx_2 * 3 + rc_7], "int32") * T.cast(placeholder_66[ry_2 * 1344 + rx_2 * 192 + rc_7 * 64 + ff_3], "int32")
                    for ax3_inner_7 in T.serial(0, 64):
                        T_cast_21[ax0_ax1_fused_ax2_fused_7 * 64 + ax3_inner_7] = T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_7_let[ax3_inner_7] + placeholder_67[ax3_inner_7], 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
# fmt: on
def test_mobilenet_subgraph():
    """Planning the mobilenet-style subgraph over a fast (size-hinted) and a
    slow workspace pool must produce exactly ``LinearStructurePlanned``."""
    target = Target("c")
    memory_pools = [
        WorkspacePoolInfo(
            "fast_memory",
            [target],
            PoolInfoProperties(size_hint_bytes=200704),
        ),
        WorkspacePoolInfo(
            "slow_memory",
            [target],
        ),
    ]
    actual = _plan_and_convert(LinearStructure, pools=memory_pools)
    tvm.ir.assert_structural_equal(actual, LinearStructurePlanned)
# fmt: off
# Unplanned input IRModule: a resnet-style subgraph whose PrimFuncs use plain
# T.allocate workspaces that the USMP passes will map onto the default pool.
@tvm.script.ir_module
class ResnetStructure:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", "tir.noalias": True})
        placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
        placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
        T_cast_1 = T.match_buffer(T_cast, [215], dtype="int16")
        # body
        for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
            T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
        placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
        placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
        placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
        T_cast_5 = T.match_buffer(T_cast_4, [215], dtype="int16")
        # body
        PaddedInput_1_data = T.allocate([379456], "int16", "global")
        PaddedInput_1 = T.Buffer(shape=[379456], dtype="int16", data=PaddedInput_1_data)
        for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
            PaddedInput_1[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
            Conv2dOutput_1_data = T.allocate([64], "int32", "global")
            Conv2dOutput_1 = T.Buffer(shape=[64], dtype="int32", data=Conv2dOutput_1_data)
            for ff_1 in T.serial(0, 64):
                Conv2dOutput_1[ff_1] = 0
                for ry, rx, rc_1 in T.grid(3, 3, 64):
                    Conv2dOutput_1[ff_1] = Conv2dOutput_1[ff_1] + T.cast(PaddedInput_1[T.floordiv(ax0_ax1_fused_ax2_fused_1, 75) * 4928 + ry * 4928 + rx * 64 + T.floormod(ax0_ax1_fused_ax2_fused_1, 75) * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
            for ax3_inner_2 in T.serial(0, 64):
                T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", "tir.noalias": True})
        placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
        placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
        placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
        T_add_1 = T.match_buffer(T_add, [407], dtype="int32")
        # body
        PaddedInput_2_data = T.allocate([360000], "int16", "global")
        PaddedInput_2 = T.Buffer(shape=[360000], dtype="int16", data=PaddedInput_2_data)
        for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
            PaddedInput_2[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
        for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
            Conv2dOutput_2_data = T.allocate([64], "int32", "global")
            Conv2dOutput_2 = T.Buffer(shape=[64], dtype="int32", data=Conv2dOutput_2_data)
            for ax3_outer_1 in T.serial(0, 4):
                for ff_2 in T.serial(0, 64):
                    Conv2dOutput_2[ff_2] = 0
                    for rc_2 in T.serial(0, 64):
                        Conv2dOutput_2[ff_2] = Conv2dOutput_2[ff_2] + T.cast(PaddedInput_2[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
                for ax3_inner_3 in T.serial(0, 64):
                    T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
        placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
        placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
        placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
        T_cast_7 = T.match_buffer(T_cast_6, [407], dtype="uint8")
        # body
        PaddedInput_3_data = T.allocate([360000], "int16", "global")
        PaddedInput_3 = T.Buffer(shape=[360000], dtype="int16", data=PaddedInput_3_data)
        for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
            PaddedInput_3[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
        for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
            Conv2dOutput_3_data = T.allocate([64], "int32", "global")
            Conv2dOutput_3 = T.Buffer(shape=[64], dtype="int32", data=Conv2dOutput_3_data)
            for ax3_outer_2 in T.serial(0, 4):
                for ff_3 in T.serial(0, 64):
                    Conv2dOutput_3[ff_3] = 0
                    for rc_3 in T.serial(0, 64):
                        Conv2dOutput_3[ff_3] = Conv2dOutput_3[ff_3] + T.cast(PaddedInput_3[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
                for ax3_inner_4 in T.serial(0, 64):
                    T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")

    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "__tvm_main__", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # Inter-operator tensors (sid_*) to be planned into the workspace pool.
        sid_2 = T.allocate([720000], "int8", "global")
        sid_6 = T.allocate([5760000], "int8", "global")
        sid_7 = T.allocate([720000], "int8", "global")
        sid_8 = T.allocate([720000], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6, output, dtype="int32"))

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
        placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
        placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
        placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
        T_cast_3 = T.match_buffer(T_cast_2, [215], dtype="int16")
        # body
        PaddedInput_data = T.allocate([360000], "int16", "global")
        PaddedInput = T.Buffer([360000], "int16", data=PaddedInput_data)
        for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
            PaddedInput[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
        for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
            Conv2dOutput_data = T.allocate([64], "int32", "global")
            Conv2dOutput = T.Buffer([64], "int32", data=Conv2dOutput_data)
            for ff in T.serial(0, 64):
                Conv2dOutput[ff] = 0
                for rc in T.serial(0, 64):
                    Conv2dOutput[ff] = Conv2dOutput[ff] + T.cast(PaddedInput[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
            for ax3_inner_1 in T.serial(0, 64):
                T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
# fmt: on
# fmt: off
# Expected output after planning ResnetStructure with the default single
# "global_workspace" pool: each PrimFunc gains a workspace-handle parameter and
# every former allocation is a let-bound address_of offset into that pool.
@tvm.script.ir_module
class ResnetStructurePlanned:
    @T.prim_func
    def tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast(placeholder: T.handle, placeholder_1: T.handle, T_cast: T.handle, global_workspace_1_var: T.handle("uint8")) -> None:
        placeholder_2 = T.match_buffer(placeholder, [360000], dtype="uint8")
        placeholder_3 = T.match_buffer(placeholder_1, [64], dtype="int32")
        T_cast_1 = T.match_buffer(T_cast, [215], dtype="int16")
        global_workspace_1_buffer_var = T.match_buffer(global_workspace_1_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        for ax0_ax1_fused, ax2, ax3_outer, ax3_inner in T.grid(75, 75, 4, 16):
            T_cast_1[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(placeholder_2[ax0_ax1_fused * 4800 + ax2 * 64 + ax3_outer * 16 + ax3_inner], "int32") - 94, 1843157232, 31, 1, dtype="int32") + placeholder_3[ax3_outer * 16 + ax3_inner], 255), 0), "uint8"), "int16")

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, placeholder_25: T.handle, T_cast_6: T.handle, global_workspace_5_var: T.handle("uint8")) -> None:
        placeholder_29 = T.match_buffer(placeholder_22, [360000], dtype="int16")
        placeholder_27 = T.match_buffer(placeholder_23, [16384], dtype="int16")
        placeholder_26 = T.match_buffer(placeholder_24, [256], dtype="int32")
        placeholder_28 = T.match_buffer(placeholder_25, [1440000], dtype="int32")
        T_cast_7 = T.match_buffer(T_cast_6, [407], dtype="uint8")
        global_workspace_5_buffer_var = T.match_buffer(global_workspace_5_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_3_let = T.Buffer([360000], 'int16')
        with T.LetStmt(T.address_of(global_workspace_5_buffer_var[6480000], dtype="handle"), var=PaddedInput_3_let.data):
            for i0_i1_fused_3, i2_3, i3_3 in T.grid(75, 75, 64):
                PaddedInput_3_let[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3] = placeholder_29[i0_i1_fused_3 * 4800 + i2_3 * 64 + i3_3]
            for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 5625):
                Conv2dOutput_3_let = T.Buffer([64], 'int32')
                with T.LetStmt(T.address_of(global_workspace_5_buffer_var[7200000], dtype="handle"), var=Conv2dOutput_3_let.data):
                    for ax3_outer_2 in T.serial(0, 4):
                        for ff_3 in T.serial(0, 64):
                            Conv2dOutput_3_let[ff_3] = 0
                            for rc_3 in T.serial(0, 64):
                                Conv2dOutput_3_let[ff_3] = Conv2dOutput_3_let[ff_3] + T.cast(PaddedInput_3_let[ax0_ax1_fused_ax2_fused_3 * 64 + rc_3], "int32") * T.cast(placeholder_27[rc_3 * 256 + ax3_outer_2 * 64 + ff_3], "int32")
                        for ax3_inner_4 in T.serial(0, 64):
                            T_cast_7[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_3_let[ax3_inner_4] + placeholder_26[ax3_outer_2 * 64 + ax3_inner_4], 1343014664, 31, -8, dtype="int32") + 136, 255), 0), "uint8"), "int32") - 136, 1073903788, 31, 1, dtype="int32") + placeholder_28[ax0_ax1_fused_ax2_fused_3 * 256 + ax3_outer_2 * 64 + ax3_inner_4], 255), 0), "uint8")

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_add: T.handle, global_workspace_4_var: T.handle("uint8")) -> None:
        placeholder_19 = T.match_buffer(placeholder_16, [360000], dtype="int16")
        placeholder_20 = T.match_buffer(placeholder_17, [16384], dtype="int16")
        placeholder_21 = T.match_buffer(placeholder_18, [256], dtype="int32")
        T_add_1 = T.match_buffer(T_add, [407], dtype="int32")
        global_workspace_4_buffer_var = T.match_buffer(global_workspace_4_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_2_let = T.Buffer([360000], "int16")
        with T.LetStmt(T.address_of(global_workspace_4_buffer_var[7200000], dtype="handle"), var=PaddedInput_2_let.data):
            for i0_i1_fused_2, i2_2, i3_2 in T.grid(75, 75, 64):
                PaddedInput_2_let[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2] = placeholder_19[i0_i1_fused_2 * 4800 + i2_2 * 64 + i3_2]
            for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 5625):
                Conv2dOutput_2_let = T.Buffer([64], 'int32')
                with T.LetStmt(T.address_of(global_workspace_4_buffer_var[7920000], dtype="handle"), var=Conv2dOutput_2_let.data):
                    for ax3_outer_1 in T.serial(0, 4):
                        for ff_2 in T.serial(0, 64):
                            Conv2dOutput_2_let[ff_2] = 0
                            for rc_2 in T.serial(0, 64):
                                Conv2dOutput_2_let[ff_2] = Conv2dOutput_2_let[ff_2] + T.cast(PaddedInput_2_let[ax0_ax1_fused_ax2_fused_2 * 64 + rc_2], "int32") * T.cast(placeholder_20[rc_2 * 256 + ax3_outer_1 * 64 + ff_2], "int32")
                        for ax3_inner_3 in T.serial(0, 64):
                            T_add_1[ax0_ax1_fused_ax2_fused_2 * 256 + ax3_outer_1 * 64 + ax3_inner_3] = T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_2_let[ax3_inner_3] + placeholder_21[ax3_outer_1 * 64 + ax3_inner_3], 1711626602, 31, -8, dtype="int32") + 132, 255), 0), "uint8"), "int32") - 132, 2094289803, 31, -2, dtype="int32") + 136

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_4: T.handle, placeholder_5: T.handle, placeholder_6: T.handle, T_cast_2: T.handle, global_workspace_2_var: T.handle("uint8")) -> None:
        placeholder_7 = T.match_buffer(placeholder_4, [360000], dtype="int16")
        placeholder_8 = T.match_buffer(placeholder_5, [4096], dtype="int16")
        placeholder_9 = T.match_buffer(placeholder_6, [64], dtype="int32")
        T_cast_3 = T.match_buffer(T_cast_2, [215], dtype="int16")
        global_workspace_2_buffer_var = T.match_buffer(global_workspace_2_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_let = T.Buffer([360000], "int16")
        with T.LetStmt(T.address_of(global_workspace_2_buffer_var[7200000], dtype="handle"), var=PaddedInput_let.data):
            for i0_i1_fused, i2, i3 in T.grid(75, 75, 64):
                PaddedInput_let[i0_i1_fused * 4800 + i2 * 64 + i3] = placeholder_7[i0_i1_fused * 4800 + i2 * 64 + i3]
            for ax0_ax1_fused_ax2_fused in T.serial(0, 5625):
                Conv2dOutput_let = T.Buffer([64], "int32")
                with T.LetStmt(T.address_of(global_workspace_2_buffer_var[7920000], dtype="handle"), var=Conv2dOutput_let.data):
                    for ff in T.serial(0, 64):
                        Conv2dOutput_let[ff] = 0
                        for rc in T.serial(0, 64):
                            Conv2dOutput_let[ff] = Conv2dOutput_let[ff] + T.cast(PaddedInput_let[ax0_ax1_fused_ax2_fused * 64 + rc], "int32") * T.cast(placeholder_8[rc * 64 + ff], "int32")
                    for ax3_inner_1 in T.serial(0, 64):
                        T_cast_3[ax0_ax1_fused_ax2_fused * 64 + ax3_inner_1] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_let[ax3_inner_1] + placeholder_9[ax3_inner_1], 1843106743, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")

    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_10: T.handle, placeholder_11: T.handle, placeholder_12: T.handle, T_cast_4: T.handle, global_workspace_3_var: T.handle("uint8")) -> None:
        placeholder_13 = T.match_buffer(placeholder_10, [360000], dtype="int16")
        placeholder_14 = T.match_buffer(placeholder_11, [36864], dtype="int16")
        placeholder_15 = T.match_buffer(placeholder_12, [64], dtype="int32")
        T_cast_5 = T.match_buffer(T_cast_4, [215], dtype="int16")
        global_workspace_3_buffer_var = T.match_buffer(global_workspace_3_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        PaddedInput_1_let = T.Buffer([379456], "int16")
        with T.LetStmt(T.address_of(global_workspace_3_buffer_var[0], dtype="handle"), var=PaddedInput_1_let.data):
            for i0_i1_fused_1, i2_1, i3_1 in T.grid(77, 77, 64):
                PaddedInput_1_let[i0_i1_fused_1 * 4928 + i2_1 * 64 + i3_1] = T.if_then_else(1 <= i0_i1_fused_1 and i0_i1_fused_1 < 76 and 1 <= i2_1 and i2_1 < 76, placeholder_13[i0_i1_fused_1 * 4800 + i2_1 * 64 + i3_1 - 4864], T.int16(0), dtype="int16")
            for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 5625):
                Conv2dOutput_1_let = T.Buffer([64], "int32")
                with T.LetStmt(T.address_of(global_workspace_3_buffer_var[7200000], dtype="handle"), var=Conv2dOutput_1_let.data):
                    for ff_1 in T.serial(0, 64):
                        Conv2dOutput_1_let[ff_1] = 0
                        for ry, rx, rc_1 in T.grid(3, 3, 64):
                            Conv2dOutput_1_let[ff_1] = Conv2dOutput_1_let[ff_1] + T.cast(PaddedInput_1_let[ax0_ax1_fused_ax2_fused_1 // 75 * 4928 + ry * 4928 + rx * 64 + ax0_ax1_fused_ax2_fused_1 % 75 * 64 + rc_1], "int32") * T.cast(placeholder_14[ry * 12288 + rx * 4096 + rc_1 * 64 + ff_1], "int32")
                    for ax3_inner_2 in T.serial(0, 64):
                        T_cast_5[ax0_ax1_fused_ax2_fused_1 * 64 + ax3_inner_2] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift(Conv2dOutput_1_let[ax3_inner_2] + placeholder_15[ax3_inner_2], 1608879842, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")

    @T.prim_func
    def __tvm_main__(input: T.handle, global_workspace_0_var: T.handle("uint8"), output: T.handle) -> None:
        global_workspace_0_buffer_var = T.match_buffer(global_workspace_0_var, [7920256], dtype="uint8", strides=[1], elem_offset=0, align=16)
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        # sid_7 and sid_8 share offset 6480000: their lifetimes do not overlap.
        sid_2_let: T.handle("int8") = T.address_of(global_workspace_0_buffer_var[5760000], dtype="handle")
        sid_6_let: T.handle("int8") = T.address_of(global_workspace_0_buffer_var[0], dtype="handle")
        sid_7_let: T.handle("int8") = T.address_of(global_workspace_0_buffer_var[6480000], dtype="handle")
        sid_8_let: T.handle("int8") = T.address_of(global_workspace_0_buffer_var[6480000], dtype="handle")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract_fixed_point_multiply_add_clip_cast_cast", input, T.lookup_param("p0", dtype="handle"), sid_2_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_2_let, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_8_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_8_let, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_7_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_15934180698220515269_", sid_7_let, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_6_let, global_workspace_0_buffer_var.data, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_add_clip_cast_cast_subtract_fixed_point_4200876283395191415_", sid_2_let, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_6_let, output, global_workspace_0_buffer_var.data, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_resnet_subgraph():
    """Planning the resnet-style subgraph with the default single workspace
    pool must produce exactly ``ResnetStructurePlanned``."""
    actual = _plan_and_convert(ResnetStructure)
    tvm.ir.assert_structural_equal(actual, ResnetStructurePlanned)
# Unplanned input IRModule whose allocated buffer is passed to an extern
# intrinsic through tvm_access_ptr; the planner must rewrite that pointer too.
@tvm.script.ir_module
class TensorIntrinStructure:
    @T.prim_func
    def tensor_intrin_primfunc() -> None:
        dense_data = T.allocate([10], "int32", "global")
        T.evaluate(
            T.call_extern(
                "intrin_function",
                T.tvm_access_ptr(
                    T.type_annotation(dtype="int32"), dense_data, 0, 1, 2, dtype="handle"
                ),
                dtype="int32",
            )
        )
        dense = T.Buffer([10], "int32", data=dense_data)
        dense[0] = T.q_multiply_shift(dense[0], 1608879842, 31, -7, dtype="int32")

    @T.prim_func
    def __tvm_main__(input: T.handle, output: T.handle) -> None:
        T.evaluate(T.call_extern("tensor_intrin_primfunc", dtype="int32"))
# Expected output after planning TensorIntrinStructure: the 10-element int32
# allocation becomes a 40-byte workspace pool, and the tvm_access_ptr argument
# now references the let-bound pool-backed buffer data.
@tvm.script.ir_module
class TensorIntrinStructurePlanned:
    @T.prim_func
    def tensor_intrin_primfunc(global_workspace_1_var: T.handle("uint8")) -> None:
        global_workspace_1_buffer_var = T.match_buffer(
            global_workspace_1_var, [40], dtype="uint8", strides=[1], elem_offset=0, align=16
        )
        dense_let = T.Buffer([10], "int32")
        with T.LetStmt(
            T.address_of(global_workspace_1_buffer_var[0], dtype="handle"), var=dense_let.data
        ):
            T.evaluate(
                T.call_extern(
                    "intrin_function",
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="int32"), dense_let.data, 0, 1, 2, dtype="handle"
                    ),
                    dtype="int32",
                )
            )
            dense_let[0] = T.q_multiply_shift(dense_let[0], 1608879842, 31, -7, dtype="int32")

    @T.prim_func
    def __tvm_main__(
        input: T.handle, global_workspace_1_var: T.handle("uint8"), output: T.handle
    ) -> None:
        global_workspace_1_buffer_var = T.match_buffer(
            global_workspace_1_var, [40], dtype="uint8", strides=[1], elem_offset=0, align=16
        )
        T.evaluate(
            T.call_extern(
                "tensor_intrin_primfunc", global_workspace_1_buffer_var.data, dtype="int32"
            )
        )
def test_tensor_intrin():
    """Planning a module with an opaque intrinsic call must match the expected IR."""
    actual = _plan_and_convert(TensorIntrinStructure)
    tvm.ir.assert_structural_equal(actual, TensorIntrinStructurePlanned)
class TestMergeAllocations(tvm.testing.CompareBeforeAfter):
    """Two T.allocate buffers in main must be merged into one shared workspace.

    After planning, B and C become offsets into a single 512-byte workspace
    buffer, and the workspace pointer is also threaded through `subroutine`.
    """
    def transform(self):
        return _plan_and_convert
    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def __tvm_main__(A: T.Buffer(256, "int8"), D: T.Buffer(256, "int8")):
                B = T.allocate([256], "int8")
                T.call_extern("subroutine", A.data, B, dtype="int32")
                C = T.allocate([256], "int8")
                T.call_extern("subroutine", B, C, dtype="int32")
                T.call_extern("subroutine", C, D.data, dtype="int32")
            @T.prim_func
            def subroutine(A: T.Buffer(256, "int8"), B: T.Buffer(256, "int8")):
                for i in range(256):
                    B[i] = A[i]
        return mod
    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def __tvm_main__(
                A: T.Buffer(256, "int8"),
                D: T.Buffer(256, "int8"),
                workspace_var: T.handle("uint8"),
            ):
                workspace = T.match_buffer(workspace_var, 512, "uint8", strides=[1], align=16)
                # B and C have disjoint lifetimes relative to each other's halves,
                # so both fit in the same 512-byte workspace at fixed offsets.
                B: T.handle("int8") = T.address_of(workspace[256])
                T.call_extern("subroutine", A.data, B, workspace.data, dtype="int32")
                C: T.handle("int8") = T.address_of(workspace[0])
                T.call_extern("subroutine", B, C, workspace.data, dtype="int32")
                T.call_extern("subroutine", C, D.data, workspace.data, dtype="int32")
            @T.prim_func
            def subroutine(
                A: T.Buffer(256, "int8"),
                B: T.Buffer(256, "int8"),
                workspace_var: T.handle("uint8"),
            ):
                workspace = T.match_buffer(workspace_var, 512, "uint8", strides=[1], align=16)
                for i in range(256):
                    B[i] = A[i]
        return mod
class TestMergeAllocationsWithDeclBuffer(tvm.testing.CompareBeforeAfter):
    """Like TestMergeAllocations, but using T.decl_buffer"""
    def transform(self):
        return _plan_and_convert
    def before(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def __tvm_main__(A: T.Buffer(256, "int8"), D: T.Buffer(256, "int8")):
                B = T.decl_buffer([256], "int8")
                T.call_extern("subroutine", A.data, B.data, dtype="int32")
                C = T.decl_buffer([256], "int8")
                T.call_extern("subroutine", B.data, C.data, dtype="int32")
                T.call_extern("subroutine", C.data, D.data, dtype="int32")
            @T.prim_func
            def subroutine(A: T.Buffer(256, "int8"), B: T.Buffer(256, "int8")):
                for i in range(256):
                    B[i] = A[i]
        return mod
    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def __tvm_main__(
                A: T.Buffer(256, "int8"),
                D: T.Buffer(256, "int8"),
                workspace_var: T.handle("uint8"),
            ):
                workspace = T.match_buffer(workspace_var, 512, "uint8", strides=[1], align=16)
                # Each decl_buffer is rebound to a data pointer that aliases a
                # fixed offset inside the merged workspace.
                B_data: T.handle("int8") = T.address_of(workspace[256])
                B = T.decl_buffer(256, "int8", data=B_data)
                T.call_extern("subroutine", A.data, B.data, workspace.data, dtype="int32")
                C_data: T.handle("int8") = T.address_of(workspace[0])
                C = T.decl_buffer(256, "int8", data=C_data)
                T.call_extern("subroutine", B.data, C.data, workspace.data, dtype="int32")
                T.call_extern("subroutine", C.data, D.data, workspace.data, dtype="int32")
            @T.prim_func
            def subroutine(
                A: T.Buffer(256, "int8"),
                B: T.Buffer(256, "int8"),
                workspace_var: T.handle("uint8"),
            ):
                workspace = T.match_buffer(workspace_var, 512, "uint8", strides=[1], align=16)
                for i in range(256):
                    B[i] = A[i]
        return mod
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 46,486 | 65.695839 | 484 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_annotation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
import pytest
from tvm.runtime import ObjectPath
from tvm.script import tir as T
# Fixture prim_func: eight trivial statements whose printed script is asserted
# verbatim by the tests below.  Do not reformat -- the exact printer output
# (including statement order) is part of the expected strings.
@T.prim_func
def _func():
    T.evaluate(-1)
    T.evaluate(1)
    T.evaluate(2)
    T.evaluate(3)
    T.evaluate(4)
    T.evaluate(5)
    T.evaluate(6)
    T.evaluate(7)
def test_annotation_multi_object_paths():
    """Annotating by ObjectPath appends a trailing comment to each addressed stmt."""
    result = _func.script(
        path_to_annotate={
            # Each path addresses one statement of _func's SeqStmt body.
            ObjectPath.root().attr("body").attr("seq").array_index(1): "annotation 1",
            ObjectPath.root().attr("body").attr("seq").array_index(3): "annotation 3",
            ObjectPath.root().attr("body").attr("seq").array_index(5): "annotation 5",
            ObjectPath.root().attr("body").attr("seq").array_index(7): "annotation 7",
        }
    )
    assert (
        result
        == """# from tvm.script import tir as T
@T.prim_func
def main():
    T.evaluate(-1)
    T.evaluate(1) # annotation 1
    T.evaluate(2)
    T.evaluate(3) # annotation 3
    T.evaluate(4)
    T.evaluate(5) # annotation 5
    T.evaluate(6)
    T.evaluate(7) # annotation 7"""
    )
def test_annotate_from_multi_obj():
    """Annotating by IR object reference produces the same output as by path."""
    result = _func.script(
        obj_to_annotate={
            # Keys are the actual Stmt nodes rather than ObjectPaths.
            _func.body.seq[1]: "annotation 1",
            _func.body.seq[3]: "annotation 3",
            _func.body.seq[5]: "annotation 5",
            _func.body.seq[7]: "annotation 7",
        }
    )
    assert (
        result
        == """# from tvm.script import tir as T
@T.prim_func
def main():
    T.evaluate(-1)
    T.evaluate(1) # annotation 1
    T.evaluate(2)
    T.evaluate(3) # annotation 3
    T.evaluate(4)
    T.evaluate(5) # annotation 5
    T.evaluate(6)
    T.evaluate(7) # annotation 7"""
    )
| 2,466 | 27.356322 | 86 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_hoist_if.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
import numpy as np
import pytest
from tvm.testing import enabled_targets
var_list = []
def verify_structure(stmt, expected_struct):
    """Check the For/IfThenElse/AttrStmt nesting of *stmt*.

    ``expected_struct`` maps a node key -- ("tir.For", loop_var_name),
    ("tir.IfThenElse", tuple_of_condition_var_names) or
    ("tir.AttrStmt", attr_key, int_value) -- to the keys of its relevant
    children (None where the child is not one of the tracked node kinds).
    Uses the module-level ``var_list`` as scratch space and clears it on exit.
    """
    node_dict = {}
    struct = {}
    def _extract_vars(op):
        # Collect names of all Vars appearing in a condition expression.
        global var_list
        if isinstance(op, tvm.tir.Var):
            var_list.append(op.name)
    def _visit(op):
        # Record (children, key) for each tracked node kind.
        key = op
        if isinstance(op, tvm.tir.IfThenElse):
            global var_list
            tvm.tir.stmt_functor.post_order_visit(op.condition, _extract_vars)
            val = [(op.then_case, op.else_case), ("tir.IfThenElse", tuple(var_list))]
            var_list.clear()
        elif isinstance(op, tvm.tir.For):
            val = [(op.body,), ("tir.For", op.loop_var.name)]
        elif isinstance(op, tvm.tir.AttrStmt):
            val = [(op.body,), ("tir.AttrStmt", op.attr_key, int(op.value))]
        else:
            return
        node_dict[key] = val
    tvm.tir.stmt_functor.post_order_visit(stmt, _visit)
    # Translate the node->children map into a key->child-keys map for comparison.
    for key, val in node_dict.items():
        struct[val[1]] = tuple(
            node_dict[child][1] if child in node_dict else None for child in val[0]
        )
    assert struct == expected_struct, "Structure mismatch: expect %s but got %s" % (
        expected_struct,
        struct,
    )
    var_list.clear()
def _opaque_eval(var):
    """Wrap *var* in an opaque extern call so the pass sees it as used but unanalyzable."""
    opaque_call = tvm.tir.call_extern("int32", "dummy", var)
    return tvm.tir.Evaluate(opaque_call)
def test_hoist_top_for():
    """If-condition depends only on the outermost loop var, so it hoists above j and k."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "k"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), ("tir.For", "j")),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_hoist_multi_var_if():
    """Condition uses both i and j, so the if can hoist above k but not above j."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i + j < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_mod = tvm.tir.transform.HoistIfThenElse()(mod)
    new_stmt = new_mod["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
        ("tir.For", "i"): (("tir.For", "j"),),
    }
    verify_structure(new_stmt, expected_struct)
def test_hoist_no_match_for():
    """A store in the j loop before the k loop stops the if from hoisting past j."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            data[i * 3 + j] = data[i * 3 + j] + 0.5
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
                with ib.else_scope():
                    ib.emit(_opaque_eval(n))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.For", "i"): (("tir.For", "j"),),
    }
    verify_structure(new_stmt, expected_struct)
def test_no_else():
    """An if without an else branch still hoists to just inside the i loop."""
    ib = tvm.tir.ir_builder.create()
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(ib.likely(i < 2)):
                    ib.emit(_opaque_eval(m))
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "k"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_attr_stmt():
    """Hoisting inside thread_extent AttrStmts: the if rises above k but stays under j."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.5
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.0
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (("tir.IfThenElse", ("i", "j")),),
        ("tir.For", "i"): (("tir.For", "j"),),
        ("tir.AttrStmt", "thread_extent", 64): (("tir.For", "i"),),
        ("tir.AttrStmt", "thread_extent", 32): (("tir.AttrStmt", "thread_extent", 64),),
    }
    verify_structure(new_stmt, expected_struct)
def test_nested_for():
    """Both the outer (i) and the inner (i,j) ifs hoist as far as their vars allow."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (("tir.For", "l"),),
        ("tir.IfThenElse", ("i", "j")): (("tir.For", "k"), ("tir.For", "k")),
        ("tir.For", "j"): (None,),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_if_block():
    """Mixed scenario: adjacent ifs inside nested loops plus a second loop nest
    guarded by a loop-independent condition (n)."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    n = te.var("n")
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.if_scope(i >= 3):
                data[i * 3 + j] = data[i * 3 + j] + 0.5
                with ib.for_range(0, 15, "k") as k:
                    with ib.for_range(0, 20, "l") as l:
                        with ib.if_scope(tvm.tir.any(i < 4, j >= 8)):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 2
                        with ib.else_scope():
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] * 1.5
                        with ib.if_scope(j < 5):
                            data[i * 3 + j + k + l] = data[i * 3 + j + k + l] - 1
    with ib.for_range(0, 5, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 15, "k") as k:
                with ib.if_scope(n >= 3):
                    data[i * 3 + j + k] = data[i * 3 + j + k] + 0.6
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    expected_struct = {
        ("tir.IfThenElse", ("i", "j")): (None, None),
        ("tir.IfThenElse", ("j",)): (None, None),
        ("tir.For", "l"): (None,),
        ("tir.For", "k"): (None,),
        ("tir.For", "j"): (("tir.For", "j"),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
        ("tir.IfThenElse", ("n",)): (("tir.For", "j"), None),
    }
    verify_structure(new_stmt, expected_struct)
def test_multi_if():
    """Nested ifs each hoist independently to just inside their own loop."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(3 <= i):
                    with ib.if_scope(3 <= j):
                        data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_mod = tvm.tir.transform.HoistIfThenElse()(mod)
    new_stmt = new_mod["main"].body
    expected_struct = {
        ("tir.For", "k"): (None,),
        ("tir.IfThenElse", ("j",)): (("tir.For", "k"), None),
        ("tir.For", "j"): (("tir.IfThenElse", ("j",)),),
        ("tir.IfThenElse", ("i",)): (("tir.For", "j"), None),
        ("tir.For", "i"): (("tir.IfThenElse", ("i",)),),
    }
    verify_structure(new_stmt, expected_struct)
def test_no_hoisting_1():
    """No hoisting: the condition depends on the innermost loop var k, so the
    pass must leave the statement untouched in both default and block-scope mode."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(k <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_2():
    """No hoisting: the if body is followed by an unconditional store in the same
    k loop, so lifting the if would reorder observable writes."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data")
    with ib.for_range(0, 10, "i") as i:
        with ib.for_range(0, 10, "j") as j:
            with ib.for_range(0, 10, "k") as k:
                with ib.if_scope(i <= 3):
                    data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.3
                data[i * 100 + j * 10 + k] = data[i * 100 + j * 10 + k] + 0.5
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_3():
    """No hoisting: inner thread_extent attrs re-bind tx/bx with different extents,
    which the pass cannot safely cross (currently an expected failure)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                ib.scope_attr(bx, "thread_extent", dshape_inner[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_4():
    """No hoisting: tx's thread_extent attr lives inside the k loop, so the
    tx-based if cannot rise above it even in block-scope mode."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_no_hoisting_5():
    """No hoisting: bx and tx are re-bound at different loop depths with different
    extents (currently an expected failure)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(tx, "thread_extent", dshape_inner[0])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_6():
    """No hoisting: the condition mixes the thread var tx with the innermost
    loop var k, so it cannot be lifted out of the k loop."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + k) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_no_hoisting_7():
    """No hoisting: both ifs depend on the loop var of the loop immediately
    enclosing them ((tx+j) under j, (tx+k) under k)."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.if_scope((tx + j) < 9):
                with ib.for_range(0, n, "k") as k:
                    with ib.if_scope((tx + k) < 3):
                        data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_1():
    """Block-scope mode only: a real reduction schedule (rfactor + thread binding)
    is unchanged by default, but changes once block-scope hoisting is enabled."""
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "k")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(B.op.reduce_axis[0], factor=16)
    BF = s.rfactor(B, ki)
    xo, xi = s[B].split(s[B].op.axis[0], factor=32)
    s[B.op].bind(xo, te.thread_axis("blockIdx.x"))
    s[B.op].bind(xi, te.thread_axis("threadIdx.y"))
    s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x"))
    s[BF].compute_at(s[B], s[B].op.reduce_axis[0])
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B], "main", None)
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_2():
    """Block-scope mode only: bx's thread_extent attr sits inside the loop nest,
    so the tx-based if only hoists when crossing such attrs is allowed."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    # bx is deliberately not bound at the top level; its thread_extent attr is
    # emitted inside the innermost loop below, which blocks default-mode hoisting.
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                ib.scope_attr(bx, "thread_extent", dshape[1])
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    mod = tvm.tir.transform.Simplify()(mod)
    mod = tvm.tir.transform.RemoveNoOp()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.xfail(reason="Inconsistent thread_extent", strict=True)
def test_hoisting_block_scope_3():
    """Block-scope mode with re-bound tx/bx at a mid loop level; currently an
    expected failure due to the inconsistent inner thread extents."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    dshape_inner = (33, 63)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            ib.scope_attr(tx, "thread_extent", dshape_inner[0])
            ib.scope_attr(bx, "thread_extent", dshape_inner[1])
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(tx < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_4():
    """Block-scope mode only: a parallel/vectorized CPU schedule with pragmas is
    unchanged by default but is modified once block-scope hoisting is enabled."""
    nn = 1024
    n = tvm.runtime.convert(nn)
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    AA = te.compute((n,), lambda *i: A(*i), name="A")
    BB = te.compute((n,), lambda *i: B(*i), name="B")
    T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T")
    C = te.compute(A.shape, lambda *i: T(*i), name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=4)
    xo1, xo2 = s[C].split(xo, factor=13)
    s[C].parallel(xo2)
    s[C].pragma(xo1, "parallel_launch_point")
    s[C].pragma(xo2, "parallel_stride_pattern")
    s[C].pragma(xo2, "parallel_barrier_when_finish")
    s[C].vectorize(xi)
    mod = tvm.driver.build_module.schedule_to_module(s, [A, B, C], "main", None)
    mod = tvm.tir.transform.Simplify()(mod)
    stmt = mod["main"].body
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_5():
    """A data-dependent condition (data[g]) is loop-invariant, so default mode
    already hoists it; a second application is then a fixed point."""
    ib = tvm.tir.ir_builder.create()
    data = ib.pointer("float32", name="data", scope="global")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    g = te.var("g")
    ib.scope_attr(data, "storage_scope", "global")
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope(data[g] < 3):
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 0.3
                with ib.else_scope():
                    data[9 * j + 3 * j * k] = data[9 * j + 3 * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
    stmt = new_stmt
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_6():
    """Block-scope mode only: the condition (tx + n) uses a thread var but no
    loop var, so it hoists once crossing the thread_extent attrs is allowed."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + n) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
def test_hoisting_block_scope_7():
    """Block-scope mode only: (tx + i) depends on the outermost loop var only,
    so it hoists above j and k once thread attrs may be crossed."""
    ib = tvm.tir.ir_builder.create()
    dshape = (32, 64)
    data = ib.pointer("float32", name="data")
    l = te.var("l")
    m = te.var("m")
    n = te.var("n")
    tx = te.thread_axis("threadIdx.x")
    bx = te.thread_axis("blockIdx.x")
    ib.scope_attr(tx, "thread_extent", dshape[0])
    ib.scope_attr(bx, "thread_extent", dshape[1])
    with ib.for_range(0, l, "i") as i:
        with ib.for_range(0, m, "j") as j:
            with ib.for_range(0, n, "k") as k:
                with ib.if_scope((tx + i) < 3):
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 0.3
                with ib.else_scope():
                    data[bx * j + tx * j * k] = data[bx * j + tx * j * k] + 1.3
    stmt = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt))
    new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    tvm.ir.assert_structural_equal(new_stmt, stmt)
    with tvm.transform.PassContext(
        config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
    ):
        new_stmt = tvm.tir.transform.HoistIfThenElse()(mod)["main"].body
    assert not tvm.ir.structural_equal(new_stmt, stmt)
@pytest.mark.skip()
def test_hoisting_op_conv():
    """End-to-end benchmark (skipped by default): a conv2d built with and without
    block-scope hoisting must have comparable mean runtime on every enabled target."""
    dtype = "float32"
    dshape = (1, 80, 73, 73)
    kshape = (192, 80, 3, 3)
    padding = (1, 1)
    groups = 1
    dilation = (1, 1)
    kernel_size = (3, 3)
    channels = 192
    scale = 1
    x = relay.var("x", shape=dshape, dtype=dtype)
    w = relay.var("w", shape=kshape, dtype=dtype)
    y = relay.nn.conv2d(
        x,
        w,
        padding=padding,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
    )
    func = relay.Function([x, w], y)
    mod = tvm.IRModule()
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
    kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
    params = {"w": tvm.nd.array(kernel)}
    for target, dev in enabled_targets():
        # Baseline build: hoisting pass with default configuration.
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t1 = e(data_tvm).results
            t1 = np.array(t1) * 1000
            print("{} ms".format(t1.mean()))
        # Build again with block-scope hoisting enabled and compare timing.
        with tvm.transform.PassContext(
            opt_level=3, config={"tir.HoistIfThenElse": {"support_block_scope_hosting": True}}
        ):
            lib = relay.build_module.build(mod, target=target, params=params)
            m = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
            data_tvm = tvm.nd.array(data)
            m.set_input("x", data_tvm)
            m.set_input(**params)
            m.run()
            e = m.module.time_evaluator("run", dev, number=300, repeat=3)
            t2 = e(data_tvm).results
            t2 = np.array(t2) * 1000
            print("{} ms".format(t2.mean()))
        tvm.testing.assert_allclose(t1.mean(), t2.mean(), atol=1, rtol=1e-1)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 30,297 | 36.266913 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_space_cpu_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for MetaSchedule search space on CPU"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target
def _target():
    """Return the CPU target shared by every space-generation test here."""
    return Target("aws/cpu/c5.9xlarge")
def _design_space(mod):
    """Generate the LLVM design space for *mod* using all schedule rules."""
    return generate_design_space(
        kind="llvm", mod=mod, target=_target(), types=ms.ScheduleRule
    )
def test_cpu_nhwc():
    """Check the design space generated for conv2d NHWC Winograd on CPU.

    ``cpu_nhwc_0`` is the single expected sketch (hand-written TVMScript) and
    ``decision_0`` the sampling decisions that produce it; both are compared
    against the space actually generated for the ``C2D_WIN_NHWC`` workload.
    """
    # fmt: off
    @T.prim_func
    def cpu_nhwc_0(X: T.Buffer((1, 14, 14, 128), "float32"), W: T.Buffer((6, 6, 128, 128), "float32"), conv2d_winograd: T.Buffer((1, 12, 12, 128), "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
        # body
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":64, "meta_schedule.vectorize":64})
            data_pad = T.alloc_buffer([1, 16, 16, 128], dtype="float32")
            input_tile = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
            data_pack = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
            bgemm = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
            inverse = T.alloc_buffer([4, 4, 9, 128], dtype="float32")
            bgemm_global = T.alloc_buffer([6, 6, 9, 128], dtype="float32")
            for i2_0 in T.serial(9):
                for ax0, ax1, ax2, ax3 in T.grid(1, 6, 6, 128):
                    with T.block("data_pad"):
                        i0 = T.axis.spatial(1, ax0)
                        i1 = T.axis.spatial(16, i2_0 // 3 * 4 + ax1)
                        i2 = T.axis.spatial(16, i2_0 % 3 * 4 + ax2)
                        i3 = T.axis.spatial(128, ax3)
                        T.reads(X[i0, i1, i2, i3])
                        T.writes(data_pad[i0, i1, i2, i3])
                        T.block_attr({"schedule_rule":"None"})
                        data_pad[i0, i1, i2, i3] = T.if_then_else(0 <= i1 and i1 < 14 and 0 <= i2 and i2 < 14, X[i0, i1, i2, i3], T.float32(0), dtype="float32")
                for i3_0 in T.serial(2):
                    for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 64):
                        with T.block("input_tile"):
                            eps, nu = T.axis.remap("SS", [ax0, ax1])
                            p = T.axis.spatial(9, i2_0 + ax2)
                            ci = T.axis.spatial(128, i3_0 * 64 + ax3)
                            T.reads(data_pad[p // 9, p % 9 // 3 * 4 + eps, p % 3 * 4 + nu, ci])
                            T.writes(input_tile[eps, nu, p, ci])
                            T.block_attr({"schedule_rule":"None"})
                            input_tile[eps, nu, p, ci] = data_pad[p // 9, p % 9 // 3 * 4 + eps, p % 3 * 4 + nu, ci]
                    for i2_1, i3_1 in T.grid(1, 64):
                        for i0 in T.unroll(6):
                            for i1 in T.unroll(6):
                                for i4 in T.unroll(6):
                                    for i5 in T.unroll(6):
                                        with T.block("data_pack"):
                                            eps, nu = T.axis.remap("SS", [i0, i1])
                                            p = T.axis.spatial(9, i2_0 + i2_1)
                                            ci = T.axis.spatial(128, i3_0 * 64 + i3_1)
                                            r_a, r_b = T.axis.remap("RR", [i4, i5])
                                            T.reads(input_tile[r_a, r_b, p, ci])
                                            T.writes(data_pack[eps, nu, p, ci])
                                            T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_data_pack"})
                                            with T.init():
                                                data_pack[eps, nu, p, ci] = T.float32(0)
                                            data_pack[eps, nu, p, ci] = data_pack[eps, nu, p, ci] + input_tile[r_a, r_b, p, ci] * T.Select(r_a % 6 == 5 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 5 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 0, T.float32(0), T.Select(r_a % 6 == 4 and eps % 6 == 5, T.float32(1.5), T.Select(r_a % 6 == 4 and eps % 6 == 4, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 3, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 2, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 0, T.float32(1), T.Select(r_a % 6 == 3 and eps % 6 == 5, T.float32(-2), T.Select(r_a % 6 == 3 and eps % 6 == 4, T.float32(-0.5), T.Select(r_a % 6 == 3 and eps % 6 == 3, T.float32(2), T.Select(r_a % 6 == 3 and eps % 6 == 2, T.float32(2.5), T.Select(r_a % 6 == 3 and eps % 6 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and eps % 6 == 0, T.float32(1.5), T.Select(r_a % 6 == 2 and eps % 6 == 5, T.float32(-1.5), T.Select(r_a % 6 == 2 and eps % 6 == 4, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 3, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 2, T.float32(0.5), T.Select(r_a % 6 == 2 and eps % 6 == 1, T.float32(-2.5), T.Select(r_a % 6 == 2 and eps % 6 == 0, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 4, T.float32(0.5), T.Select(r_a % 6 == 1 and eps % 6 == 3, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 2, T.float32(-1), T.Select(r_a % 6 == 1 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 0, T.float32(-1.5), T.Select(r_a % 6 == 0 and eps % 6 == 5, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 1, 
T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 5 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 0, T.float32(0), T.Select(r_b % 6 == 4 and nu % 6 == 5, T.float32(1.5), T.Select(r_b % 6 == 4 and nu % 6 == 4, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 3, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 2, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 0, T.float32(1), T.Select(r_b % 6 == 3 and nu % 6 == 5, T.float32(-2), T.Select(r_b % 6 == 3 and nu % 6 == 4, T.float32(-0.5), T.Select(r_b % 6 == 3 and nu % 6 == 3, T.float32(2), T.Select(r_b % 6 == 3 and nu % 6 == 2, T.float32(2.5), T.Select(r_b % 6 == 3 and nu % 6 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and nu % 6 == 0, T.float32(1.5), T.Select(r_b % 6 == 2 and nu % 6 == 5, T.float32(-1.5), T.Select(r_b % 6 == 2 and nu % 6 == 4, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 3, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 2, T.float32(0.5), T.Select(r_b % 6 == 2 and nu % 6 == 1, T.float32(-2.5), T.Select(r_b % 6 == 2 and nu % 6 == 0, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 4, T.float32(0.5), T.Select(r_b % 6 == 1 and nu % 6 == 3, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 2, T.float32(-1), T.Select(r_b % 6 == 1 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 0, T.float32(-1.5), T.Select(r_b % 6 == 0 and nu % 6 == 5, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 1, 
T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
            for i0_0, i1_0, i2_0, i3_0, i0_1, i1_1, i2_1, i3_1 in T.grid(3, 2, 3, 1, 1, 1, 1, 1):
                for i4_0, i0_2, i1_2, i2_2, i3_2, i4_1, i0_3, i1_3, i2_3, i3_3 in T.grid(32, 1, 1, 1, 2, 4, 2, 3, 3, 64):
                    with T.block("bgemm"):
                        eps = T.axis.spatial(6, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
                        nu = T.axis.spatial(6, i1_0 * 3 + i1_1 * 3 + i1_2 * 3 + i1_3)
                        p = T.axis.spatial(9, i2_0 * 3 + i2_1 * 3 + i2_2 * 3 + i2_3)
                        co = T.axis.spatial(128, i3_0 * 128 + i3_1 * 128 + i3_2 * 64 + i3_3)
                        ci = T.axis.reduce(128, i4_0 * 4 + i4_1)
                        T.reads(data_pack[eps, nu, p, ci], W[eps, nu, co, ci])
                        T.writes(bgemm_global[eps, nu, p, co])
                        T.block_attr({"meta_schedule.tiling_structure":"SSRSRS", "meta_schedule.write_cache_level":[2]})
                        with T.init():
                            bgemm_global[eps, nu, p, co] = T.float32(0)
                        bgemm_global[eps, nu, p, co] = bgemm_global[eps, nu, p, co] + data_pack[eps, nu, p, ci] * W[eps, nu, co, ci]
                for ax0, ax1, ax2, ax3 in T.grid(2, 3, 3, 128):
                    with T.block("bgemm_global"):
                        v0 = T.axis.spatial(6, i0_0 * 2 + ax0)
                        v1 = T.axis.spatial(6, i1_0 * 3 + ax1)
                        v2 = T.axis.spatial(9, i2_0 * 3 + ax2)
                        v3 = T.axis.spatial(128, ax3)
                        T.reads(bgemm_global[v0, v1, v2, v3])
                        T.writes(bgemm[v0, v1, v2, v3])
                        bgemm[v0, v1, v2, v3] = bgemm_global[v0, v1, v2, v3]
            for i2_0, i3_0, i2_1, i3_1 in T.grid(3, 8, 3, 16):
                for i0 in T.unroll(4):
                    for i1 in T.unroll(4):
                        for i4 in T.unroll(6):
                            for i5 in T.unroll(6):
                                with T.block("inverse"):
                                    vh, vw = T.axis.remap("SS", [i0, i1])
                                    p = T.axis.spatial(9, i2_0 * 3 + i2_1)
                                    co = T.axis.spatial(128, i3_0 * 16 + i3_1)
                                    r_a, r_b = T.axis.remap("RR", [i4, i5])
                                    T.reads(bgemm[r_a, r_b, p, co])
                                    T.writes(inverse[vh, vw, p, co])
                                    T.block_attr({"schedule_rule":"conv2d_nhwc_winograd_inverse"})
                                    with T.init():
                                        inverse[vh, vw, p, co] = T.float32(0)
                                    inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * T.Select(r_a % 6 == 5 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 5 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 0, T.float32(0), T.Select(r_a % 6 == 4 and vh % 4 == 3, T.float32(-8), T.Select(r_a % 6 == 4 and vh % 4 == 2, T.float32(4), T.Select(r_a % 6 == 4 and vh % 4 == 1, T.float32(-2), T.Select(r_a % 6 == 4 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 3 and vh % 4 == 3, T.float32(0.125), T.Select(r_a % 6 == 3 and vh % 4 == 2, T.float32(0.25), T.Select(r_a % 6 == 3 and vh % 4 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 1, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 3, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 1, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 0 and vh % 4 == 3, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 5 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 0, T.float32(0), T.Select(r_b % 6 == 4 and vw % 4 == 3, T.float32(-8), T.Select(r_b % 6 == 4 and vw % 4 == 2, T.float32(4), T.Select(r_b % 6 == 4 and vw % 4 == 1, T.float32(-2), T.Select(r_b % 6 == 4 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 3 and vw % 4 == 3, T.float32(0.125), T.Select(r_b % 6 == 3 and vw % 4 == 2, T.float32(0.25), T.Select(r_b % 6 == 3 and vw % 4 == 1, T.float32(0.5), 
T.Select(r_b % 6 == 3 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 1, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 3, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 1, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 0 and vw % 4 == 3, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
            for i0, i1, i2, i3 in T.grid(1, 12, 12, 128):
                with T.block("conv2d_winograd"):
                    n, h, w, co = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co])
                    T.writes(conv2d_winograd[n, h, w, co])
                    conv2d_winograd[n, h, w, co] = inverse[h % 4, w % 4, n * 9 + h // 4 * 3 + w // 4, co]
    # fmt: on
    # Sampling decisions expected to accompany the sketch above.
    decision_0 = [
        ("SamplePerfectTile", [3, 3]),
        ("SamplePerfectTile", [8, 16]),
        ("SamplePerfectTile", [9, 1]),
        ("SamplePerfectTile", [2, 64]),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 0),
        ("SamplePerfectTile", [3, 1, 1, 2]),
        ("SamplePerfectTile", [2, 1, 1, 3]),
        ("SamplePerfectTile", [3, 1, 1, 3]),
        ("SamplePerfectTile", [1, 1, 2, 64]),
        ("SamplePerfectTile", [32, 4]),
        ("SampleCategorical", 2),
    ]
    with _target():
        mod = create_te_workload("C2D_WIN_NHWC", 0)
        actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cpu_nhwc_0],
        expected_decisions=[decision_0],
    )
# Allow running this file as a standalone script.
if __name__ == "__main__":
    test_cpu_nhwc()
| 15,686 | 92.375 | 4,141 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_common_subexpr_elim.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import tvm
from tvm import auto_scheduler, te, topi
from tvm.ir.base import save_json
from tvm.ir.module import IRModule
from tvm.script import tir as T
# -----------------------------------------------------
# Basic test for the expected Behavior of the CSE pass
# -----------------------------------------------------
# A test program which gives the opportunity for the CSE pass to introduce two new variables,
# at two different levels
def test_cse():
    """End-to-end check of the CSE pass.

    The input program contains two redundant sub-expressions at two different
    nesting levels ((z1+z2) and (x+y)); each must be bound to a fresh cse_var,
    the redundant occurrences replaced, and everything else left unchanged.

    Fix: the checks on the "a" and "b" let-bindings were bare comparisons
    (``body.var.name == "a"``) whose result was silently discarded; they are
    now real assertions.
    """
    z1 = te.var("z1")
    z2 = te.var("z2")
    z3 = te.var("z3")
    i1 = te.var("i1")
    i2 = te.var("i2")
    x = te.var("x")
    y = te.var("y")
    a = te.var("a")
    b = te.var("b")
    dtype = "int32"
    buffer = tvm.tir.decl_buffer((50,), dtype)
    # Test prog :
    # let z1=1 in let z2=2 in
    #   Mem[i1] = z1+z2;
    #   let x = 1 in let y = 1 in
    #     let a = (x+y) + (z1+z2) in
    #       let b = (x+y) + z3 in
    #         Mem[i2] = a+b;
    body = tvm.tir.LetStmt(
        z1,
        1,
        tvm.tir.LetStmt(
            z2,
            2,
            tvm.tir.SeqStmt(
                [
                    tvm.tir.BufferStore(buffer, z1 + z2, [i1]),
                    tvm.tir.LetStmt(
                        x,
                        1,
                        tvm.tir.LetStmt(
                            y,
                            1,
                            tvm.tir.LetStmt(
                                a,
                                (x + y) + (z1 + z2),
                                tvm.tir.LetStmt(
                                    b, (x + y) + z3, tvm.tir.BufferStore(buffer, a + b, [i2])
                                ),
                            ),
                        ),
                    ),
                ]
            ),
        ),
    )
    # This test program gives the opportunity to introduce two new variables, at two different
    # levels and to perform replacements in the value of "a" and "b", using these new variables.
    # We will check all of that underneath and more, making also sure that nothing else has changed
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, z3], body))
    body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
    tvm.transform.PrintIR()(body)
    body = body["main"].body  # Gets the body of the main, i.e. the full statement
    assert body.var.name == "z1"
    assert body.value == 1
    body = body.body
    assert body.var.name == "z2"
    assert body.value == 2
    # This is the let-in for the first variable generated cse_var_1
    assert isinstance(body.body, tvm.tir.LetStmt)
    body = body.body
    # And this is the name and value of this variable
    cse_var_1 = body.var  # Keep the variable accessible for later checking the replacements
    assert body.var.name == "cse_var_1"
    assert tvm.ir.structural_equal(body.value, z1 + z2)
    assert isinstance(body.body, tvm.tir.SeqStmt)
    body = body.body
    assert isinstance(body[0], tvm.tir.BufferStore)
    assert isinstance(body[1], tvm.tir.LetStmt)
    body = body[1]
    assert body.var.name == "x"
    assert body.value == 1
    body = body.body
    assert body.var.name == "y"
    assert body.value == 1
    # This is the let-in for the second variable generated cse_var_2
    assert isinstance(body.body, tvm.tir.LetStmt)
    body = body.body
    # And this is the name and value of this variable
    cse_var_2 = body.var  # Keep the variable accessible for later checking the replacements
    assert body.var.name == "cse_var_2"
    assert tvm.ir.structural_equal(body.value, x + y)
    body = body.body
    # Was a bare comparison (no-op) before; now actually asserted.
    assert body.var.name == "a"
    # Check that the replacement has been done correctly!
    assert tvm.ir.structural_equal(body.value, cse_var_2 + cse_var_1)
    body = body.body
    # Was a bare comparison (no-op) before; now actually asserted.
    assert body.var.name == "b"
    # Check that the replacement has been done correctly!
    assert tvm.ir.structural_equal(body.value, cse_var_2 + z3)
    assert isinstance(body.body, tvm.tir.BufferStore)
# -----------------------------------------------------
# Tests related to If nodes
# -----------------------------------------------------
# First specific test for if nodes : Some duplicated computations appear only in one branch (here
# the Then branch), not in both branches.
# In this case, the CSE pass should introduce the redundant computation at the top of the Then
# branch, not before the whole If (otherwise that would lead to some computations being computed
# for nothing when it is the Else branch that is executed).
def test_cse_ifNode_1():
    """The redundant (y+z) appears only in the Then branch, so the cse_var
    let-binding must be introduced at the top of the Then branch, not hoisted
    above the whole If node."""
    b = te.var("b")
    i1 = te.var("i1")
    i2 = te.var("i2")
    i3 = te.var("i3")
    y = te.var("y")
    z = te.var("z")
    dtype = "int32"
    buffer = tvm.tir.decl_buffer((50,), dtype)
    # Test prog :
    # let b=1 in
    #   if(b) {
    #      Mem[i1] = y+z
    #      Mem[i2] = y+z
    #   }
    #   else {
    #      Mem[i3] = y
    #   }
    body = tvm.tir.LetStmt(
        b,
        1,
        tvm.tir.IfThenElse(
            b,
            tvm.tir.SeqStmt(
                [tvm.tir.BufferStore(buffer, y + z, [i1]), tvm.tir.BufferStore(buffer, y + z, [i2])]
            ),
            tvm.tir.BufferStore(buffer, y, [i3]),
        ),
    )
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
    body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
    tvm.transform.PrintIR()(body)
    body = body["main"].body  # Gets the body of the main, i.e. the full statement
    assert body.var.name == "b"
    assert body.value == 1
    assert isinstance(body.body, tvm.tir.IfThenElse)
    body = body.body
    assert isinstance(body.then_case, tvm.tir.LetStmt)
    body = body.then_case
    # The let-in introduced by the CSE should appear now, inside the Then branch of the If node
    assert body.var.name == "cse_var_1"
    # and it should contain the expression (y+z) that was redundant
    assert tvm.ir.structural_equal(body.value, y + z)
# Second test for if nodes : Some duplicated computations appear in both the Then and Else branch.
# In this case, the CSE pass should introduce the redundant computation before the whole If node,
# because regardless of the execution path, it is going to be computed.
def test_cse_ifNode_2():
    """The redundant (y+z) appears in both the Then and Else branches, so the
    cse_var let-binding must be hoisted above the whole If node (it is computed
    on every execution path)."""
    b = te.var("b")
    i1 = te.var("i1")
    i2 = te.var("i2")
    i3 = te.var("i3")
    y = te.var("y")
    z = te.var("z")
    dtype = "int32"
    buffer = tvm.tir.decl_buffer((50,), dtype)
    # Test prog :
    # let b=1 in
    #   if(b) {
    #      Mem[i1] = y+z
    #      Mem[i2] = y
    #   }
    #   else {
    #      Mem[i3] = y+z
    #   }
    body = tvm.tir.LetStmt(
        b,
        1,
        tvm.tir.IfThenElse(
            b,
            tvm.tir.SeqStmt(
                [
                    tvm.tir.BufferStore(buffer, y + z, [i1]),  # (y+z) is present in Then branch
                    tvm.tir.BufferStore(buffer, y, [i2]),
                ]
            ),
            tvm.tir.BufferStore(buffer, y + z, [i3]),  # and also present in the Else branch
        ),
    )
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
    body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
    tvm.transform.PrintIR()(body)
    body = body["main"].body  # Gets the body of the main, i.e. the full statement
    assert isinstance(body, tvm.tir.LetStmt)
    # The let-in introduced by the CSE should appear now, at the toplevel (i.e. before the If)
    assert body.var.name == "cse_var_1"
    # and it should contain the expression (y+z) that was redundant
    assert tvm.ir.structural_equal(body.value, y + z)
# -------------------------------------------------------------------------------------------------
# Test commoning in cascade : after having introduced a big exp ((x+y)+z) into a new variable,
# it will become possible to do another commoning for (x+y) which appears both in the new variable
# and in the rest of the program.
# -------------------------------------------------------------------------------------------------
def test_cse_cascade():
    """Cascading commoning: after ((x+y)+z) is bound to cse_var_1, the inner
    (x+y) — shared by cse_var_1's definition and a third store — must itself be
    commoned into cse_var_2, which is emitted first (outermost)."""
    i1 = te.var("i1")
    i2 = te.var("i2")
    i3 = te.var("i3")
    x = te.var("x")
    y = te.var("y")
    z = te.var("z")
    dtype = "int32"
    buffer = tvm.tir.decl_buffer((50,), dtype)
    # Test prog :
    # Mem[i1] = (x+y)+z;
    # Mem[i2] = (x+y)+z;
    # Mem[i3] = x+y
    body = tvm.tir.SeqStmt(
        [
            tvm.tir.BufferStore(buffer, (x + y) + z, [i1]),
            tvm.tir.BufferStore(buffer, (x + y) + z, [i2]),
            tvm.tir.BufferStore(buffer, (x + y), [i3]),
        ]
    )
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, x, y, z], body))
    body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
    tvm.transform.PrintIR()(body)
    body = body["main"].body  # Gets the body of the main, i.e. the full statement
    assert isinstance(body, tvm.tir.LetStmt)
    # The second let-in (by order introduced) introduced by the CSE should appear first
    cse_var_2 = body.var  # Keep the variable accessible for later checking the replacements
    assert body.var.name == "cse_var_2"
    # and it should contain the expression (x+y)
    assert tvm.ir.structural_equal(body.value, (x + y))
    body = body.body
    assert isinstance(body, tvm.tir.LetStmt)
    # The first let-in (by order introduced) introduced by the CSE should appear now, after the 2nd
    cse_var_1 = body.var  # Keep the variable accessible for later checking the replacements
    assert body.var.name == "cse_var_1"
    # and it should contain the expression cse_var_2+z
    assert tvm.ir.structural_equal(body.value, cse_var_2 + z)
    body = body.body
    assert isinstance(body, tvm.tir.SeqStmt)
    assert isinstance(body[0], tvm.tir.BufferStore)
    assert isinstance(body[1], tvm.tir.BufferStore)
    assert isinstance(body[2], tvm.tir.BufferStore)
    store1 = body[0]
    store2 = body[1]
    store3 = body[2]
    assert tvm.ir.structural_equal(store1.value, cse_var_1)
    assert tvm.ir.structural_equal(store2.value, cse_var_1)
    assert tvm.ir.structural_equal(store3.value, cse_var_2)
# -----------------------------------------------------------------------------------------
# A test which ensures that we don't perform normalizations outside of introduced variables
# -----------------------------------------------------------------------------------------
def test_no_normalization_without_commoning():
    """When nothing is commoned, the pass (even with identify_equiv_terms=True)
    must leave the expression exactly as written, not re-associate it."""
    x = te.var("x")
    y = te.var("y")
    z = te.var("z")
    a = te.var("a")
    # Test prog :
    # let a = x + (y + z) in a  -- "a" occurs once, so no commoning is possible.
    stmt = tvm.tir.LetStmt(a, x + (y + z), tvm.tir.Evaluate(a))
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x, y, z], stmt))
    transformed = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod)
    tvm.transform.PrintIR()(transformed)
    main_body = transformed["main"].body  # The full statement of main
    assert main_body.var.name == "a"
    # The bound value must still be x + (y + z), untouched.
    assert tvm.ir.structural_equal(main_body.value, x + (y + z))
# -------------------------------------------------
# Part for testing the commoning with equivalences
# -------------------------------------------------
# Input for the distributivity check: x*(y+z) and x*y + x*z are semantically
# equivalent terms that the pass should recognize and common.
@T.prim_func
def func_distributivity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None:
    B = T.Buffer((50,), "int32")
    B[i1] = x * (y + z)
    B[i2] = x * y + x * z
# Expected output of CSE on func_distributivity: both stores read one cse_var
# bound to the (normalized) x*y + x*z form.
@T.prim_func
def func_distributivity_expected(
    i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32
) -> None:
    B = T.Buffer((50,), "int32")
    with T.LetStmt(x * y + x * z) as cse_var_1:
        B[i1] = cse_var_1
        B[i2] = cse_var_1
# Input for the associativity check: (x+y)+z and x+(y+z) are semantically
# equivalent terms that the pass should recognize and common.
@T.prim_func
def func_associativity(i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32) -> None:
    B = T.Buffer((50,), "int32")
    B[i1] = (x + y) + z
    B[i2] = x + (y + z)
# Expected output of CSE on func_associativity: both stores read one cse_var
# bound to the (x+y)+z form.
@T.prim_func
def func_associativity_expected(
    i1: T.int32, i2: T.int32, x: T.int32, y: T.int32, z: T.int32
) -> None:
    B = T.Buffer((50,), "int32")
    with T.LetStmt((x + y) + z) as cse_var_1:
        B[i1] = cse_var_1
        B[i2] = cse_var_1
def _check(original, transformed):
    """Run CSE (with term equivalence on) over *original* and require the
    result to be structurally equal to *transformed*."""
    mod = tvm.IRModule.from_expr(original)
    after = tvm.tir.transform.CommonSubexprElimTIR(identify_equiv_terms=True)(mod)
    tvm.transform.PrintIR()(after)
    tvm.ir.assert_structural_equal(after["main"], transformed)
def test_semantic_equiv_distributivity():
    """CSE must common x*(y+z) with x*y + x*z (distributivity)."""
    _check(func_distributivity, func_distributivity_expected)
def test_semantic_equiv_associativity():
    """CSE must common (x+y)+z with x+(y+z) (associativity)."""
    _check(func_associativity, func_associativity_expected)
# -----------------------------------------------------
# Tests that verify the determinism of the pass
# -----------------------------------------------------
def test_deterministic_cse():
    """Test deterministic allocation of CSE vars

    We expect something like
        result = (x + 1) + (x + 2) + (x + 3) + (x + 1) + (x + 2) + (x + 3)
        -->
        cse_var_3 = (x + 1)
        cse_var_2 = (x + 2)
        cse_var_1 = (x + 3)
        result = cse_var_3 + cse_var_2 + cse_var_1 + cse_var_3 + cse_var_2 + cse_var_1

    Fixes: removed an unused ``import random`` that preceded the docstring
    (which also kept the docstring from being the function's first statement),
    and dropped a redundant ``sorted()`` on an already-increasing sequence.
    """
    NUM_TERMS = 10
    REPEATS = 10
    x = te.var("x")
    result = te.var("result")
    # range() already yields 1..NUM_TERMS in increasing order; no sort needed.
    offsets = [i + 1 for i in range(NUM_TERMS)]
    inc1 = [(x + offsets[i]) for i in range(NUM_TERMS)]
    inc2 = [(x + offsets[i]) for i in range(NUM_TERMS)]
    expression = x
    for add in inc1 + inc2:
        expression = expression + add
    let_stmt = tvm.tir.LetStmt(result, expression, tvm.tir.Evaluate(result))
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([x], let_stmt))
    initial_hash = None
    for _ in range(REPEATS):
        body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
        body = body["main"]
        # Hash and ensure serialize json is the same every time
        json_val = save_json(body)
        json_hash = hashlib.sha256(json_val.encode()).hexdigest()
        if initial_hash is None:
            initial_hash = json_hash
        assert json_hash == initial_hash
# Needed for the second test on determinism
# A serialized auto-scheduler measure record ("v0.6" JSON log format) for the
# conv2d_layer workload defined below; parsed back by load_record_from_string
# in test_deterministic_cse_2.  The trailing backslashes continue one single
# string literal across lines.
LOG_LINE = '{"i": [["[\\"conv2d_layer\\", 1, 7, 7, 512, 512, 3, 3, [1, 1], [1, 1]]", \
"llvm -keys=cpu -mcpu=broadwell -num-cores=2", \
[8, 64, 64, 0, 0, 0, 0, 0], "", 1, []], [[], [["CI", 5], \
["SP", 3, 0, 1, [1, 1, 1], 1], ["SP", 3, 4, 512, [1, 32, 16], 1], \
["SP", 3, 8, 7, [7, 1, 1], 1], ["SP", 3, 12, 7, [1, 1, 1], 1], \
["SP", 3, 16, 512, [1], 1], ["SP", 3, 18, 3, [1], 1], ["SP", 3, 20, 3, [3], 1], \
["RE", 3, [0, 4, 8, 12, 1, 5, 9, 13, 16, 18, 20, 2, 6, 10, 14, 17, 19, 21, 3, 7, \
11, 15]], ["FSP", 6, 0, 1, 2], ["FSP", 6, 3, 2, 2], ["FSP", 6, 6, 3, 2], \
["FSP", 6, 9, 4, 2], ["RE", 6, [0, 3, 6, 9, 1, 4, 7, 10, 2, 5, 8, 11]], \
["CA", 3, 6, 7], ["CA", 1, 6, 5], ["FU", 6, [0, 1, 2, 3, 4, 5]], ["AN", 6, 0, 3], \
["PR", 3, 0, "auto_unroll_max_step$512"], ["AN", 1, 3, 2], ["AN", 3, 21, 2], \
["AN", 6, 6, 2]]]], "r": [[0.0331129], 0, 0.900362, 1647464342], "v": "v0.6"}\n'
# The workload associated with the log
@auto_scheduler.register_workload
def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
    # NCHW conv2d followed by bias-add and ReLU; returns the tensors the
    # auto-scheduler uses to build the compute DAG.
    data = te.placeholder((N, CI, H, W), name="data")
    kernel = te.placeholder((CO, CI, KH, KW), name="kernel")
    bias = te.placeholder((1, CO, 1, 1), name="bias")
    conv = topi.nn.conv2d_nchw(data, kernel, stride, padding, dilation=1, out_dtype="float32")
    out = topi.nn.relu(conv + bias)
    return [data, kernel, bias, out]
def test_deterministic_cse_2():
    """Lowering the same auto-scheduler record repeatedly must serialize to an
    identical JSON hash every time (determinism through the whole pipeline)."""
    inp, inr = auto_scheduler.measure_record.load_record_from_string(LOG_LINE)
    inp = auto_scheduler.measure.recover_measure_input(inp, rebuild_state=True)
    reference_hash = None
    for _ in range(10):
        sch, args = inp.task.compute_dag.apply_steps_from_state(inp.state)
        primfunc = tvm.lower(sch, args)["main"]
        digest = hashlib.sha256(save_json(primfunc).encode("utf-8")).hexdigest()
        # Every iteration must reproduce the hash of the first one.
        if reference_hash is None:
            reference_hash = digest
        assert digest == reference_hash
# Running this file directly executes every test once, in order.
if __name__ == "__main__":
    # Basic test:
    test_cse()
    # Tests related to If nodes:
    test_cse_ifNode_1()
    test_cse_ifNode_2()
    # Test performing a commoning on a commoning:
    test_cse_cascade()
    # Test that verifies that the input program itself is not being normalized by the pass:
    test_no_normalization_without_commoning()
    # Tests that turn on the equivalence of terms and verify the commoning with equivalences:
    test_semantic_equiv_distributivity()
    test_semantic_equiv_associativity()
    # Tests that verify the determinism of the pass:
    test_deterministic_cse()
    test_deterministic_cse_2()
| 17,574 | 33.596457 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_arith_intset.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm import tir
from tvm.arith.analyzer import Analyzer
class IntSetChecker:
    """Checks Analyzer.int_set() results against expected (min, max) bounds."""

    def __init__(self):
        self.analyzer = tvm.arith.Analyzer()

    def verify(self, data, dmap, expected):
        """Assert that int_set(data, dmap) has the expected min/max values."""
        res = self.analyzer.int_set(data, dmap)

        def err_msg():
            return "\ndata={}\ndmap={}\nres={}\nexpected={}".format(data, dmap, res, expected)

        expected_min, expected_max = expected
        assert self.analyzer.can_prove_equal(res.min_value, expected_min), err_msg()
        assert self.analyzer.can_prove_equal(res.max_value, expected_max), err_msg()
def test_basic():
    """IntervalSet stores its endpoints; single_point collapses both to one value."""
    interval = tvm.arith.IntervalSet(2, 3)
    assert interval.min_value.value == 2
    assert interval.max_value.value == 3
    point = tvm.arith.IntSet.single_point(2)
    assert point.min_value.value == 2
    assert point.max_value.value == 2
def test_vector():
    """An IntSet from a Ramp spans base .. base + stride * (lanes - 1)."""
    base, stride, lanes = 10, 3, 2
    vec = tvm.arith.IntSet.vector(tvm.tir.Ramp(base, stride, lanes))
    assert vec.min_value.value == base
    assert vec.max_value.value == base + stride * (lanes - 1)
def test_add_sub():
    """Bounds of x+y and x-y under interval bindings."""
    checker = IntSetChecker()
    x, y = te.var("x"), te.var("y")
    # Only x bound: the result stays symbolic in y.
    checker.verify(x + y, {x: tvm.arith.IntervalSet(0, 10)}, (y, 10 + y))
    # Both bound: endpoints add / subtract.
    checker.verify(x + y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (1, 21))
    checker.verify(x - y, {x: tvm.arith.IntervalSet(0, 10), y: tvm.arith.IntervalSet(1, 11)}, (-11, 9))
def test_mul_div():
    """Bounds of multiplication and truncated/floored division over intervals."""
    ck = IntSetChecker()
    x, y = te.var("x"), te.var("y")
    tdiv = tvm.tir.truncdiv
    # y is known positive (1..100), so dividing by y preserves interval order.
    ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
    ck.verify(x * y, {x: tvm.arith.IntervalSet(0, 10)}, (0, 10 * y))
    ck.verify(x * 2, {x: tvm.arith.IntervalSet(1, 10)}, (2, 20))
    # Negative multiplier flips the interval.
    ck.verify(x * -2, {x: tvm.arith.IntervalSet(1, 10)}, (-20, -2))
    ck.verify(tdiv(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, tdiv(10, y)))
    ck.verify(tdiv(x, 2), {x: tvm.arith.IntervalSet(1, 10)}, (0, 5))
    fld = tvm.te.floordiv
    ck.verify(fld(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, fld(10, y)))
    # Floor division rounds toward negative infinity: floordiv(-1, 2) == -1.
    ck.verify(fld(x, 2), {x: tvm.arith.IntervalSet(-1, 10)}, (-1, 5))
def test_mod():
    """Bounds of truncated/floored modulo, including the case where the dividend
    interval is expressed in terms of other bound variables."""
    ck = IntSetChecker()
    x, y = te.var("x"), te.var("y")
    tmod = tvm.tir.truncmod
    # y is known positive (1..100), so tmod(x, y) with non-negative x is in [0, y-1].
    ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True)
    ck.verify(tmod(x, y), {x: tvm.arith.IntervalSet(0, 10)}, (0, y - 1))
    ck.verify(tmod(x, 10), {x: tvm.arith.IntervalSet(1, 10)}, (0, 9))
    flm = tvm.te.floormod
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(-10, 10)}, (0, 9))
    # When the interval fits inside one modulo period the bounds stay tight.
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 5)}, (3, 5))
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(13, 15)}, (3, 5))
    # Intervals crossing a period boundary widen to the full [0, 9] range.
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 15)}, (0, 9))
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(3, 11)}, (0, 9))
    ck.verify(flm(x, 10), {x: tvm.arith.IntervalSet(1, 21)}, (0, 9))
    fld = tvm.te.floordiv
    z = te.var("z")
    # With x in [0, 3), flm(z*8 + x*4, 8) cannot be simplified symbolically here.
    ck.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 3))
    ck.verify(
        flm(y, 8),
        {y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)},
        (
            z * 8 + x * 4 - 8 * fld(z * 8 + x * 4, 8),
            z * 8 + x * 4 + 3 - 8 * fld(z * 8 + x * 4, 8),
        ),
    )
    # With x in [0, 2), x*4 stays below 8 and the modulo simplifies to x*4 .. x*4+3.
    ck1 = IntSetChecker()
    ck1.analyzer.bind(x, tvm.ir.Range.from_min_extent(0, 2))
    ck1.verify(
        flm(y, 8), {y: tvm.arith.IntervalSet(z * 8 + x * 4, z * 8 + x * 4 + 3)}, (x * 4, x * 4 + 3)
    )
def test_max_min():
    """max/min over an interval, plus the symbolic fallback with no bindings."""
    checker = IntSetChecker()
    x, y = te.var("x"), te.var("y")
    checker.verify(tvm.te.max(x, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (1, 11))
    checker.verify(tvm.te.min(x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 9))
    # With no bindings the result degenerates to a single symbolic point.
    checker.verify(tvm.te.min(x, y), {}, (tvm.te.min(x, y), tvm.te.min(x, y)))
    checker.verify(tvm.te.max(x, y), {}, (tvm.te.max(x, y), tvm.te.max(x, y)))
def test_select():
    """Select merges the ranges of both branches: [-1, 11] for x in [0, 10]."""
    checker = IntSetChecker()
    x, y = te.var("x"), te.var("y")
    checker.verify(tvm.tir.Select(x > 0, x - 1, x + 1), {x: tvm.arith.IntervalSet(0, 10)}, (-1, 11))
def check_region_bound(expect_region, var_dom, mode, predicate=None):
    """Helper to check region bound estimation.
    Parameters
    ----------
    expect_region: dict
        The keys are of form (begin, end) or PrimExpr as a single point. The values are
        expected estimated region or region dict on different bindings.
    var_dom: dict
        Map var to iteration domain range.
    mode: str
        Specify "lowerbound", "upperbound" or else use strict bound estimation.
    predicate: PrimExpr
        Extra predicate, defaults to True.
    """
    if predicate is None:
        predicate = tvm.tir.IntImm("bool", 1)
    region = []
    expect = []
    # Normalize the expected-region dict into parallel lists of Range / expectation.
    for k, v in expect_region.items():
        if not isinstance(k, (tuple, list)):
            # A bare PrimExpr key denotes a single-point region [k, k + 1).
            k = (k, k + 1)
        region.append(tvm.ir.Range.from_min_extent(k[0], Analyzer().simplify(k[1] - k[0])))
        expect.append(v)
    if mode == "lowerbound":
        result = tvm.arith.estimate_region_lower_bound(
            region=region, var_dom=var_dom, predicate=predicate
        )
    elif mode == "upperbound":
        result = tvm.arith.estimate_region_upper_bound(
            region=region, var_dom=var_dom, predicate=predicate
        )
    else:
        result = tvm.arith.estimate_region_strict_bound(
            region=region, var_dom=var_dom, predicate=predicate
        )
    if result is None:
        # Estimation failed; the caller must have expected None for every region.
        assert all([_ is None for _ in expect])
        return
    assert len(result) == len(expect)
    for intset, expect_desc in zip(result, expect):
        if isinstance(expect_desc, dict):
            # check range on different free var bindings
            for binding in expect_desc:
                analyzer = Analyzer()
                for k, v in binding:
                    analyzer.bind(k, v)
                expect_begin, expect_end = expect_desc[binding]
                # Simplify with 3 rewrite steps before comparing against expectation.
                result_begin = analyzer.simplify(intset.min_value, 3)
                result_end = analyzer.simplify(intset.max_value + 1, 3)
                assert analyzer.can_prove_equal(
                    result_begin - expect_begin, 0
                ), f"{result_begin} vs {expect_begin}"
                assert analyzer.can_prove_equal(
                    result_end - expect_end, 0
                ), f"{result_end} vs {expect_end}"
        else:
            # check range
            expect_begin, expect_end = expect_desc
            analyzer = Analyzer()
            assert analyzer.can_prove_equal(
                intset.min_value - expect_begin, 0
            ), f"{intset.min_value} vs {expect_begin}"
            assert analyzer.can_prove_equal(
                intset.max_value - expect_end + 1, 0
            ), f"{intset.max_value} vs {expect_end - 1}"
def test_region_bound_not_independent():
    """Dependent or partially-affine regions have no exact lower bound."""
    # (i, i+2) and (i+2, i+4) are dependent, thus the lowerbound is not available
    i = tvm.tir.Var("i", "int32")
    var_dom = {
        i: tvm.ir.Range(begin=0, end=64),
    }
    check_region_bound({(i, i + 2): None, (i + 2, i + 4): None}, var_dom, mode="lowerbound")
    check_region_bound({(i, i + 2): (0, 65), (i + 2, i + 4): (2, 67)}, var_dom, mode="upperbound")
    # when only a subset of access indices are affine
    i, j, k = tvm.tir.Var("i", "int32"), tvm.tir.Var("j", "int32"), tvm.tir.Var("k", "int32")
    var_dom = {
        i: tvm.ir.Range(begin=0, end=16),
        j: tvm.ir.Range(begin=0, end=16),
        k: tvm.ir.Range(begin=0, end=16),
    }
    check_region_bound(
        {i // 4: None, j * 4 + i % 4: None, tir.truncdiv(k, 2): None},
        var_dom,
        predicate=j * 4 + i % 4 > 3,
        mode="lowerbound",
    )
    check_region_bound(
        {i // 4: (0, 4), j * 4 + i % 4: (4, 64), tir.truncdiv(k, 2): (0, 8)},
        var_dom,
        predicate=j * 4 + i % 4 > 3,
        mode="upperbound",
    )
def test_region_bound_stride_too_wide():
    """Stride 4 with extent 2 leaves gaps, so only an upper bound exists."""
    loop_var = tvm.tir.Var("i", "int32")
    dom = {loop_var: tvm.ir.Range(begin=0, end=64)}
    touched = (loop_var * 4, loop_var * 4 + 2)
    # Gaps between iterations: no exact lower bound ...
    check_region_bound({touched: None}, dom, mode="lowerbound")
    # ... but the over-approximating upper bound is still available.
    check_region_bound({touched: (0, 254)}, dom, mode="upperbound")
def test_region_bound_small_stride():
    """Stride 4 with extent 8 overlaps between iterations, so bounds are exact."""
    loop_var = tvm.tir.Var("i", "int32")
    dom = {loop_var: tvm.ir.Range(begin=0, end=64)}
    touched = {(loop_var * 4, loop_var * 4 + 8): (0, 260)}
    check_region_bound(touched, dom, mode="lowerbound")
def test_region_lower_bound_split_predicate():
    """Region bounds under a predicate that restricts a split iteration space."""
    x_o = tvm.tir.Var("xo", "int32")
    x_i = tvm.tir.Var("xi", "int32")
    # Fused index of the outer/inner split; predicate x < 63 cuts the last point.
    x = x_o * 4 + x_i
    var_dom = {
        x_o: tvm.ir.Range(begin=0, end=16),
        x_i: tvm.ir.Range(begin=0, end=4),
    }
    check_region_bound({(x * 4, x * 4 + 8): (0, 256)}, var_dom, predicate=x < 63, mode="lowerbound")
    check_region_bound(
        {(x * 4, x * 4 + 8): (0, 256), (x * 3, x * 3 + 5): (0, 191)},
        var_dom,
        predicate=x < 63,
        mode="upperbound",
    )
def test_region_lower_bound_multiple_variables():
    """Exact lower bounds for indices mixing div/mod of several iterators."""
    div = tvm.tir.floordiv
    mod = tvm.tir.floormod
    x = tvm.tir.Var("x", "int32")
    wid = tvm.tir.Var("wid", "int32")
    # i, j, k decompose (x, wid) into tile coordinates.
    i = div(x, 16)
    j = div(mod(x, 16), 4) * 8 + mod(x, 4) + div(wid, 32) * 4
    k = wid % 32
    var_dom = {
        x: tvm.ir.Range(begin=0, end=32),
        wid: tvm.ir.Range(begin=0, end=64),
    }
    check_region_bound({i: (0, 2), j: (0, 32), k: (0, 32)}, var_dom, mode="lowerbound")
def test_region_lower_bound_negative_scale():
    """Lower-bound estimation handles iterators that enter with negative scale."""
    i = tvm.tir.Var("i", "int32")
    j = tvm.tir.Var("j", "int32")
    dom = {
        i: tvm.ir.Range(begin=0, end=4),
        j: tvm.ir.Range(begin=0, end=4),
    }
    # Both regions shrink as their iterator grows; bounds must still be exact.
    regions = {
        (1 - i, 5 - i): (-2, 5),
        (20 - j * 4, 36 - j * 4): (8, 36),
    }
    check_region_bound(regions, dom, mode="lowerbound")
def test_region_lower_bound_for_non_perfect_tile():
    """Bound estimation for non-perfect tiling, checked under several bindings of h3."""
    h1 = tvm.tir.Var("h1", "int32")
    h2 = tvm.tir.Var("h2", "int32")
    h3 = tvm.tir.Var("h3", "int32")
    # non-uniform tiling, single inner variable
    var_dom = {
        h2: tvm.ir.Range(begin=0, end=10),
    }
    check_region_bound(
        {
            h3 * 8
            + h2: {
                # () leaves h3 free: the symbolic bound clipped by the predicate.
                (): (
                    tvm.tir.max(h3 * 8, 1),
                    tvm.tir.min(0, h3 * 8 - 214) + 224,
                ),
                ((h3, 0),): (1, 10),  # h3 == 0: region is [1, 10)
                ((h3, 10),): (h3 * 8, h3 * 8 + 10),  # 0 < h3 <= 26: region is [h3 * 8, h3 * 8 + 10)
                ((h3, 27),): (h3 * 8, 224),  # h3 > 26: region is [h3 * 8, 224)
            }
        },
        var_dom,
        predicate=tvm.tir.all(1 <= h3 * 8 + h2, h3 * 8 + h2 < 224),
        mode="lowerbound",
    )
    # non-uniform tiling, two inner variables
    var_dom = {
        h1: tvm.ir.Range(begin=0, end=5),
        h2: tvm.ir.Range(begin=0, end=2),
    }
    check_region_bound(
        {
            h3 * 8
            + h2 * 5
            + h1: {
                (): (
                    tvm.tir.max(h3 * 8, 1),
                    tvm.tir.min(0, h3 * 8 - 214) + 224,
                ),
                ((h3, 0),): (1, 10),
                ((h3, 10),): (h3 * 8, h3 * 8 + 10),
                ((h3, 27),): (h3 * 8, 224),
            }
        },
        var_dom,
        predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h2 * 5 + h1 < 224),
        mode="lowerbound",
    )
    # lowerbound should fail on incompatible predicates
    check_region_bound(
        {h3 * 8 + h2 * 5 + h1: None},
        var_dom,
        predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h1 * 2 + h2 < 224),
        mode="lowerbound",
    )
    check_region_bound(
        {h3 * 8 + h2 * 5 + h1: (h3 * 8, h3 * 8 + 10)},
        var_dom,
        predicate=tvm.tir.all(1 <= h3 * 8 + h2 * 5 + h1, h3 * 8 + h1 * 2 + h2 < 224),
        mode="upperbound",
    )
def test_region_lower_bound_unfusable():
    """(i + j) // 2 cannot be fused into one iterator, but bounds still hold."""
    var_dom = {
        tvm.tir.Var("i", "int32"): tvm.ir.Range(8),
        tvm.tir.Var("j", "int32"): tvm.ir.Range(4),
    }
    # Iterating a dict yields its keys, so this unpacks the two loop vars.
    i, j = var_dom
    check_region_bound({(i + j) // 2: (0, 6)}, var_dom, mode="lowerbound")
def test_union_lower_bound():
    """union_lower_bound preserves the +/- infinity sentinels of its inputs."""
    neg_inf = tvm.arith.int_set.neg_inf()
    pos_inf = tvm.arith.int_set.pos_inf()
    set_0 = tvm.arith.IntervalSet(min_value=neg_inf, max_value=0)
    set_1 = tvm.arith.IntervalSet(min_value=1, max_value=pos_inf)
    result = tvm.arith.int_set.union_lower_bound([set_0, set_1])
    # same_as checks object identity: the sentinel exprs must be reused, not copied.
    assert result.min_value.same_as(neg_inf)
    assert result.max_value.same_as(pos_inf)
    # An empty set (min > max) must not affect the union.
    set_2 = tvm.arith.IntervalSet(min_value=pos_inf, max_value=neg_inf)
    result = tvm.arith.int_set.union_lower_bound([set_0, set_1, set_2])
    assert result.min_value.same_as(neg_inf)
    assert result.max_value.same_as(pos_inf)
if __name__ == "__main__":
    # Run every test in this file through the TVM/pytest harness.
    tvm.testing.main()
| 13,442 | 34.007813 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_convert_ssa.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T, ir as I
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared fixture: each subclass's `before` is run through ConvertSSA
    and compared structurally against its `expected`."""
    transform = tvm.tir.transform.ConvertSSA()
class TestReuseInSequentialLetStmt(BaseBeforeAfter):
    """De-dup sequential variable bindings"""
    def before(self):
        # Manually construct the PrimFunc body, as SSA violations are
        # not valid TIR, and may not be expressible in future versions
        # of TVMSCript.
        var = tir.Var("var", "int32")
        # The same Var is bound twice in sequence -- an SSA violation.
        sequential_bindings = tir.SeqStmt(
            [
                tir.LetStmt(var, 16, tir.Evaluate(var)),
                tir.LetStmt(var, 32, tir.Evaluate(var)),
            ]
        )
        func = tir.PrimFunc([], sequential_bindings)
        return func
    def expected(self):
        # After ConvertSSA each binding receives its own fresh variable.
        @T.prim_func
        def func():
            with T.LetStmt(T.int32(16)) as var1:
                T.evaluate(var1)
            with T.LetStmt(T.int32(32)) as var2:
                T.evaluate(var2)
        return func
class TestReuseInNestedLetStmt(BaseBeforeAfter):
    """De-dup nested bindings
    Use of a variable with nested bindings is de-duplicated to refer
    to the inner-most binding that contains the use site.
    """
    def before(self):
        # Manually construct the PrimFunc body, as SSA violations are
        # not valid TIR, and may not be expressible in future versions
        # of TVMSCript.
        var = tir.Var("var", "int32")
        # The inner let re-binds the same Var that the outer let binds.
        inner_let = tir.LetStmt(var, 16, tir.Evaluate(var))
        outer_let = tir.LetStmt(
            var,
            32,
            tir.SeqStmt(
                [
                    tir.Evaluate(var),
                    inner_let,
                    tir.Evaluate(var),
                ]
            ),
        )
        func = tir.PrimFunc([], outer_let)
        return func
    def expected(self):
        # Uses inside the inner let refer to the inner binding; the
        # surrounding uses keep referring to the outer binding.
        @T.prim_func
        def func():
            with T.LetStmt(T.int32(32)) as outer:
                T.evaluate(outer)
                with T.LetStmt(T.int32(16)) as inner:
                    T.evaluate(inner)
                T.evaluate(outer)
        return func
class TestReusedVarAcrossModule(BaseBeforeAfter):
    """De-duplicate Var bindings across entire module"""
    def before(self):
        @T.prim_func
        def func():
            with T.LetStmt(10) as var:
                T.evaluate(var)
        # The same PrimFunc object (and therefore the same Var) appears twice.
        return tvm.IRModule({"func_a": func, "func_b": func})
    def expected(self):
        # Each function gets its own copy with a distinct bound variable.
        @I.ir_module
        class mod:
            @T.prim_func
            def func_a():
                var = T.int32(10)
                T.evaluate(var)
            @T.prim_func
            def func_b():
                var = T.int32(10)
                T.evaluate(var)
        return mod
class TestReusedParameter(BaseBeforeAfter):
    """De-duplicate Var usage in parameters
    In this test, the same `tir.Var` instance is used for the
    parameter `n` in both functions.
    """
    def before(self):
        @T.prim_func
        def func(n: T.int32):
            T.evaluate(n)
        # Registering the same PrimFunc twice shares the parameter Var.
        return tvm.IRModule({"func_a": func, "func_b": func})
    def expected(self):
        # Each function ends up with its own distinct parameter Var.
        @I.ir_module
        class mod:
            @T.prim_func
            def func_a(n: T.int32):
                T.evaluate(n)
            @T.prim_func
            def func_b(n: T.int32):
                T.evaluate(n)
        return mod
class TestReusedBufferObj(BaseBeforeAfter):
    """De-duplicate buffer usage across entire module"""
    def before(self):
        @T.prim_func
        def func(a: T.handle("float32")):
            A = T.Buffer(shape=1, dtype="float32", data=a)
            T.evaluate(A[0])
        # The same PrimFunc (and its Buffer object) is shared by both entries.
        return tvm.IRModule({"func_a": func, "func_b": func})
    def expected(self):
        # Each function receives its own Buffer object after de-duplication.
        @I.ir_module
        class mod:
            @T.prim_func
            def func_a(a: T.handle("float32")):
                A = T.Buffer(shape=1, dtype="float32", data=a)
                T.evaluate(A[0])
            @T.prim_func
            def func_b(a: T.handle("float32")):
                A = T.Buffer(shape=1, dtype="float32", data=a)
                T.evaluate(A[0])
        return mod
class TestReusedBufferParameter(BaseBeforeAfter):
    """De-duplicate buffer_map across entire module"""
    def before(self):
        @T.prim_func
        def func(A: T.Buffer(1, "float32")):
            T.evaluate(A[0])
        # The shared PrimFunc means both entries share one buffer_map entry.
        return tvm.IRModule({"func_a": func, "func_b": func})
    def expected(self):
        @I.ir_module
        class mod:
            @T.prim_func
            def func_a(A: T.Buffer(1, "float32")):
                T.evaluate(A[0])
            @T.prim_func
            def func_b(A: T.Buffer(1, "float32")):
                T.evaluate(A[0])
        return mod
def test_no_change_if_already_ssa():
    """ConvertSSA must be a no-op (same object, not just an equal copy) on SSA input."""
    @I.ir_module
    class mod:
        @T.prim_func
        def func(A: T.Buffer(1, "float32")):
            T.evaluate(A[0])
    transformed = tvm.tir.transform.ConvertSSA()(mod)
    tvm.ir.assert_structural_equal(mod, transformed)
    # Identity check: the pass must return the very same module instance.
    assert mod.same_as(transformed)
class TestDedupAutoBroadcastBuffer(BaseBeforeAfter):
    """De-dup auto-broadcast buffers
    Auto-broadcast buffers can define additional variables during the
    `Buffer::Buffer` constructor for the strides. This is intended to
    be used for match buffers, where these variables are defined based
    on the argument being passed in.
    These additional variables can cause errors when copying a buffer
    with the `Buffer::Buffer` constructor. If a buffer has non-empty
    shape, empty strides, and kAutoBroadcast type, then the resulting
    buffer will have additional strides defined. Such a buffer can
    result from lowering of a scalar buffer, which will be flattened
    to a shape of [1].
    Previous implementations of ConvertSSA incorrectly handled this
    case, resulting in undefined stride variables.
    """
    def _make_func(self):
        # Lowering flattens the scalar match buffer, producing the
        # auto-broadcast buffer shape described in the class docstring.
        @T.prim_func
        def func(a: T.handle):
            A = T.match_buffer(a, shape=(), dtype="float32", buffer_type="auto")
            A[()] = 1.0
        return tvm.lower(func)["main"]
    def before(self):
        # The same lowered function object is registered twice.
        func = self._make_func()
        return tvm.IRModule({"func_a": func, "func_b": func})
    def expected(self):
        return tvm.IRModule({"func_a": self._make_func(), "func_b": self._make_func()})
if __name__ == "__main__":
    # Run every test in this file through the TVM/pytest harness.
    tvm.testing.main()
| 7,277 | 27.653543 | 87 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_module_property.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.runtime._ffi_api
import tvm.target._ffi_api
def checker(mod, expected):
    """Assert that each runtime-module property matches the expected dict."""
    for prop in ("is_binary_serializable", "is_runnable", "is_dso_exportable"):
        assert getattr(mod, prop) == expected[prop]
def create_csource_module():
    # An empty C-source module: DSO-exportable, but not binary-serializable
    # and not directly runnable (see the expectations in test_property).
    return tvm.runtime._ffi_api.CSourceModuleCreate("", "cc", [], None)
def create_llvm_module():
    """Build a trivial element-wise add-one kernel as an LLVM runtime module."""
    inp = te.placeholder((1024,), name="A")
    out = te.compute(inp.shape, lambda *i: inp(*i) + 1.0, name="B")
    sched = te.create_schedule(out.op)
    return tvm.build(sched, [inp, out], "llvm", name="myadd0")
def create_aot_module():
    # The AOT executor codegen module, fetched through the global registry
    # (runnable, but not binary-serializable or DSO-exportable per test_property).
    return tvm.get_global_func("relay.build_module._AOTExecutorCodegen")()
def test_property():
    """Each module kind reports the expected serializability/runnability flags."""
    cases = [
        (
            create_csource_module(),
            {"is_binary_serializable": False, "is_runnable": False, "is_dso_exportable": True},
        ),
        (
            create_llvm_module(),
            {"is_binary_serializable": False, "is_runnable": True, "is_dso_exportable": True},
        ),
        (
            create_aot_module(),
            {"is_binary_serializable": False, "is_runnable": True, "is_dso_exportable": False},
        ),
    ]
    for mod, expected in cases:
        checker(mod, expected)
if __name__ == "__main__":
    # Run every test in this file through the TVM/pytest harness.
    tvm.testing.main()
| 2,049 | 31.539683 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_ptx_cp_async.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
# Copies a 32x128 fp16 tile through shared memory using cp.async: each of the
# 32 threads stages its 128-element row, then writes it back out to B.
@T.prim_func
def ptx_cp_async(A: T.Buffer((32, 128), "float16"), B: T.Buffer((32, 128), "float16")) -> None:
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    bx = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(bx, 1)
    T.launch_thread(tx, 32)
    with T.block():
        A_shared = T.alloc_buffer([32, 128], "float16", scope="shared")
        T.reads(A[0:32, 0:128])
        T.writes(B[0:32, 0:128])
        # 16 async copies of 16 bytes (8 fp16 elements) cover one 128-element row.
        for i in range(16):
            T.evaluate(
                T.ptx_cp_async(
                    A_shared.data, tx * 128 + 8 * i, A.data, tx * 128 + 8 * i, 16, dtype="float16"
                )
            )
        # TODO(masahi): Remove dtype requirement from TVMScript parser
        # Commit the issued copies and wait until they complete before reading.
        T.evaluate(T.ptx_commit_group(dtype=""))
        T.evaluate(T.ptx_wait_group(0, dtype=""))
        for i in range(128):
            B[tx, i] = A_shared[tx, i]
@tvm.testing.requires_cuda_compute_version(8)
def test_ptx_cp_async():
    """A random array round-trips unchanged through the cp.async kernel."""
    mod = tvm.build(ptx_cp_async, target="cuda")
    dev = tvm.cuda(0)
    src = np.random.rand(32, 128).astype("float16")
    a_dev = tvm.nd.array(src, device=dev)
    b_dev = tvm.nd.array(np.zeros((32, 128), dtype="float16"), device=dev)
    mod(a_dev, b_dev)
    # The kernel only copies, so the output must equal the input exactly.
    tvm.testing.assert_allclose(b_dev.numpy(), src)
if __name__ == "__main__":
    # Go through the test harness instead of calling the test directly, so the
    # @tvm.testing.requires_cuda_compute_version(8) requirement is honored as a
    # skip rather than surfacing as an unhandled exception on unsupported GPUs.
    # This also matches the convention used by the sibling test files.
    tvm.testing.main()
| 2,260 | 33.257576 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_record.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""test the correctness of dump and load of data log"""
from io import StringIO
from os import PathLike
import time
from tvm.contrib import utils
from tvm import autotvm
from tvm.autotvm.measure import MeasureInput, MeasureResult, MeasureErrorNo
from tvm.autotvm.record import encode, decode, ApplyHistoryBest, measure_str_key
from tvm.testing.autotvm import get_sample_task
def test_load_dump():
    """Round-trip a measure record through encode/decode for both protocols."""
    task, target = get_sample_task()
    inp = MeasureInput(target, task, task.config_space.get(0))
    result = MeasureResult(
        (2.0, 2.23, 0.23, 0.123, 0.234, 0.123), MeasureErrorNo.NO_ERROR, 2.3, time.time()
    )
    for protocol in ["json", "pickle"]:
        row = encode(inp, result, protocol=protocol)
        inp_2, result_2 = decode(row, protocol=protocol)
        # The measure key identifies the (target, task, config) triple.
        assert measure_str_key(inp) == measure_str_key(inp_2), "%s vs %s" % (
            measure_str_key(inp),
            measure_str_key(inp_2),
        )
        assert result.costs == result_2.costs
        assert result.error_no == result_2.error_no
        assert result.timestamp == result_2.timestamp
def test_file_io():
    """Write records via the log_to_file callback and load them back from disk."""
    temp = utils.tempdir()
    file_path = temp.relpath("temp.log")
    tsk, target = get_sample_task()
    inputs = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(0, 10)]
    results = [MeasureResult((i,), 0, 0, 0) for i in range(0, 10)]
    invalid_inp = MeasureInput(target, tsk, tsk.config_space.get(10))
    invalid_res = MeasureResult((10,), 0, 0, 0)
    # Erase the entity map to test if it will be ignored when loading back.
    invalid_inp.config._entity_map = {}
    with open(file_path, "w") as fo:
        cb = autotvm.callback.log_to_file(fo)
        cb(None, inputs, results)
        cb(None, [invalid_inp], [invalid_res])
    # Loaded records must match the valid inputs in order.
    ref = zip(inputs, results)
    for x, y in zip(ref, autotvm.record.load_from_file(file_path)):
        assert x[1] == y[1]
    # Confirm functionality of multiple file loads
    hist_best = ApplyHistoryBest([file_path, file_path])
    x = hist_best.query(target, tsk.workload)
    assert str(x) == str(inputs[0][2])
def test_apply_history_best(tmpdir):
    """ApplyHistoryBest accepts Path, str, StringIO, and (input, result) iterables."""
    tsk, target = get_sample_task()
    best = str(tsk.config_space.get(2))
    inputs_batch_1 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(3)]
    results_batch_1 = [MeasureResult((i,), 0, 0, 0) for i in range(1, 3)]
    # Config 2 receives the lowest cost (0.5), so it must be selected as best.
    results_batch_1.append(MeasureResult((0.5,), 0, 2.3, 0))
    # Write data out to file
    filepath_batch_1 = tmpdir / "batch_1.log"
    with open(filepath_batch_1, "w") as file:
        autotvm.callback.log_to_file(file)(None, inputs_batch_1, results_batch_1)
    # Load best results from Path
    assert isinstance(filepath_batch_1, PathLike)
    hist_best = ApplyHistoryBest(filepath_batch_1)
    assert str(hist_best.query(target, tsk.workload)) == best
    # Load best results from str(Path)
    hist_best = ApplyHistoryBest(str(filepath_batch_1))
    assert str(hist_best.query(target, tsk.workload)) == best
    # Write data into StringIO buffer
    stringio_batch_1 = StringIO()
    assert isinstance(filepath_batch_1, PathLike)
    callback = autotvm.callback.log_to_file(stringio_batch_1)
    callback(None, inputs_batch_1, results_batch_1)
    stringio_batch_1.seek(0)
    # Load best results from strIO
    hist_best = ApplyHistoryBest(stringio_batch_1)
    assert str(hist_best.query(target, tsk.workload)) == best
    # Load best result from list of tuples (MeasureInput, MeasureResult)
    hist_best = ApplyHistoryBest(list(zip(inputs_batch_1, results_batch_1)))
    assert str(hist_best.query(target, tsk.workload)) == best
    # Same thing, but iterable instead of list (i.e. no subscripting)
    hist_best = ApplyHistoryBest(zip(inputs_batch_1, results_batch_1))
    assert str(hist_best.query(target, tsk.workload)) == best
def test_apply_history_best_multiple_batches(tmpdir):
    """ApplyHistoryBest merges records coming from several files or iterables."""
    tsk, target = get_sample_task()
    # Config 2 (cost 0.5, written in batch 2) is the global best across batches.
    best = str(tsk.config_space.get(2))
    inputs_batch_1 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(2)]
    results_batch_1 = [MeasureResult((i,), 0, 0, 0) for i in range(1, 3)]
    filepath_batch_1 = tmpdir / "batch_1.log"
    with open(filepath_batch_1, "w") as file:
        autotvm.callback.log_to_file(file)(None, inputs_batch_1, results_batch_1)
    inputs_batch_2 = [MeasureInput(target, tsk, tsk.config_space.get(i)) for i in range(2, 4)]
    results_batch_2 = [MeasureResult((0.5,), 0, 0, 0), MeasureResult((3,), 0, 0, 0)]
    filepath_batch_2 = tmpdir / "batch_2.log"
    with open(filepath_batch_2, "w") as file:
        autotvm.callback.log_to_file(file)(None, inputs_batch_2, results_batch_2)
    # Check two Path filepaths works
    hist_best = ApplyHistoryBest([filepath_batch_1, filepath_batch_2])
    assert str(hist_best.query(target, tsk.workload)) == best
    # Check that an arbitrary Iterable of Paths works
    # Calling zip() on a single list gives a non-subscriptable Iterable
    hist_best = ApplyHistoryBest(zip([filepath_batch_1, filepath_batch_2]))
    assert str(hist_best.query(target, tsk.workload)) == best
    # Check that Iterable of Iterable of tuples is correctly merged
    hist_best = ApplyHistoryBest(
        zip(
            [
                zip(inputs_batch_1, results_batch_1),
                zip(inputs_batch_2, results_batch_2),
            ]
        )
    )
    assert str(hist_best.query(target, tsk.workload)) == best
if __name__ == "__main__":
    # Bug fix: test_apply_history_best requires the pytest `tmpdir` fixture, so
    # the previous direct call `test_apply_history_best()` raised a TypeError,
    # and test_apply_history_best_multiple_batches was never run at all.
    # Delegate to the TVM test harness, which runs every test in this file with
    # the fixtures supplied by pytest.
    import tvm.testing

    tvm.testing.main()
| 6,307 | 37.463415 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test Meta Schedule Builder """
import os
import sys
import time
from typing import List
import pytest
import tvm.testing
from tvm import script
from tvm._ffi import register_func
from tvm.meta_schedule.builder import (
BuilderInput,
BuilderResult,
LocalBuilder,
PyBuilder,
)
from tvm.runtime import Module
from tvm.script import tir as T
from tvm.target import Target
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
# A 1024x1024x1024 fp32 matmul workload used as a builder input.
@script.ir_module
class MatmulModule:
    @T.prim_func
    def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "matmul", "tir.noalias": True})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# A matmul followed by ReLU, exercising a two-stage builder input.
@script.ir_module
class MatmulReluModule:
    @T.prim_func
    def matmul_relu(  # pylint: disable=no-self-argument
        a: T.handle, b: T.handle, d: T.handle
    ) -> None:
        T.func_attr({"global_symbol": "matmul_relu", "tir.noalias": True})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        D = T.match_buffer(d, (1024, 1024), "float32")
        C = T.alloc_buffer((1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
        for i, j in T.grid(1024, 1024):
            with T.block("relu"):
                vi, vj = T.axis.remap("SS", [i, j])
                D[vi, vj] = T.max(C[vi, vj], 0.0)
# A batched (16x) 128x128x128 matmul workload used as a builder input.
@script.ir_module
class BatchMatmulModule:
    @T.prim_func
    def batch_matmul(  # pylint: disable=no-self-argument
        a: T.handle, b: T.handle, c: T.handle
    ) -> None:
        T.func_attr({"global_symbol": "batch_matmul", "tir.noalias": True})
        A = T.match_buffer(a, [16, 128, 128])
        B = T.match_buffer(b, [16, 128, 128])
        C = T.match_buffer(c, [16, 128, 128])
        for n, i, j, k in T.grid(16, 128, 128, 128):
            with T.block("update"):
                vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
                with T.init():
                    C[vn, vi, vj] = 0.0
                C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
def _check_build_results(builder_results: List[BuilderResult]):
    """Assert every build succeeded, then delete each artifact and its directory."""
    for res in builder_results:
        # Success means an artifact path with no error message.
        assert res.artifact_path is not None
        assert res.error_msg is None
        os.remove(res.artifact_path)
        os.rmdir(os.path.dirname(res.artifact_path))
def test_meta_schedule_single_build():
    """A single module builds successfully through the local builder."""
    inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    results = LocalBuilder().build(inputs)
    # One result per input, each a successful build.
    assert len(results) == len(inputs)
    _check_build_results(results)
def test_meta_schedule_multiple_build():
    """Several different modules build successfully in one batch."""
    workloads = [MatmulModule, MatmulReluModule, BatchMatmulModule]
    inputs = [BuilderInput(mod, Target("llvm")) for mod in workloads]
    results = LocalBuilder().build(inputs)
    assert len(results) == len(inputs)
    _check_build_results(results)
def test_meta_schedule_error_handle_test_builder():
    """Test the error handling during building"""
    class TestBuilder(PyBuilder):
        # A builder stub that fails every build with a fixed error message.
        def build(  # pylint: disable=no-self-use
            self,
            build_inputs: List[BuilderInput],
        ) -> List[BuilderResult]:
            return [BuilderResult(None, "error") for w in build_inputs]
    builder = TestBuilder()
    builder_inputs = [
        BuilderInput(MatmulModule, Target("llvm")),
        BuilderInput(MatmulReluModule, Target("llvm")),
        BuilderInput(BatchMatmulModule, Target("llvm")),
    ]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    # Failures must be reported via error_msg rather than raising.
    for result in builder_results:
        artifact_path = result.artifact_path
        error_msg = result.error_msg
        assert artifact_path is None
        assert error_msg == "error"
def test_meta_schedule_error_handle_build_func():
    """Test the error handling during building"""
    def initializer():
        # Registered in the worker process; raises whenever a build is attempted.
        @register_func("meta_schedule.builder.test_build")
        def test_build(mod: Module, target: Target, _) -> None:  # pylint: disable=unused-variable
            raise ValueError("Builder intended Test Error (build func).")
    builder = LocalBuilder(f_build="meta_schedule.builder.test_build", initializer=initializer)
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    # The exception must be captured into error_msg, not propagated.
    for result in builder_results:
        artifact_path = result.artifact_path
        error_msg = result.error_msg
        assert artifact_path is None
        assert error_msg.startswith("LocalBuilder: An exception occurred")
def test_meta_schedule_error_handle_export_func():
    """Test the error handling when the export function raises."""
    def initializer():
        # Registered in the worker process; raises whenever export is attempted.
        # Renamed from the copy-pasted `test_build` to match the registered key.
        @register_func("meta_schedule.builder.test_export")
        def test_export(mod: Module) -> str:  # pylint: disable=unused-variable
            raise ValueError("Builder intended Test Error (export func).")
    builder = LocalBuilder(f_export="meta_schedule.builder.test_export", initializer=initializer)
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    # The exception must be captured into error_msg, not propagated.
    for result in builder_results:
        artifact_path = result.artifact_path
        error_msg = result.error_msg
        assert artifact_path is None
        assert error_msg.startswith("LocalBuilder: An exception occurred")
def test_meta_schedule_error_handle_time_out():
    """Test the error handling of a time-out during building"""
    def initializer():
        # Sleeps for 2s, exceeding the builder's 1s timeout below.
        @register_func("meta_schedule.builder.test_time_out")
        def timeout_build(mod, target, _):  # pylint: disable=unused-argument, unused-variable
            time.sleep(2)
    builder = LocalBuilder(
        timeout_sec=1,
        f_build="meta_schedule.builder.test_time_out",
        initializer=initializer,
    )
    builder_inputs = [BuilderInput(MatmulModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    assert len(builder_results) == len(builder_inputs)
    # The timeout must be reported via error_msg, not raised.
    for result in builder_results:
        artifact_path = result.artifact_path
        error_msg = result.error_msg
        assert artifact_path is None
        assert error_msg.startswith("LocalBuilder: Timeout")
def test_meta_schedule_missing_build_func():
    """Constructing a LocalBuilder with an unregistered build function must fail."""
    with pytest.raises(ValueError):
        # "wrong-name" is not registered as a packed function.
        LocalBuilder(f_build="wrong-name")
if __name__ == "__main__":
    # Run every test in this file through the TVM/pytest harness.
    tvm.testing.main()
| 8,481 | 35.878261 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_div_to_mul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
import pytest
import numpy as np
@pytest.mark.parametrize("dtype, rtol", [("float16", 1e-3), ("float32", 1e-7), ("float64", 1e-12)])
def test_div_to_mul(dtype, rtol):
    """DivToMul rewrites x / c into x * (1 / c) with the reciprocal folded in."""
    numerator = relay.var("x", relay.TensorType((), dtype))
    divisor = relay.Constant(tvm.nd.array(np.array([1.5]).astype(dtype)))
    mod = tvm.IRModule.from_expr(numerator / divisor)
    body = relay.transform.DivToMul()(mod)["main"].body
    # The division must be replaced by a multiplication ...
    assert body.op.name == "multiply"
    # ... whose constant operand is the reciprocal of the original divisor,
    # within the precision tolerance of the tested dtype.
    np.testing.assert_allclose(body.args[1].data.numpy()[0], 1 / 1.5, rtol=rtol)
| 1,366 | 41.71875 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_set_dtype.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Input workload: C = (A * 2) + 1, staged through intermediate buffer B.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_dtype(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")):
    # Expected result of unsafe_set_dtype(B, "float16") on element_wise:
    # B becomes float16 and casts are inserted at its producer and consumer.
    B = T.alloc_buffer((128, 128), "float16")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(B[vi, vj])
            B[vi, vj] = T.cast(A[vi, vj] * 2.0, "float16")
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi, vj])
            T.writes(C[vi, vj])
            C[vi, vj] = T.cast(B[vi, vj], "float32") + 1.0
@T.prim_func
def element_wise_subregion_match(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Same workload as element_wise, but B is accessed through match_buffer
    # sub-regions so that the dtype rewrite must also update matched buffers.
    B = T.alloc_buffer((128, 128), dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            B_subregion0[()] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
            C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_dtype(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
    # Expected result of unsafe_set_dtype(B, "float16") on
    # element_wise_subregion_match: the matched sub-regions change dtype too.
    B = T.alloc_buffer((128, 128), "float16")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(B[vi, vj])
            B_subregion0 = T.match_buffer(B[vi, vj], (), "float16", offset_factor=1)
            B_subregion0[()] = T.cast(A[vi, vj] * 2.0, "float16")
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi, vj])
            T.writes(C[vi, vj])
            B_subregion1 = T.match_buffer(B[vi, vj], (), "float16", offset_factor=1)
            C[vi, vj] = T.cast(B_subregion1[()], "float32") + 1.0
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_set_dtype(use_block_name):
    """unsafe_set_dtype on the intermediate buffer rewrites it to float16."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    block = "B" if use_block_name else sch.get_block("B")
    sch.unsafe_set_dtype(block, 0, "float16")
    tvm.ir.assert_structural_equal(element_wise_set_dtype, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=element_wise)
def test_set_dtype_fail_on_output_buffer(use_block_name):
    """Changing the dtype of an output buffer must be rejected."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    block = "C" if use_block_name else sch.get_block("C")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.unsafe_set_dtype(block, 0, "float16")
def test_set_dtype_fail_on_index_out_of_bound():
    """Write-buffer indices outside the valid range must be rejected."""
    sch = tir.Schedule(element_wise, debug_mask="all")
    for bad_index in (1, -1):
        with pytest.raises(tvm.tir.ScheduleError):
            sch.unsafe_set_dtype(sch.get_block("B"), bad_index, "float64")
def test_set_dtype_subregion():
    """The dtype rewrite must also update match_buffer sub-regions of the buffer."""
    sch = tir.Schedule(element_wise_subregion_match, debug_mask="all")
    sch.unsafe_set_dtype(sch.get_block("B"), 0, "float16")
    tvm.ir.assert_structural_equal(element_wise_subregion_match_set_dtype, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=element_wise_subregion_match)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 5,046 | 39.055556 | 123 | py |
tvm | tvm-main/tests/python/unittest/test_arith_modular_set.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
def test_cast():
    """Modular-set analysis should look through integer and float casts."""
    analyzer = tvm.arith.Analyzer()
    x = te.var("x", dtype="int8")

    result = analyzer.modular_set((x * 3).astype("uint32"))
    assert result.coeff == 3
    assert result.base == 0

    # A round-trip through float32 should not lose the modular information.
    result = analyzer.modular_set((x * 3 + 1).astype("float32").astype("int32"))
    assert result.coeff == 3
    assert result.base == 1
def test_add_sub():
    """coeff/base propagation through addition, subtraction and bound variables."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x", "int64"), te.var("y", "int64")

    # 6x + 4y is always a multiple of gcd(6, 4) = 2.
    result = analyzer.modular_set(x * 6 + y * 4)
    assert result.coeff == 2
    assert result.base == 0

    # With y bound to 4x + 1, 1 - y = -4x is a multiple of 4.
    analyzer.bind(y, x * 4 + 1)
    result = analyzer.modular_set(1 - y)
    assert result.coeff == 4
    assert result.base == 0
def test_mul():
    """(4x + 2)(6y + 1) = 24xy + 4x + 12y + 2, hence coeff 4, base 2."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    result = analyzer.modular_set((x * 4 + 2) * (y * 6 + 1))
    assert result.coeff == 4
    assert result.base == 2
def test_floormod():
    """floormod(128x + 4y, 256) stays a multiple of gcd(128, 4, 256) = 4."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    result = analyzer.modular_set(tvm.tir.floormod(x * 128 + y * 4, 256))
    assert result.coeff == 4
    assert result.base == 0
def test_div_shift():
    """Division/shift only preserves modular info when rounding is known-safe."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    # not sure if x is non-negative
    tdiv = tvm.tir.truncdiv
    # truncdiv rounds toward zero, so without a sign bound nothing is known.
    m = analyzer.modular_set(tdiv(x * 4 + 2, 2))
    assert m.coeff == 1
    assert m.base == 0
    # right shift always round down so it is fine
    m = analyzer.modular_set((x * 4 + 2) >> 1)
    assert m.coeff == 2
    assert m.base == 1
    # floordiv also rounds down, independent of sign.
    fld = tvm.te.floordiv
    m = analyzer.modular_set(fld(x * 4 + 2, 2))
    assert m.coeff == 2
    assert m.base == 1
    # x is non-negative
    # Once x >= 0 is known, truncdiv behaves like floordiv here.
    analyzer.update(x, tvm.arith.ConstIntBound(0, 100))
    m = analyzer.modular_set(tdiv(x * 4 + 2, 2))
    assert m.coeff == 2
    assert m.base == 1
def test_mod():
    """truncmod vs floormod: truncmod needs a sign bound unless base == 0."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")
    tmod = tvm.tir.truncmod
    fmod = tvm.tir.floormod
    # not sure if x is non-negative
    m = analyzer.modular_set(tmod(x * 4 + 1, 4))
    assert m.coeff == 1
    assert m.base == 0
    # no need to be positive if base == 0
    m = analyzer.modular_set(tmod(x * 4, 4))
    assert m.coeff == 4
    assert m.base == 0
    # floor mod tests
    m = analyzer.modular_set(fmod(x * 4 + 3, 2))
    assert m.coeff == 2
    assert m.base == 1
    m = analyzer.modular_set(fmod(x * 4 + 3, 8))
    assert m.coeff == 4
    assert m.base == 3
    # x is non-negative
    # With x >= 0 the earlier truncmod case becomes analyzable.
    analyzer.update(x, tvm.arith.ConstIntBound(0, 100))
    m = analyzer.modular_set(tmod(x * 4 + 3, 2))
    assert m.coeff == 2
    assert m.base == 1
def test_min_max_select():
    """min/max merge the modular sets of both arms; Select falls back to coeff 1."""
    analyzer = tvm.arith.Analyzer()
    x, y = te.var("x"), te.var("y")

    result = analyzer.modular_set(tvm.te.min(x * 3, y * 9))
    assert result.coeff == 3
    assert result.base == 0

    result = analyzer.modular_set(tvm.te.max(x * 3 + 1, y * 9 + 4))
    assert result.coeff == 3
    assert result.base == 1

    result = analyzer.modular_set(tvm.tir.Select(x > 0, x * 3 + 1, y * 9 + 2))
    assert result.coeff == 1
    assert result.base == 0
def test_mix_index():
    """Mixed expressions combining add/mul/div/min in index arithmetic."""
    a = te.var("a")
    b = te.var("b")
    analyzer = tvm.arith.Analyzer()
    tdiv = tvm.tir.truncdiv
    # 4a + 6b + 7 = 2 * (2a + 3b + 3) + 1.
    m = analyzer.modular_set(a * 4 + b * 6 + 7)
    assert m.coeff == 2
    assert m.base == 1
    m = analyzer.modular_set((a * 4 + 1) * (b * 8 + 3))
    assert m.coeff == 4
    assert m.base == 3
    # Division by a non-constant denominator yields no information.
    m = analyzer.modular_set(tdiv(a * 4 + 1, b * 8 + 3))
    assert m.coeff == 1
    assert m.base == 0
    m = analyzer.modular_set((a * 4 + 1) * tdiv(b * 8, 4))
    assert m.coeff == 2
    assert m.base == 0
    m = analyzer.modular_set((a * 12 + 1) - (b * 3 * 7 + 2))
    assert m.coeff == 3
    assert m.base == 2
    # min() of a non-aligned constant kills the modular property.
    m = analyzer.modular_set(a * 12 + tvm.te.min(b * 3 * 7, 2))
    assert m.coeff == 1
    assert m.base == 0
def test_constraint_scope():
    """Constraints only apply inside their scope and are dropped on exit."""
    a = te.var("a")
    b = te.var("b")
    analyzer = tvm.arith.Analyzer()
    tmod = tvm.tir.truncmod
    with analyzer.constraint_scope(tmod(b, 4) == 2):
        m = analyzer.modular_set(b + 1)
        assert m.coeff == 4
        assert m.base == 3
        with analyzer.constraint_scope(tmod(a, 2) == 1):
            # Both constraints active: b = 4k + 2 and a = 2j + 1.
            m = analyzer.modular_set(b + a * 2)
            assert m.coeff == 4
            assert m.base == 0
        # Inner constraint on a no longer applies here.
        m = analyzer.modular_set(b + a * 2)
        assert m.coeff == 2
        assert m.base == 0
    # Outside both scopes nothing is known about b.
    m = analyzer.modular_set(b + 1)
    assert m.coeff == 1
    assert m.base == 0
def test_intersect():
    """Nested congruence constraints intersect via CRT-style combination."""
    a = te.var("a")
    analyzer = tvm.arith.Analyzer()
    tmod = tvm.tir.truncmod
    with analyzer.constraint_scope(tmod(a, 4) == 1):
        with analyzer.constraint_scope(tmod(a, 3) == 1):
            # a ≡ 1 (mod 4) and a ≡ 1 (mod 3)  =>  a ≡ 1 (mod 12).
            m = analyzer.modular_set(a)
            assert m.coeff == 12
            assert m.base == 1
    with analyzer.constraint_scope(tmod(a, 3) == 2):
        with analyzer.constraint_scope(tmod(a, 5) == 3):
            with analyzer.constraint_scope(tmod(a, 7) == 2):
                # a ≡ 2 (mod 3), 3 (mod 5), 2 (mod 7)  =>  a ≡ 23 (mod 105).
                m = analyzer.modular_set(a)
                assert m.coeff == 105
                assert m.base == 23
def test_let():
    """Let-bound variables are substituted: x = 10y implies x + 1 has coeff 10, base 1."""
    analyzer = tvm.arith.Analyzer()
    x = te.var("x")
    y = te.var("y")
    result = analyzer.modular_set(tvm.tir.Let(x, y * 10, x + 1))
    assert result.coeff == 10
    assert result.base == 1
def test_bitwise_and():
    """bitwise_and with 2^p - 1 acts like floormod by 2^p; other masks give nothing."""
    analyzer = tvm.arith.Analyzer()
    x = te.var("x")
    y = te.var("y")

    # Mask 31 = 2^5 - 1 is equivalent to floormod by 32.
    result = analyzer.modular_set((x * 16 + y * 4) & 31)
    assert result.coeff == 4
    assert result.base == 0

    # An arbitrary mask yields the trivial modular set.
    result = analyzer.modular_set((x * 16 + y * 4) & 17)
    assert result.coeff == 1
    assert result.base == 0
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 6,425 | 27.433628 | 75 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_cuda.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
from tvm import te
import numpy as np
from tvm import topi
from tvm.contrib.nvcc import have_fp16, have_int8, have_bf16
import tvm.testing
import pytest
# Thread/block axis handles shared by the schedules below.
tx = te.thread_axis("threadIdx.x")
bx = te.thread_axis("blockIdx.x")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_add():
    """Vectorized elementwise add (B = A + 1) for several dtypes/lane counts."""
    num_thread = 8

    def check_cuda(dtype, n, lanes):
        # Skip dtypes the device cannot execute.
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        if dtype == "int8" and not have_int8(tvm.cuda(0).compute_version):
            print("skip because gpu does not support int8")
            return
        # Lanes are encoded in the placeholder dtype, e.g. "float32x4".
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(xo, bx)
        s[B].bind(xi, tx)
        fun = tvm.build(s, [A, B], "cuda")
        dev = tvm.cuda(0)
        a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n, lanes)))
        c = tvm.nd.empty((n,), B.dtype, dev)
        fun(a, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)

    # Includes non-power-of-two lane counts (3, 6) on purpose.
    check_cuda("float32", 64, 2)
    check_cuda("float32", 64, 3)
    check_cuda("float32", 64, 4)
    check_cuda("int8", 64, 2)
    check_cuda("int8", 64, 3)
    check_cuda("int8", 64, 4)
    check_cuda("uint8", 64, 2)
    check_cuda("uint8", 64, 3)
    check_cuda("uint8", 64, 4)
    check_cuda("float16", 64, 2)
    check_cuda("float16", 64, 4)
    check_cuda("float16", 64, 6)
    check_cuda("float16", 64, 8)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_bf16_vectorize_add():
    """Vectorized add for bfloat16, with numpy-side uint16 emulation of bf16."""
    if not have_bf16(tvm.cuda(0).compute_version):
        print("skip because gpu does not support bf16")
        return
    num_thread = 8

    def np_float2np_bf16(arr):
        """Convert a numpy array of float to a numpy array
        of bf16 in uint16"""
        orig = arr.view("<u4")
        # Round-to-nearest-even on the truncated 16 low bits.
        bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
        return np.right_shift(orig + bias, 16).astype("uint16")

    def np_bf162np_float(arr):
        """Convert a numpy array of bf16 (uint16) to a numpy array
        of float"""
        u32 = np.left_shift(arr.astype("uint32"), 16)
        return u32.view("<f4")

    def check_cuda(n, lanes):
        A = te.placeholder((n,), name="A", dtype="bfloat16x%d" % lanes)
        B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(xo, bx)
        s[B].bind(xi, tx)
        # Disable bf16 legalization passes so native bf16 codegen is exercised.
        with tvm.transform.PassContext(
            disabled_pass=["tir.BF16Promote", "tir.BF16CastElimination", "tir.BF16TypeLowering"]
        ):
            fun = tvm.build(s, [A, B], "cuda")
        dev = tvm.cuda(0)
        np_a = np.random.uniform(size=(n, lanes)).astype("float32")
        # Round-trip the input through bf16 so the reference matches device precision.
        np_a = np_bf162np_float(np_float2np_bf16(np_a))
        a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_float2np_bf16(np_a))
        c = tvm.nd.empty((n,), B.dtype, dev)
        fun(a, c)
        c = tvm.nd.empty((n, lanes), "uint16", dev).copyfrom(c)
        tvm.testing.assert_allclose(c.numpy(), np_float2np_bf16(np_a + 1))

    check_cuda(64, 2)
    check_cuda(64, 4)
    check_cuda(64, 6)
    check_cuda(64, 8)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_multiply_add():
    """int8x4 dot-product accumulate via the CUDA __dp4a intrinsic."""
    num_thread = 8

    def check_cuda(dtype, n, lanes):
        if dtype == "int8" and not have_int8(tvm.cuda(0).compute_version):
            print("skip because gpu does not support int8")
            return
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.placeholder((n,), name="B", dtype="%sx%d" % (dtype, lanes))
        C = te.placeholder((n,), name="C", dtype="int32")
        # D[i] = dot(A[i], B[i]) + C[i], computed by the __dp4a hardware intrinsic.
        D = te.compute(
            (n,), lambda i: tvm.tir.call_pure_extern("int32", "__dp4a", A[i], B[i], C[i]), name="D"
        )
        s = te.create_schedule(D.op)
        xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
        s[D].bind(xo, bx)
        s[D].bind(xi, tx)
        fun = tvm.build(s, [A, B, C, D], "cuda")
        np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
        np_b = np.random.randint(low=-128, high=127, size=(n, lanes))
        np_c = np.random.randint(low=0, high=127, size=(n,))
        # Reference: per-element dot product plus accumulator.
        np_d = [sum(x * y) + z for x, y, z in zip(np_a, np_b, np_c)]
        dev = tvm.cuda(0)
        a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_a)
        b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np_b)
        c = tvm.nd.empty((n,), C.dtype, dev).copyfrom(np_c)
        d = tvm.nd.empty((n,), D.dtype, dev)
        fun(a, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), np_d)

    check_cuda("int8", 64, 4)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load():
    """Identity copy through vectorized int8 loads for several lane counts."""
    num_thread = 8

    def check_cuda(dtype, n, lanes):
        dev = tvm.cuda(0)
        A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes))
        B = te.compute((n,), lambda i: A[i], name="B")
        s = te.create_schedule(B.op)
        block, thread = s[B].split(B.op.axis[0], factor=num_thread)
        s[B].bind(block, bx)
        s[B].bind(thread, tx)
        fun = tvm.build(s, [A, B], "cuda", name="vector_load")
        np_a = np.random.randint(low=-128, high=127, size=(n, lanes))
        a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np_a)
        b = tvm.nd.empty((n,), B.dtype, dev)
        fun(a, b)
        # Output must be bit-identical to the input.
        tvm.testing.assert_allclose(a.numpy(), b.numpy())

    check_cuda("int8", 64, 2)
    check_cuda("int8", 64, 3)
    check_cuda("int8", 64, 4)
    check_cuda("int8", 64, 8)
    check_cuda("int8", 64, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_make_int8():
    """Broadcasting an int8 constant into vectorized lanes (make_int8xN codegen)."""

    def check_cuda(n, value, lanes):
        dtype = "int8"
        dev = tvm.cuda(0)
        A = te.compute((n, lanes), lambda i, j: tvm.tir.const(value, dtype=dtype))
        s = te.create_schedule(A.op)
        y, x = s[A].op.axis
        s[A].vectorize(x)
        s[A].bind(y, bx)
        fun = tvm.build(s, [A], "cuda", name="make_int8x4")
        np_a = np.full((n, lanes), value, dtype=dtype)
        a = tvm.nd.empty(np_a.shape, dtype, dev)
        fun(a)
        np.testing.assert_equal(a.numpy(), np_a)

    # Covers lane counts 4, 3 and 2 with positive, zero and negative values.
    check_cuda(64, np.int8(0xAB), 4)
    check_cuda(64, 0, 4)
    check_cuda(64, -3, 4)
    check_cuda(64, np.int8(0xAB), 3)
    check_cuda(64, 0, 3)
    check_cuda(64, -3, 3)
    check_cuda(64, np.int8(0xAB), 2)
    check_cuda(64, 0, 2)
    check_cuda(64, -3, 2)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_make_int4():
    """Broadcasting an int4 constant into vectorized lanes (sub-byte codegen)."""

    def check_cuda(n, value, lanes):
        dtype = "int4"
        dev = tvm.cuda(0)
        A = te.compute((n, lanes), lambda i, j: tvm.tir.const(value, dtype=dtype))
        s = te.create_schedule(A.op)
        y, x = s[A].op.axis
        s[A].vectorize(x)
        s[A].bind(y, bx)
        kernel_name = "make_int4x" + str(lanes)
        fun = tvm.build(s, [A], "cuda", name=kernel_name)
        # Reference array uses int8 since numpy has no int4 type.
        np_a = np.full((n, lanes), value, dtype="int8")
        a = tvm.nd.empty((n, lanes), dtype, dev)
        fun(a)
        np.testing.assert_equal(a.numpy(), np_a)

    # Values 1 and 7 fit in a signed 4-bit range.
    check_cuda(64, 1, 4)
    check_cuda(64, 7, 4)
    check_cuda(64, 1, 8)
    check_cuda(64, 7, 8)
    check_cuda(64, 1, 16)
    check_cuda(64, 7, 16)
    check_cuda(64, 1, 32)
    check_cuda(64, 7, 32)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_inf_nan():
    """Compile-only check that inf/nan literals survive CUDA codegen."""
    target = "cuda"

    def check_inf_nan(dev, n, value, dtype):
        A = te.placeholder((n,), name="A", dtype=dtype)
        inf_value = tvm.tir.const(value, dtype=dtype)
        C = te.compute((n,), lambda i: inf_value, name="C")
        s = te.create_schedule(C.op)
        s[C].bind(s[C].op.axis[0], tx)
        fun = tvm.build(s, [A, C], target)
        a = tvm.nd.empty((n,), A.dtype, dev)
        c = tvm.nd.empty((n,), A.dtype, dev)
        # Only need to test compiling here
        fun(a, c)

    dev = tvm.device(target, 0)
    check_inf_nan(dev, 1, -float("inf"), "float32")
    check_inf_nan(dev, 1, -float("inf"), "float64")
    check_inf_nan(dev, 1, float("inf"), "float32")
    check_inf_nan(dev, 1, float("inf"), "float64")
    check_inf_nan(dev, 1, float("nan"), "float32")
    check_inf_nan(dev, 1, float("nan"), "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_shuffle():
    """Custom lowering pass that turns a vectorized loop into a Shuffle of b's lanes."""
    idxm = tvm.tir.indexmod
    a = te.placeholder((64,), "int32")
    b = te.placeholder((64,), "int32")
    # Each group of 4 reads b's lanes in reverse order.
    c = te.compute((64,), lambda x: a[x] + b[x - idxm(x, 4) + (3 - idxm(x, 4))])
    sch = te.create_schedule(c.op)
    x = c.op.axis[0]
    xo, xi = sch[c].split(x, 4)
    thrx = te.thread_axis("threadIdx.x")
    sch[c].bind(xo, thrx)
    sch[c].vectorize(xi)

    def MyVectorize():
        # Rewrites each vectorized For into a single vector store whose b-operand
        # is an explicit tir.Shuffle with reversed lane indices.
        def vectorizer(op):
            if op.kind == tvm.tir.ForKind.VECTORIZED:
                idx = tvm.tir.Ramp(4 * thrx.var, 1, 4)
                store = op.body
                value = store.value
                new_a = tvm.tir.BufferLoad(value.a.buffer, [idx])
                bs, ids = [], []
                for i in range(4):
                    bs.append(tvm.tir.BufferLoad(value.b.buffer, [4 * thrx.var + i]))
                    ids.append(3 - i)
                new_b = tvm.tir.Shuffle(bs, ids)
                return tvm.tir.BufferStore(store.buffer, new_a + new_b, [idx])
            return None

        def _transform(f, *_):
            return f.with_body(
                tvm.tir.stmt_functor.ir_transform(f.body, None, vectorizer, ["tir.For"])
            )

        return tvm.tir.transform.prim_func_pass(_transform, opt_level=0, name="MyVectorize")

    with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, MyVectorize())]}):
        module = tvm.build(sch, [a, b, c], target="cuda")
    a_ = np.array(list(range(64)), dtype="int32")
    b_ = np.array((list(range(4))[::-1]) * 16, dtype="int32")
    c_ = np.zeros((64,), dtype="int32")
    # b holds reversed 0..3 groups, so the shuffle restores ascending order.
    ref = a_ + np.array((list(range(4))) * 16, dtype="int32")
    nda, ndb, ndc = [tvm.nd.array(i, tvm.cuda(0)) for i in [a_, b_, c_]]
    module(nda, ndb, ndc)
    tvm.testing.assert_allclose(ndc.numpy(), ref)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction1(target, dev):
    """Cross-thread sum over one reduce axis, for several thread counts."""
    n = te.var("n")
    m = te.var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "m")
    B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")

    def sched(nthd):
        s = te.create_schedule(B.op)
        # The outer part of the reduce axis is distributed across threads.
        ko, _ = s[B].split(B.op.reduce_axis[0], nparts=nthd)
        s[B].bind(ko, te.thread_axis("threadIdx.x"))
        s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
        func = tvm.build(s, [A, B], target)
        return func

    def verify(nthd):
        func = sched(nthd)
        nn = 3
        # checks three typical cases
        vals = [nthd - 1, nthd, nthd + 1]
        for kk in [x for x in vals]:
            size = (nn, kk)
            a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
            b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
            func(a, b)
            tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-3)

    verify(16)
    verify(32)
    verify(64)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_crossthread_reduction2(target, dev):
    """Cross-thread sum over two reduce axes bound to threadIdx.x / threadIdx.y."""
    n = te.var("n")
    k0 = te.var("k0")
    k1 = te.var("k1")
    A = te.placeholder((n, k0, k1), name="A")
    # Intentionally rebinds k0/k1 from size vars to reduce axes of those sizes.
    k0 = te.reduce_axis((0, k0), "k0")
    k1 = te.reduce_axis((0, k1), "k1")
    B = te.compute((n,), lambda i: te.sum(A[i, k0, k1], axis=(k0, k1)), name="B")

    def sched(nthdx, nthdy):
        s = te.create_schedule(B.op)
        k0o, _ = s[B].split(B.op.reduce_axis[0], nparts=nthdx)
        k1o, _ = s[B].split(B.op.reduce_axis[1], nparts=nthdy)
        s[B].bind(k0o, te.thread_axis("threadIdx.x"))
        s[B].bind(k1o, te.thread_axis("threadIdx.y"))
        s[B].bind(B.op.axis[0], te.thread_axis("blockIdx.x"))
        func = tvm.build(s, [A, B], target)
        return func

    def verify(nthdx, nthdy):
        func = sched(nthdx, nthdy)
        nn = 3
        # checks three typical cases
        vx = [nthdx - 1, nthdx, nthdx + 1]
        vy = [nthdy - 1, nthdy, nthdy + 1]
        for kk0, kk1 in [(x, y) for x in vx for y in vy]:
            size = (nn, kk0, kk1)
            a = tvm.nd.array(np.random.uniform(size=size).astype(A.dtype), dev)
            b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev)
            func(a, b)
            tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=(1, 2)), rtol=1e-3)

    verify(16, 16)
    verify(32, 32)
    verify(16, 32)
    verify(32, 16)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction_binding():
    """Compile-only check: bind after reordering a reduce axis before a data axis."""
    reduce_k = te.reduce_axis((0, 32), "k")
    A = te.placeholder((96, 32), name="A")
    B = te.compute((96,), lambda m: te.sum(A[m, reduce_k], axis=reduce_k), name="B")
    schedule = te.create_schedule(B.op)
    stage = schedule[B]
    stage.reorder(B.op.reduce_axis[0], B.op.axis[0])
    outer, _ = stage.split(B.op.axis[0], 32)
    stage.bind(outer, te.thread_axis("blockIdx.x"))
    tvm.build(schedule, [A, B], "cuda")
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_rfactor_predicates(target, dev):
    """Compile-only check: nested rfactor with a store predicate on thread 0.

    Uses a non-divisible extent (129) so boundary predicates are required.
    """
    n = te.reduce_axis((0, 129), "n")
    A = te.placeholder((129,), name="A")
    B = te.compute((1,), lambda b: te.sum(A[n], axis=n), name="B")
    s = te.create_schedule(B.op)
    _, ni = s[B].split(s[B].op.reduce_axis[0], factor=8)
    BF = s.rfactor(B, ni, 0)
    # Only thread 0 writes the final result.
    s[B].set_store_predicate(tx.var.equal(0))
    s[B].bind(s[B].op.reduce_axis[0], tx)
    s[B].bind(s[B].op.axis[0], bx)
    s[BF].compute_at(s[B], s[B].op.axis[0])
    _, noi = s[BF].split(s[BF].op.reduce_axis[0], factor=2)
    # Second-level rfactor on the already-factored tensor.
    BF2 = s.rfactor(BF, noi, 0)
    s[BF].bind(s[BF].op.axis[0], tx)
    s[BF2].compute_at(s[BF], s[BF].op.axis[1])
    fcuda = tvm.build(s, [A, B], target)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_const_float_to_half():
    """Comparing an fp16 tensor against an fp16 constant (const lowering to half)."""
    # This import is required to use nvcc to perform code gen;
    # otherwise it is found that the code gen is done by nvrtc.
    from tvm import autotvm

    shape = (2, 3, 4)
    a = te.placeholder(shape, dtype="float16", name="a")
    b = tvm.tir.const(0.5, dtype="float16")
    c = te.compute(shape, lambda i, j, k: a[i, j, k] > b, name="c")
    s = te.create_schedule(c.op)
    axes = [axis for axis in c.op.axis]
    fused = s[c].fuse(*axes)
    # NOTE: these locals intentionally shadow the module-level bx/tx handles.
    bx, tx = s[c].split(fused, factor=64)
    s[c].bind(bx, te.thread_axis("blockIdx.x"))
    s[c].bind(tx, te.thread_axis("threadIdx.x"))
    func = tvm.build(s, [a, c], "cuda")
    dev = tvm.cuda(0)
    a_np = np.random.uniform(size=shape).astype(a.dtype)
    c_np = np.zeros(shape=shape, dtype=c.dtype)
    a = tvm.nd.array(a_np, dev)
    c = tvm.nd.array(c_np, dev)
    func(a, c)
    np.testing.assert_equal(c.numpy(), a_np > b.value)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_reduction():
    """Full reduction of (a+b) + (a*b) through topi's cuda reduce schedule."""

    def check(device, dtype, m=32, n=32):
        if not tvm.testing.device_enabled(device):
            print("Skipping", device)
            return
        dev = tvm.device(device, 0)
        a = te.placeholder((m, n), name="a", dtype=dtype)
        b = te.placeholder((m, n), name="b", dtype=dtype)
        c = a + b
        d = a * b
        e = topi.elemwise_sum([c, d])
        g = topi.sum(e)
        with tvm.target.Target(device):
            sg = topi.cuda.schedule_reduce(g)
            func = tvm.build(sg, [a, b, g], device)
        a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
        b_np = np.random.uniform(size=(m, n)).astype(b.dtype)
        g_np = np.sum(np.add(a_np * b_np, a_np + b_np))
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(b_np, dev)
        g_nd = tvm.nd.array(np.zeros(g_np.shape, dtype=g_np.dtype), dev)
        func(a_nd, b_nd, g_nd)
        tvm.testing.assert_allclose(g_nd.numpy(), g_np, rtol=1e-3)

    check("cuda", "float32")
    check("rocm", "float32")
    check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_mix_threaded_and_normal_reduction():
    """Sum with one reduce axis bound to threadIdx.x and the other left serial."""

    def check(device, dtype, m=32, n=32):
        if not tvm.testing.device_enabled(device):
            print("Skipping", device)
            return
        dev = tvm.device(device, 0)
        if dtype == "float16" and not have_fp16(dev.compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        a = tvm.te.placeholder((m, n), name="a", dtype=dtype)
        b = topi.sum(a)
        with tvm.target.Target(device):
            sb = tvm.te.create_schedule(b.op)
            # Only the first reduce axis is threaded; the second stays serial.
            i, _ = b.op.reduce_axis
            sb[b].bind(i, tvm.te.thread_axis("threadIdx.x"))
            func = tvm.build(sb, [a, b], device)
        a_np = np.random.uniform(size=(m, n)).astype(a.dtype)
        b_np = np.sum(a_np)
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)

    check("cuda", "float32")
    check("rocm", "float32")
    check("cuda", "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floordiv_with_vectorization():
    """Vectorized gather with a floordiv index; 37 does not divide the lanes."""
    with tvm.target.cuda():
        # B[i] = A[floordiv(i, k)]
        n = 256
        k = 37
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: A[tvm.tir.floordiv(i, k)], name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], nparts=1)
        xio, xii = s[B].split(xi, factor=4)
        s[B].vectorize(xii)
        s[B].bind(xo, bx)
        s[B].bind(xio, tx)
        func = tvm.build(s, [A, B], "cuda")

        dev = tvm.cuda(0)
        a_np = np.random.uniform(size=(n,)).astype(A.dtype)
        b_np = np.array([a_np[i // k] for i in range(0, n)])
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_floormod_with_vectorization():
    """Vectorized gather with a floormod index; 37 does not divide the lanes."""
    with tvm.target.cuda():
        # B[i] = A[floormod(i, k)]
        n = 256
        k = 37
        A = te.placeholder((n,), name="A")
        B = te.compute((n,), lambda i: A[tvm.tir.floormod(i, k)], name="B")
        s = te.create_schedule(B.op)
        xo, xi = s[B].split(B.op.axis[0], nparts=1)
        xio, xii = s[B].split(xi, factor=4)
        s[B].vectorize(xii)
        s[B].bind(xo, bx)
        s[B].bind(xio, tx)
        func = tvm.build(s, [A, B], "cuda")

        dev = tvm.cuda(0)
        a_np = np.random.uniform(size=(n,)).astype(A.dtype)
        b_np = np.array([a_np[i % k] for i in range(0, n)])
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(np.zeros(b_np.shape, dtype=b_np.dtype), dev)
        func(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_casts():
    """Vectorized add with a cast between many (t0, t1) dtype pairs."""

    def check(t0, t1, factor):
        if (t0 == "float16" or t1 == "float16") and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return

        # compute
        n = 128
        A = te.placeholder((n,), dtype=t0, name="A")
        B = te.placeholder((n,), dtype=t1, name="B")
        # B is cast to A's dtype before the add; C has dtype t0.
        C = te.compute((n,), lambda i: A[i] + topi.cast(B[i], A.dtype), name="C")

        # schedule
        s = tvm.te.create_schedule(C.op)
        ob, ib = s[C].split(s[C].op.axis[0], factor=factor)
        s[C].vectorize(ib)
        s[C].bind(ob, tx)
        func = tvm.build(s, [A, B, C], "cuda")

        # correctness
        dev = tvm.cuda(0)
        low, high = (0, 20) if t0.startswith("u") or t1.startswith("u") else (-10, 10)
        a_np = np.random.randint(low, high, size=n).astype(A.dtype)
        b_np = np.random.randint(low, high, size=n).astype(B.dtype)
        c_np = (a_np + b_np).astype(A.dtype)
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(b_np, dev)
        c_nd = tvm.nd.array(np.zeros(c_np.shape, dtype=c_np.dtype), dev)
        func(a_nd, b_nd, c_nd)
        tvm.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-3)

    def skip(t0, t1):
        if t0 == t1:
            return True
        # Pairs within this set are skipped here, i.e. fp16 <-> {u}int8 casts
        # are deliberately not exercised by this test.
        skip_set = {"float16", "uint8", "int8"}
        if t0 in skip_set and t1 in skip_set:
            return True
        return False

    types_4 = [
        "float16",
        "float32",
        "int8",
        "uint8",
        "int16",
        "uint16",
        "int32",
        "uint32",
        "float64",
        "int64",
        "uint64",
    ]
    types_8 = ["float16", "float32", "int8", "uint8", "int16", "uint16", "int32", "uint32"]
    # Factor 4 covers all dtypes; factor 8 only the 32-bit-and-smaller ones.
    for t0, t1 in [(x, y) for x in types_4 for y in types_4 if not skip(x, y)]:
        check(t0, t1, 4)
    for t0, t1 in [(x, y) for x in types_8 for y in types_8 if not skip(x, y)]:
        check(t0, t1, 8)
    check("int8", "uint8", 16)
    check("uint8", "int8", 16)
def sched(B):
    """Shared schedule: one block, 32 threads, innermost axis vectorized by 4."""
    schedule = te.create_schedule(B.op)
    stage = schedule[B]
    outer, inner = stage.split(stage.op.axis[0], nparts=1)
    thread, rest = stage.split(inner, nparts=32)
    _, vec = stage.split(rest, factor=4)
    stage.vectorize(vec)
    stage.bind(outer, bx)
    stage.bind(thread, tx)
    return schedule
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin1():
    """Vectorized unary math intrinsics vs their numpy references."""
    test_funcs = [
        (tvm.tir.floor, lambda x: np.floor(x)),
        (tvm.tir.ceil, lambda x: np.ceil(x)),
        (tvm.tir.trunc, lambda x: np.trunc(x)),
        (tvm.tir.abs, lambda x: np.fabs(x)),
        (tvm.tir.round, lambda x: np.round(x)),
        (tvm.tir.exp, lambda x: np.exp(x)),
        (tvm.tir.exp2, lambda x: np.exp2(x)),
        (tvm.tir.exp10, lambda x: np.power(10, x)),
        (tvm.tir.log, lambda x: np.log(x)),
        (tvm.tir.log2, lambda x: np.log2(x)),
        (tvm.tir.log10, lambda x: np.log10(x)),
        (tvm.tir.tan, lambda x: np.tan(x)),
        (tvm.tir.cos, lambda x: np.cos(x)),
        (tvm.tir.cosh, lambda x: np.cosh(x)),
        (tvm.tir.sin, lambda x: np.sin(x)),
        (tvm.tir.sinh, lambda x: np.sinh(x)),
        (tvm.tir.atan, lambda x: np.arctan(x)),
        (tvm.tir.tanh, lambda x: np.tanh(x)),
        (tvm.tir.sqrt, lambda x: np.sqrt(x)),
    ]

    def run_test(tvm_intrin, np_func, dtype):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        # set of intrinsics does not support fp16 yet.
        skip_set = {
            tvm.tir.abs,
            tvm.tir.round,
            tvm.tir.tan,
            tvm.tir.atan,
            tvm.tir.tanh,
            tvm.tir.cosh,
            tvm.tir.sinh,
        }
        if dtype == "float16" and tvm_intrin in skip_set:
            print("Skip because '{0}' does not support fp16 yet".format(tvm_intrin.__name__))
            return

        n = 128
        A = te.placeholder((n,), dtype=dtype, name="A")
        B = te.compute((n,), lambda *i: tvm_intrin(A(*i)), name="B")
        s = sched(B)
        f = tvm.build(s, [A, B], "cuda")
        dev = tvm.cuda(0)
        # Inputs in (0, 1) keep log/sqrt well-defined.
        a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3)

    for func in test_funcs:
        run_test(*func, "float32")
        run_test(*func, "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_intrin2(dtype="float32"):
    """Vectorized binary intrinsics (power, fmod) with a constant second operand."""
    c2 = tvm.tir.const(2, dtype=dtype)
    test_funcs = [
        (tvm.tir.power, lambda x: np.power(x, 2.0)),
        (tvm.tir.fmod, lambda x: np.fmod(x, 2.0)),
    ]

    def run_test(tvm_intrin, np_func):
        n = 128
        A = te.placeholder((n,), dtype=dtype, name="A")
        B = te.compute((n,), lambda i: tvm_intrin(A[i], c2), name="B")
        s = sched(B)
        f = tvm.build(s, [A, B], "cuda")
        dev = tvm.cuda(0)
        a = tvm.nd.array(np.random.uniform(0, 1, size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(shape=(n,)).astype(A.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), np_func(a.numpy()), atol=1e-3, rtol=1e-3)

    for func in test_funcs:
        run_test(*func)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_popcount():
    """Compare tvm.tir.popcount on CUDA against a pure-Python population count."""

    def ref_popcount(x):
        # Kernighan's algorithm: clear the lowest set bit until x reaches zero;
        # the number of iterations is the number of set bits.
        cnt = 0
        while x:
            x -= x & -x
            cnt += 1
        return cnt

    def run_test(dtype):
        n = 128
        A = te.placeholder((n,), dtype=dtype, name="A")
        B = te.compute((n,), lambda i: tvm.tir.popcount(A[i]), name="B")
        s = sched(B)  # `sched` is a module-level helper defined earlier in this file
        f = tvm.build(s, [A, B], "cuda")
        dev = tvm.cuda(0)
        a = tvm.nd.array(np.random.randint(0, 100000, size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(shape=(n,)).astype(B.dtype), dev)
        f(a, b)
        # np.vectorize applies the scalar reference elementwise.
        ref = np.vectorize(ref_popcount)(a.numpy())
        tvm.testing.assert_allclose(b.numpy(), ref)

    run_test("uint32")
    run_test("uint64")
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_cuda_vectorize_load_permute_pad():
    """Vectorized load that permutes A into (n // lanes, l + 2*padding, lanes)
    with zero padding on the middle axis; the innermost lanes axis is vectorized.
    """

    def check_cuda(dtype, n, l, padding, lanes):
        if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        dev = tvm.cuda(0)
        A = tvm.te.placeholder((n, l), name="A", dtype=dtype)
        # Out-of-range j (inside the padding region) produces zero.
        B = tvm.te.compute(
            (n // lanes, l + 2 * padding, lanes),
            lambda i, j, k: tvm.te.if_then_else(
                tvm.te.any(j < padding, j >= l + padding),
                tvm.runtime.convert(0).astype(dtype),
                A[i * lanes + k, j - padding],
            ),
            name="B",
        )
        s = te.create_schedule(B.op)
        block, thread, vectorize = s[B].op.axis
        # bx/tx are module-level thread-axis handles defined earlier in this file.
        s[B].bind(block, bx)
        s[B].bind(thread, tx)
        s[B].vectorize(vectorize)
        fun = tvm.build(s, [A, B], "cuda", name="vector_load_permute_pad")
        np_a = np.random.randint(low=-128, high=127, size=(n, l)).astype(A.dtype)
        a = tvm.nd.empty((n, l), A.dtype, dev).copyfrom(np_a)
        b = tvm.nd.empty((n // lanes, l + padding * 2, lanes), B.dtype, dev)
        fun(a, b)
        # Reference result: reshape + transpose, then zero-pad the middle axis.
        np_a_reshape = np_a.reshape(n // lanes, lanes, l).transpose(0, 2, 1)
        ref = np.pad(
            np_a_reshape, ((0, 0), (padding, padding), (0, 0)), mode="constant", constant_values=0
        )
        tvm.testing.assert_allclose(b.numpy(), ref)

    check_cuda("int8", 64, 16, 3, 2)
    check_cuda("uint8", 64, 16, 3, 2)
    check_cuda("int8", 64, 16, 3, 4)
    check_cuda("uint8", 64, 16, 3, 4)
    check_cuda("int32", 64, 16, 3, 4)
    check_cuda("float16", 64, 16, 3, 4)
    check_cuda("float32", 64, 16, 3, 4)
def vcf_check_common(s, args):
    """Lower and run a 512x512 matmul schedule, checking its vectorization.

    First walks the lowered TIR asserting that every vectorized access became
    a Ramp/Broadcast pattern, then builds for CUDA and checks the numerical
    result against ``np.dot``.

    Note: a dead local ``N = 512`` was removed; the problem size is fixed by
    the shapes in the array allocations below.
    """
    # To check if every vectorize loop transforms to ramp expr successfully
    stmt = tvm.lower(s, args)
    # Use this as a stack flag to show whether this stmt is inside a BroadcastNode
    inside_broadcast = [False]

    # Possible patterns:
    # Reduce init: BufferStore[Ramp] = Broadcast(0)
    # Shared memory copy: BufferStore[Ramp] = BufferLoad[Ramp]
    # Compute: BufferStore[Ramp] = BufferLoad[Ramp] ... Broadcast[Load]

    def pre_visit(stmt):
        if isinstance(stmt, tvm.tir.Broadcast):
            inside_broadcast[0] = True
            # Check Broadcast[Imm numbers] or Broadcast[Load] patterns
            assert isinstance(stmt.value, (tvm.tir.IntImm, tvm.tir.FloatImm, tvm.tir.BufferLoad))
        if isinstance(stmt, (tvm.tir.BufferStore, tvm.tir.BufferLoad)):
            is_ramp_index = isinstance(stmt.indices[-1], tvm.tir.Ramp)
            # A dtype like "float32x4" indicates an already-vectorized buffer.
            is_vectorized_buffer = re.match(r"^.*x\d+$", stmt.buffer.dtype)
            if isinstance(stmt, tvm.tir.BufferLoad):
                # Check Broadcast[BufferLoad] or BufferLoad[Ramp] patterns
                assert inside_broadcast[0] or is_ramp_index or is_vectorized_buffer
                # Skip the rest of the BufferLoad
                return stmt
            else:
                assert is_ramp_index or is_vectorized_buffer
        return None

    def post_visit(stmt):
        if isinstance(stmt, tvm.tir.Broadcast):
            inside_broadcast[0] = False
        return None

    tvm.tir.stmt_functor.ir_transform(stmt["main"].body, pre_visit, post_visit)

    tgt = tvm.target.cuda()
    mod = tvm.build(s, args, tgt)
    # To check if every vectorize loop transforms to correct instruction
    # print(mod.imported_modules[0].get_source())

    dev = tvm.device("cuda", 0)
    a = tvm.nd.array(np.random.uniform(size=(512, 512)).astype("float32"), dev)
    b = tvm.nd.array(np.random.uniform(size=(512, 512)).astype("float32"), dev)
    c = tvm.nd.array(np.zeros((512, 512), dtype="float32"), dev)
    mod(a, b, c)
    tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_x():
    """Tiled matmul with vectorized cooperative fetching, threads on x only."""
    N = 512
    A = te.placeholder((N, N), name="A", dtype="float32")
    B = te.placeholder((N, N), name="B", dtype="float32")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
    s = te.create_schedule(C.op)
    i, j = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Stage A and B tiles through shared memory.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    # Split i and j into four levels each, k into three.
    i3, i4 = s[C].split(i, factor=4)
    i2, i3 = s[C].split(i3, factor=2)
    i1, i2 = s[C].split(i2, factor=8)
    i0, i1 = s[C].split(i1, factor=1)
    j3, j4 = s[C].split(j, factor=4)
    j2, j3 = s[C].split(j3, factor=2)
    j1, j2 = s[C].split(j2, factor=8)
    j0, j1 = s[C].split(j1, factor=2)
    k1, k2 = s[C].split(k, factor=8)
    k0, k1 = s[C].split(k1, factor=8)
    s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
    # Bind the fused outer levels to block / vthread / thread.
    block_it = s[C].fuse(i0, j0)
    s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
    vthread_it = s[C].fuse(i1, j1)
    s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
    thread_it = s[C].fuse(i2, j2)
    s[C].bind(thread_it, tvm.te.thread_axis("threadIdx.x"))
    s[C].vectorize(j4)
    # Cooperative fetch: all threads copy the shared tiles, vectorized by 4.
    s[AA].compute_at(s[C], k0)
    iaa, jaa = s[AA].op.axis
    s[BB].compute_at(s[C], k0)
    ibb, jbb = s[BB].op.axis
    aa_fused = s[AA].fuse(iaa, jaa)
    bb_fused = s[BB].fuse(ibb, jbb)
    aa1, aa2 = s[AA].split(aa_fused, factor=4)
    aa0, aa1 = s[AA].split(aa1, factor=64)
    bb1, bb2 = s[BB].split(bb_fused, factor=4)
    bb0, bb1 = s[BB].split(bb1, factor=64)
    s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.x"))
    s[AA].vectorize(aa2)
    s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.x"))
    s[BB].vectorize(bb2)
    vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorized_cooperative_fetching_xy():
    """Tiled matmul with vectorized cooperative fetching, 2-D thread binding."""
    N = 512
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="B")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=k))
    s = te.create_schedule(C.op)
    i, j = s[C].op.axis
    k = s[C].op.reduce_axis[0]
    # Stage A and B tiles through shared memory.
    AA = s.cache_read(A, "shared", [C])
    BB = s.cache_read(B, "shared", [C])
    # Split i and j into four levels each, k into three.
    i3, i4 = s[C].split(i, factor=4)
    i2, i3 = s[C].split(i3, factor=2)
    i1, i2 = s[C].split(i2, factor=8)
    i0, i1 = s[C].split(i1, factor=1)
    j3, j4 = s[C].split(j, factor=4)
    j2, j3 = s[C].split(j3, factor=2)
    j1, j2 = s[C].split(j2, factor=8)
    j0, j1 = s[C].split(j1, factor=2)
    k1, k2 = s[C].split(k, factor=8)
    k0, k1 = s[C].split(k1, factor=8)
    s[C].reorder(i0, j0, i1, j1, i2, j2, k0, k1, i3, j3, k2, i4, j4)
    block_it = s[C].fuse(i0, j0)
    s[C].bind(block_it, tvm.te.thread_axis("blockIdx.x"))
    vthread_it = s[C].fuse(i1, j1)
    s[C].bind(vthread_it, tvm.te.thread_axis("vthread"))
    # Unlike the x-only variant, i2/j2 are bound to a 2-D thread grid.
    s[C].bind(i2, tvm.te.thread_axis("threadIdx.y"))
    s[C].bind(j2, tvm.te.thread_axis("threadIdx.x"))
    s[C].vectorize(j4)
    # Cooperative fetch of the shared tiles, vectorized by 4.
    s[AA].compute_at(s[C], k0)
    iaa, jaa = s[AA].op.axis
    s[BB].compute_at(s[C], k0)
    ibb, jbb = s[BB].op.axis
    aa_fused = s[AA].fuse(iaa, jaa)
    bb_fused = s[BB].fuse(ibb, jbb)
    aa2, aa3 = s[AA].split(aa_fused, factor=4)
    aa1, aa2 = s[AA].split(aa2, factor=8)
    aa0, aa1 = s[AA].split(aa1, factor=8)
    bb2, bb3 = s[BB].split(bb_fused, factor=4)
    bb1, bb2 = s[BB].split(bb2, factor=8)
    bb0, bb1 = s[BB].split(bb1, factor=8)
    s[AA].bind(aa1, tvm.te.thread_axis("threadIdx.y"))
    s[AA].bind(aa2, tvm.te.thread_axis("threadIdx.x"))
    s[AA].vectorize(aa3)
    s[BB].bind(bb1, tvm.te.thread_axis("threadIdx.y"))
    s[BB].bind(bb2, tvm.te.thread_axis("threadIdx.x"))
    s[BB].vectorize(bb3)
    vcf_check_common(s, [A, B, C])
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_unrolled_vectorization():
    """Vectorization combined with unrolling in a cached 128x128 matmul."""
    dtype = "float32"
    target = "cuda"

    # Compute declaration
    N = 128
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="B")
    k = te.reduce_axis((0, N), name="k")
    C = te.compute((N, N), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")

    # Schedule
    s = te.create_schedule([C.op])
    CC = s.cache_write(C, "local")
    i, j = s[C].op.axis
    bx, tx, ii, ji = s[C].tile(i, j, 1, 2)
    s[C].bind(bx, te.thread_axis("blockIdx.x"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].vectorize(ji)
    s[CC].compute_at(s[C], tx)
    i, j = s[CC].op.axis
    k = s[CC].op.reduce_axis[0]
    # Unroll the inner half of the reduction and vectorize the j axis.
    ko, ki = s[CC].split(k, 2)
    s[CC].unroll(ki)
    s[CC].vectorize(j)

    # Check correctness
    dev = tvm.device(target)
    a_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), device=dev)
    b_tvm = tvm.nd.array(np.ones((N, N)).astype(dtype), device=dev)
    c_tvm = tvm.nd.empty((N, N), device=dev)
    func_tvm = tvm.build(s, [A, B, C], target=target)
    func_tvm(a_tvm, b_tvm, c_tvm)
    c_np = c_tvm.numpy()
    # ones @ ones is N in every entry.
    tvm.testing.assert_allclose(c_np, N * np.ones((N, N)))
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_try_unaligned_vector_load():
    """Vector loads must not be emitted for buffers read at unaligned offsets."""

    def get_compute(N, C_N, offset):
        A = te.placeholder((N,), name="A", dtype="float16")
        C = te.compute((C_N,), lambda i: A[i + offset], name="C")
        return N, C_N, A, C

    def get_compute_unaligned():
        # offset 1: a 2-lane fp16 vector load would start mid-element-pair.
        return get_compute(3, 2, 1)

    def get_compute_aligned():
        # offset 2: a 2-lane fp16 vector load starts on an aligned boundary.
        return get_compute(4, 2, 2)

    def build(A, C, N, C_N):
        s = te.create_schedule(C.op)
        oi, ii = s[C].split(C.op.axis[0], factor=2)
        s[C].bind(oi, te.thread_axis("threadIdx.x"))
        s[C].vectorize(ii)  # BUG: misalignment

        tgt = tvm.target.Target(target="cuda", host="llvm")

        dev = tvm.device(tgt.kind.name, 0)
        f = tvm.build(s, [A, C], tgt, name="foo")
        kernel_source = f.imported_modules[0].get_source()

        a_data = np.arange(0, N).astype(A.dtype)
        a = tvm.nd.array(a_data, dev)
        c = tvm.nd.array(np.zeros(C_N, dtype=C.dtype), dev)

        f(a, c)
        return a_data, c.numpy(), kernel_source

    N, C_N, A, C = get_compute_unaligned()
    a_data, c, kernel_source = build(A, C, N, C_N)
    # (uint1*)(A + (1)) is invalid
    assert "A + (1)" not in kernel_source

    expected = a_data[1 : C_N + 1]
    assert np.allclose(c, expected), f"expected={expected}\nactual={c}"

    N, C_N, A, C = get_compute_aligned()
    a_data, c, kernel_source = build(A, C, N, C_N)
    # (uint1*)(A + (2)) is a valid vector load
    assert "A + 2" in kernel_source

    expected = a_data[2 : C_N + 2]
    assert np.allclose(c, expected), f"expected={expected}\nactual={c}"
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    tvm.testing.main()
| 36,996 | 34.268827 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_custom_datatypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for the Bring Your Own Datatype framework.
TODO(@gussmith23 @hypercubestart) link to documentation"""
import numpy as np
import pytest
import tvm
import tvm.topi.testing
import tvm.testing
from tvm import relay
from tvm.relay.testing.layers import batch_norm_infer
from tvm.target.datatype import (
create_lower_func,
create_min_lower_func,
lower_call_pure_extern,
lower_ite,
register,
register_min_func,
register_op,
)
from tvm.tir.op import call_pure_extern
from tvm.script import tir as T
# note: we can't use relay.testing models because params are randomly initialized,
# which lead the output to have the same values
# get mobilenet model from Gluon CV
# because: https://discuss.tvm.apache.org/t/mobilenet-intermediate-values-are-0/7812
def get_mobilenet():
    """Fetch pretrained MobileNet 0.25 from Gluon CV and convert it to Relay."""
    input_shape = (1, 3, 224, 224)
    from mxnet.gluon.model_zoo.vision import get_model

    pretrained = get_model("mobilenet0.25", pretrained=True)
    return relay.frontend.from_mxnet(pretrained, {"data": input_shape})
# use real image instead of random data for end-to-end model training
# or else output would all be around the same value
def get_cat_image(dimensions):
    """Download the reference cat image, resize it, and return an NCHW float32 array."""
    from PIL import Image
    from tvm.contrib.download import download_testdata

    url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
    local_path = download_testdata(url, "cat.png", module="data")
    resized = Image.open(local_path).resize(dimensions)
    # CoreML's standard model image format is BGR: reverse the channel axis.
    bgr = np.array(resized)[:, :, ::-1]
    # HWC -> CHW, then prepend a batch dimension.
    nchw = np.transpose(bgr, (2, 0, 1))[np.newaxis, :]
    return np.asarray(nchw, dtype="float32")
# Seed NumPy's global RNG so the randomly generated inputs below are the same
# on every run, keeping these tests deterministic.
np.random.seed(0)
def convert_ndarray(dst_dtype, array):
    """Cast `array` to `dst_dtype` by evaluating a one-op Relay cast function."""
    var = relay.var("x", shape=array.shape, dtype=str(array.dtype))
    cast_fn = relay.Function([var], var.astype(dst_dtype))
    # Vectorization is not implemented for custom datatypes, so disable it.
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        return relay.create_executor("graph").evaluate(cast_fn)(array)
def change_dtype(src, dst, module, params):
    """Convert constants and functions in `module` from dtype `src` to `dst`.

    Returns the retyped module together with `params` converted to `dst`.
    """
    retyped = relay.frontend.ChangeDatatype(src, dst)(module)
    retyped = relay.transform.InferType()(retyped)
    converted_params = {name: convert_ndarray(dst, value) for name, value in params.items()}
    return retyped, converted_params
def compare(module, input, src_dtype, dst_dtype, rtol, atol, params=None, target="llvm"):
    """Run `module` natively in `src_dtype` and again converted to `dst_dtype`,
    asserting that the single output agrees within (rtol, atol).

    `input` is a tuple of input arrays; `params` maps parameter names to values.
    """
    # Fix: avoid a mutable default argument (`params={}`), which is shared
    # across calls; create a fresh dict per call instead.
    if params is None:
        params = {}
    module = relay.transform.InferType()(module)
    module = relay.transform.SimplifyInference()(module)
    correct = relay.create_executor("graph", mod=module).evaluate()(*input, **params)

    module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)
    # converts all inputs to dst_dtype
    x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]

    # Vectorization is not implemented with custom datatypes
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        maybe_correct = relay.create_executor("graph", mod=module, target=target).evaluate()(
            *x_converted, **converted_params
        )
        # currently this only works for comparing single output
        maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)
    np.testing.assert_allclose(
        maybe_correct_converted.numpy(), correct.numpy(), rtol=rtol, atol=atol
    )
def setup_myfloat():
    """Set up tests for myfloat (a custom datatype that under the hood is float)

    Currently, this registers some custom datatypes using the Bring Your
    Own Datatypes framework.
    """

    def _setup_myfloat_inner():
        # To use datatype operations in an external library, you should first load
        # the library containing the datatype implementation:
        # CDLL("libposit.so", RTLD_GLOBAL)
        # In this case, the datatype library we are using is built right into TVM,
        # so we do not need to explicitly load any library.

        # You can pick a code for your datatype arbitrarily, as long as it is
        # greater than 128 and has not already been chosen.
        register("myfloat", 131)

        # Lower each TIR op on myfloat to an extern call into the built-in
        # Custom32* C functions. Cast keys are (src_bits, dst_bits) pairs;
        # all other ops are keyed by bit width alone.
        register_op(
            create_lower_func({(32, 32): "FloatToCustom32"}), "Cast", "llvm", "float", "myfloat"
        )
        register_op(
            create_lower_func({(32, 32): "Custom32ToFloat"}), "Cast", "llvm", "myfloat", "float"
        )
        register_op(create_lower_func({32: "Custom32Add"}), "Add", "llvm", "myfloat")
        register_op(
            create_lower_func(
                {
                    32: "Custom32Sub",
                }
            ),
            "Sub",
            "llvm",
            "myfloat",
        )
        register_op(create_lower_func({32: "Custom32Mul"}), "Mul", "llvm", "myfloat")
        register_op(
            create_lower_func(
                {
                    32: "FloatToCustom32",
                }
            ),
            "FloatImm",
            "llvm",
            "myfloat",
        )
        register_op(
            create_lower_func(
                {
                    32: "Custom32Div",
                }
            ),
            "Div",
            "llvm",
            "myfloat",
        )
        register_op(create_lower_func({32: "Custom32Max"}), "Max", "llvm", "myfloat")
        register_op(
            create_lower_func({32: "Custom32Sqrt"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.sqrt",
        )
        register_op(
            create_lower_func({32: "Custom32Exp"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.exp",
        )
        register_op(
            create_lower_func({32: "Custom32Log"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.log",
        )
        register_op(
            create_lower_func({32: "Custom32Sigmoid"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.sigmoid",
        )
        register_op(
            create_lower_func({32: "Custom32Tanh"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.tanh",
        )
        register_op(lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else")
        register_op(
            lower_call_pure_extern, "Call", "llvm", "myfloat", intrinsic_name="tir.call_pure_extern"
        )

        register_min_func(create_min_lower_func({32: "MinCustom32"}, "myfloat"), "myfloat")

    try:
        _setup_myfloat_inner()
    except tvm._ffi.base.TVMError as e:
        # Ignore this specific error which can happen if another test
        # that uses "myfloat" has already run.
        if "float is already registered" not in str(e):
            raise e
def setup_posites2():
    """Set up tests for posites2

    Currently, this registers some custom datatypes using the Bring Your
    Own Datatypes framework.
    """

    # To use datatype operations in an external library, you should first load
    # the library containing the datatype implementation:
    # CDLL("libposit.so", RTLD_GLOBAL)
    # In this case, the datatype library we are using is built right into TVM,
    # so we do not need to explicitly load any library.

    # You can pick a code for your datatype arbitrarily, as long as it is
    # greater than 128 and has not already been chosen.
    register("posites2", 132)

    # Lowering maps are keyed by bit width (or (src, dst) widths for Cast) so
    # the same registration covers 8-, 16-, and 32-bit posits.
    register_op(
        create_lower_func(
            {
                (32, 32): "FloatToPosit32es2",
                (32, 16): "FloatToPosit16es2",
                (32, 8): "FloatToPosit8es2",
            }
        ),
        "Cast",
        "llvm",
        "float",
        "posites2",
    )
    register_op(
        create_lower_func(
            {
                (32, 32): "Posit32es2ToFloat",
                (16, 32): "Posit16es2ToFloat",
                (8, 32): "Posit8es2ToFloat",
            }
        ),
        "Cast",
        "llvm",
        "posites2",
        "float",
    )
    register_op(
        create_lower_func({32: "Posit32es2Add", 16: "Posit16es2Add", 8: "Posit8es2Add"}),
        "Add",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Sub", 16: "Posit16es2Sub", 8: "Posit8es2Sub"}),
        "Sub",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func(
            {32: "FloatToPosit32es2", 16: "FloatToPosit16es2", 8: "FloatToPosit8es2"}
        ),
        "FloatImm",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Mul", 16: "Posit16es2Mul", 8: "Posit8es2Mul"}),
        "Mul",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Div", 16: "Posit16es2Div", 8: "Posit8es2Div"}),
        "Div",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Max", 16: "Posit16es2Max", 8: "Posit8es2Max"}),
        "Max",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Sqrt", 16: "Posit16es2Sqrt", 8: "Posit8es2Sqrt"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.sqrt",
    )
    register_op(lower_ite, "Call", "llvm", "posites2", intrinsic_name="tir.if_then_else")
    register_op(
        lower_call_pure_extern, "Call", "llvm", "posites2", intrinsic_name="tir.call_pure_extern"
    )
    register_op(
        create_lower_func({32: "Posit32es2Exp", 16: "Posit16es2Exp", 8: "Posit8es2Exp"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.exp",
    )
    register_op(
        create_lower_func({32: "Posit32es2Log", 16: "Posit16es2Log", 8: "Posit8es2Log"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.log",
    )
    register_op(
        create_lower_func(
            {32: "Posit32es2Sigmoid", 16: "Posit16es2Sigmoid", 8: "Posit8es2Sigmoid"}
        ),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.sigmoid",
    )
    register_op(
        create_lower_func({32: "Posit32es2Tanh", 16: "Posit16es2Tanh", 8: "Posit8es2Tanh"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.tanh",
    )
    register_min_func(
        create_min_lower_func(
            {32: "MinPosit32es2", 16: "MinPosit16es2", 8: "MinPosit8es2"}, "posites2"
        ),
        "posites2",
    )
def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
    """Run the same op, but with two different datatypes"""
    # used for unary ops, first shape in binary ops
    shape1 = (5, 10, 5)
    # second shape for binary ops
    shape2 = (5,)

    def check_unary_op(op, src_dtype, dst_dtype, shape):
        # Build a single-op Relay function and compare src vs dst execution.
        t1 = relay.TensorType(shape, src_dtype)
        x = relay.var("x", t1)
        z = op(x)
        x_data = np.random.rand(*shape).astype(t1.dtype)
        module = tvm.IRModule.from_expr(relay.Function([x], z))
        compare(module, (x_data,), src_dtype, dst_dtype, rtol, atol)

    # test unary ops
    for op in [
        relay.nn.softmax,
        tvm.relay.log,
        tvm.relay.exp,
        tvm.relay.sqrt,
        tvm.relay.rsqrt,
        tvm.relay.sigmoid,
        tvm.relay.tanh,
        relay.nn.relu,
        relay.nn.batch_flatten,
    ]:
        check_unary_op(op, src_dtype, dst_dtype, shape1)

    # test pooling ops over 4d data
    # Fix: renamed from the misleading `shape_2d` (it is a 4-D shape) and
    # hoisted out of the loop since it is loop-invariant.
    shape_4d = (3, 32, 32, 32)
    for op in [relay.nn.max_pool2d, relay.nn.avg_pool2d, relay.nn.global_avg_pool2d]:
        check_unary_op(op, src_dtype, dst_dtype, shape_4d)

    def check_binary_op(opfunc, src_dtype, dst_dtype):
        # Binary ops broadcast a (5, 10, 5) lhs against a (5,) rhs.
        t1 = relay.TensorType(shape1, src_dtype)
        t2 = relay.TensorType(shape2, src_dtype)
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        x_data = np.random.rand(*shape1).astype(t1.dtype)
        y_data = np.random.rand(*shape2).astype(t2.dtype)
        module = tvm.IRModule.from_expr(relay.Function([x, y], z))
        compare(module, (x_data, y_data), src_dtype, dst_dtype, rtol, atol)

    for op in [
        relay.add,
        relay.subtract,
        relay.divide,
        relay.multiply,
    ]:
        check_binary_op(op, src_dtype, dst_dtype)

    # we would like to test tvm_if_then_else
    # but Relay.IfNode is not lowered to this intrinsic,
    # so to keep our tests consistent with relay, we decide to not unit test
    # Note: tvm_if_then_else is tested as part of the mobile_net model
def run_model(get_workload, input, src_dtype, dst_dtype, rtol=1e-4, atol=1e-4):
    """Fetch a model via `get_workload` and compare src/dst dtype execution on `input`."""
    module, params = get_workload()

    # we don't generate random data here
    # because then the output data would all be around the same value
    compare(module, input, src_dtype, dst_dtype, rtol, atol, params)
def run_conv2d(src_dtype, dst_dtype, rtol=1e-7, atol=1e-4):
    """Compare several conv2d variants (depthwise, grouped, normal, dilated)
    executed in `src_dtype` and `dst_dtype`.
    """

    def run_test_conv2d(
        src_dtype,
        dst_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1),
        groups=1,
        dilation=(1, 1),
        **attrs,
    ):
        # Build a one-op conv2d module with uniform random data/kernel in
        # [-scale, scale] and compare src vs dst datatype execution.
        x = relay.var("x", shape=dshape, dtype=src_dtype)
        w = relay.var("w", shape=kshape, dtype=src_dtype)
        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        module = tvm.IRModule.from_expr(relay.Function([x, w], y))
        data = np.random.uniform(-scale, scale, size=dshape).astype(src_dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(src_dtype)
        compare(module, (data, kernel), src_dtype, dst_dtype, rtol, atol)

    # depthwise conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=32,
        groups=32,
        kernel_size=(3, 3),
    )

    # CUDA is disabled for 'direct' schedule:
    # https://github.com/dmlc/tvm/pull/3070#issuecomment-486597553
    # group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 4, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=32,
        groups=8,
        kernel_size=(3, 3),
    )
    # also group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (64, 1, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=64,
        groups=32,
        kernel_size=(3, 3),
    )

    # normal conv2d
    dshape = (1, 3, 224, 224)
    kshape = (10, 3, 3, 3)
    run_test_conv2d(
        src_dtype, dst_dtype, 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)
    )

    # dilated conv2d
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=10,
        kernel_size=(3, 3),
        dilation=(3, 3),
    )
def run_batchnorm(src_dtype, dst_dtype, rtol=1e-6, atol=1e-6):
    """Compare batch-norm inference between `src_dtype` and `dst_dtype`."""
    shape = (3, 32, 32)
    t = relay.TensorType(shape, src_dtype)
    x = relay.var("x", t)
    bn = batch_norm_infer(data=x, epsilon=2e-5, scale=False, name="bn_x")
    f = relay.Function(relay.analysis.free_vars(bn), bn)

    x_data = np.random.rand(*shape).astype(t.dtype)
    module = tvm.IRModule.from_expr(f)

    # gamma/beta/mean/var inputs are all zeros here.
    zero_data = np.zeros((32), "float32")
    compare(
        module,
        (x_data, zero_data, zero_data, zero_data, zero_data),
        src_dtype,
        dst_dtype,
        rtol,
        atol,
    )
def test_myfloat():
    """End-to-end checks for the 32-bit myfloat custom datatype."""
    setup_myfloat()

    run_ops("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
    run_conv2d("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
    run_batchnorm("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
    # mxnet python package not available
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ),
    #           'float32',
    #           'custom[myfloat]32')
class TestMyfloatLowering(tvm.testing.CompareBeforeAfter):
    """LowerCustomDatatypes rewrites myfloat arithmetic into extern calls on uint32."""

    # Runs at class-definition time so "myfloat" is registered before the
    # TVMScript below (which names the custom dtype) is parsed.
    setup_myfloat()

    transform = tvm.tir.transform.LowerCustomDatatypes()

    def before(self):
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A = T.Buffer(16, dtype=dtype, data=A_data)
            B_data = T.allocate([16], dtype=dtype)
            B = T.Buffer(16, dtype=dtype, data=B_data)
            for i in range(16):
                B[i] = A[i] + 1.0

        return func

    def expected(self):
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A_uint32 = T.Buffer(16, "uint32", data=A_data)
            B_data = T.allocate([16], dtype="uint32")
            B_uint32 = T.Buffer(16, "uint32", data=B_data)
            for i in range(16):
                B_uint32[i] = T.call_pure_extern(
                    "uint32",
                    "FloatToCustom32",
                    T.call_pure_extern("float32", "Custom32ToFloat", A_uint32[i]) + T.float32(1),
                )

        return func
class TestMyfloatLoweringDeclBuffer(tvm.testing.CompareBeforeAfter):
    """Like TestMyfloatLowering, but using DeclBuffer"""

    # Runs at class-definition time so "myfloat" is registered before the
    # TVMScript below (which names the custom dtype) is parsed.
    setup_myfloat()

    transform = tvm.tir.transform.LowerCustomDatatypes()

    def before(self):
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A = T.decl_buffer(16, dtype=dtype, data=A_data)
            B = T.decl_buffer(16, dtype=dtype)
            for i in range(16):
                B[i] = A[i] + 1.0

        return func

    def expected(self):
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A_uint32 = T.decl_buffer(16, "uint32", data=A_data)
            B_uint32 = T.decl_buffer(16, dtype="uint32")
            for i in range(16):
                B_uint32[i] = T.call_pure_extern(
                    "uint32",
                    "FloatToCustom32",
                    T.call_pure_extern("float32", "Custom32ToFloat", A_uint32[i]) + T.float32(1),
                )

        return func
def _has_posit():
    """Return True when TVM was built with posit (USE_BYODT_POSIT) support."""
    flag = tvm.support.libinfo()["USE_BYODT_POSIT"]
    return flag == "ON"
@pytest.mark.skipif(not _has_posit(), reason="compiled with USE_BYODT_POSIT flag OFF")
def test_posites2():
    """End-to-end checks for the posites2 datatype at 8-, 16-, and 32-bit widths."""
    setup_posites2()
    # Wider posits are more accurate, so tolerances tighten with bit width.
    run_ops("float32", "custom[posites2]8", rtol=1, atol=1)
    run_ops("float32", "custom[posites2]16", rtol=0.01, atol=1)
    run_ops("float32", "custom[posites2]32", rtol=1e-6, atol=1e-6)

    run_conv2d("float32", "custom[posites2]8", rtol=1, atol=1)
    run_conv2d("float32", "custom[posites2]16", rtol=0.01, atol=1)
    run_conv2d("float32", "custom[posites2]32")

    run_batchnorm("float32", "custom[posites2]8", rtol=1, atol=1)
    run_batchnorm("float32", "custom[posites2]16", rtol=0.01, atol=1)
    run_batchnorm("float32", "custom[posites2]32", rtol=1e-4, atol=1e-4)
    # Expected posit8 might be faster, but it's not.
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit8]8')
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit32]32')
    # run_model(get_inception, (get_cat_image((229, 229)), ), 'float32', 'custom[posit32]32')
    # run_model(get_resnet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit32]32')

    # can't run cifar-10 sizes because dimensions
    # don't match pretrained weights

    # runs on the order of minutes...
    # run_model(get_inception, (get_cat_image((229, 229)), ),
    #           'float32',
    #           'custom[posites2]32')
    # run_model(get_resnet, (get_cat_image((224, 224)), ),
    #           'float32',
    #           'custom[posites2]32')
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    tvm.testing.main()
| 21,123 | 31.006061 | 142 | py |
tvm | tvm-main/tests/python/unittest/test_te_schedule_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_scan():
    """Exercise scan schedule analyses: body extraction, attach path, fixpoint."""
    m = te.var("m")
    n = te.var("n")
    x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x")
    s_state = te.placeholder((m, n))
    s_init = te.compute((1, n), lambda _, i: x[0, i], name="s_init")
    x_trans = te.compute((m, n), lambda i, j: x[i, j] + 1, name="x_trans")
    s_up1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + 1, name="up1")
    s_update = te.compute((m, n), lambda t, i: s_up1[t, i] + x_trans[t, i], name="update")
    s_scan = tvm.te.scan(s_init, s_update, s_state)

    def test_getbody():
        body = tvm.te.schedule.ScanGetBody(s_scan.op)
        assert set(body) == set([s_scan.op, s_update.op, s_up1.op])

    def test_attach_path():
        s = te.create_schedule(s_scan.op)
        s[x_trans].compute_at(s[s_update], s_update.op.axis[0])
        apath = tvm.te.schedule.CreateAttachPath(s)
        assert tuple(apath[s_update.op]) == tuple([s_scan.op.scan_axis])
        assert tuple(apath[x_trans.op]) == tuple([s_update.op.axis[0], s_scan.op.scan_axis])

    def test_fix_pt():
        body = tvm.te.schedule.ScanGetBody(s_scan.op)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
        # Fix: spatial_axis_ lives on the scan *op*, matching its use in
        # test_scan_fix_point below (s_scan.op.spatial_axis_).
        assert fxpt[s_scan.op.spatial_axis_[0]].value != 0

    # Fix: these nested checks were defined but never invoked, so their
    # assertions never ran (pytest does not collect nested functions).
    # Invoke them explicitly, in the same style as test_scan_fix_point.
    test_getbody()
    test_attach_path()
    test_fix_pt()
def test_scan_fix_point():
    """Check ScanFixPointAnalysis marks each spatial axis as fixed (1) or not (0)
    depending on how the update reads the previous state.
    """
    m = te.var("m")
    n = te.var("n")
    l = te.var("l")
    x = te.compute((l, m, n), lambda *i: tvm.tir.const(1, "float32"), name="x")
    s_state = te.placeholder((l, m, n))
    s_init = te.compute((1, m, n), lambda _, i, j: x[0, i, j], name="s_init")

    def test_scan0():
        # State read with the same (i, j) indexing: both axes are fixpoints.
        s_update = te.compute(
            (l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, i, j], name="update"
        )
        s_scan = tvm.te.scan(s_init, s_update, s_state)
        body = tvm.te.schedule.ScanGetBody(s_scan.op)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
        assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
        assert fxpt[s_scan.op.spatial_axis_[1]].value == 1

    def test_scan1():
        # State read transposed (j, i): neither axis is a fixpoint.
        s_update = te.compute(
            (l, m, n), lambda t, i, j: x[t, j, i] + s_state[t - 1, j, i], name="update"
        )
        s_scan = tvm.te.scan(s_init, s_update, s_state)
        body = tvm.te.schedule.ScanGetBody(s_scan.op)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
        assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
        assert fxpt[s_scan.op.spatial_axis_[1]].value == 0

    def test_scan3_not_exact_reach():
        # Second path reads a fixed column (index 10): only axis 0 is fixed.
        s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, i, j], name="h1")
        s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, 10] * 2, name="h1")
        s_update = te.compute(
            (l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
        )
        s_scan = tvm.te.scan(s_init, s_update, s_state)
        body = tvm.te.schedule.ScanGetBody(s_scan.op)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
        assert fxpt[s_scan.op.spatial_axis_[0]].value == 1
        assert fxpt[s_scan.op.spatial_axis_[1]].value == 0

    def test_scan4_reach_other():
        # One path reads (j, j), coupling the axes: neither axis is fixed.
        s_h1 = te.compute((l, n, m), lambda t, j, i: s_state[t - 1, j, j], name="h1")
        s_h2 = te.compute((l, m, n), lambda t, i, j: s_state[t - 1, i, j] * 2, name="h1")
        s_update = te.compute(
            (l, m, n), lambda t, i, j: s_h1[t, j, i] + s_h2[t, i, j], name="update"
        )
        s_scan = tvm.te.scan(s_init, s_update, s_state)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(s_scan.op)
        assert fxpt[s_scan.op.spatial_axis_[0]].value == 0
        assert fxpt[s_scan.op.spatial_axis_[1]].value == 0

    def test_scan5_multi_output():
        # Two coupled state tensors; the shared spatial axis stays fixed.
        m = te.var("m")
        n = te.var("n")
        x1 = te.placeholder((m, n))
        s1 = te.placeholder((m, n))
        x2 = te.placeholder((m, n))
        s2 = te.placeholder((m, n))
        s1_init = te.compute((1, n), lambda _, i: x1[0, i])
        s2_init = te.compute((1, n), lambda _, i: x2[0, i])
        s1_update = te.compute((m, n), lambda t, i: s1[t - 1, i] + x1[t, i])
        s2_update = te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i])
        r0, r1 = tvm.te.scan([s1_init, s2_init], [s1_update, s2_update], [s1, s2])
        body = tvm.te.schedule.ScanGetBody(r0.op)
        fxpt = tvm.te.schedule.ScanFixPointAnalysis(r0.op)
        assert fxpt[r1.op.spatial_axis_[0]].value == 1

    test_scan0()
    test_scan1()
    test_scan3_not_exact_reach()
    test_scan4_reach_other()
    test_scan5_multi_output()
def test_create_read_graph():
    """CreateReadGraph maps each op to the tensors it reads, and PostDFSOrder
    visits producers before consumers (A -> A1 -> A2)."""
    rows = te.var("m")
    cols = te.var("l")
    src = te.placeholder((rows, cols), name="A")
    copied = te.compute((rows, cols), lambda i, j: src[i, j])
    shifted = te.compute((rows, cols), lambda i, j: copied[i, j] + 3)
    graph = tvm.te.schedule.CreateReadGraph([shifted.op])
    # Each op's read list contains exactly its direct input tensor.
    assert graph[shifted.op][0] == copied
    assert graph[copied.op][0] == src
    post_order = tvm.te.schedule.PostDFSOrder([shifted.op], graph)
    # Dependencies come first in post-DFS order.
    assert post_order[0] == src.op
    assert post_order[1] == copied.op
# Run the schedule-graph tests directly when invoked as a script.
if __name__ == "__main__":
    test_scan()
    test_create_read_graph()
    test_scan_fix_point()
| 5,858 | 39.972028 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_utilities.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
# 128x128x128 matmul with the reduction init factored into a separate
# "init" block (so tests can query two distinct child blocks).
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                # NOTE: B is indexed [vj, vk], i.e. this computes C = A * B^T.
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# 1024x1024 matmul (C = A @ B) followed by an elementwise ReLU into D;
# C is an intermediate buffer connecting the two blocks.
@T.prim_func
def matmul_relu(a: T.handle, b: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (1024, 1024))
    B = T.match_buffer(b, (1024, 1024))
    C = T.alloc_buffer((1024, 1024))  # intermediate matmul result
    D = T.match_buffer(d, (1024, 1024))
    for i, j, k in T.grid(1024, 1024, 1024):
        with T.block("matmul"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    for i, j in T.grid(1024, 1024):
        with T.block("relu"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = T.max(C[vi, vj], 0.0)
# `matmul_relu` with annotations attached to the two outermost matmul loops —
# the reference IR that test_annotate_unannotate_loop compares against after
# its sch.annotate(...) calls.
@T.prim_func
def matmul_relu_ann1(a: T.handle, b: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (1024, 1024))
    B = T.match_buffer(b, (1024, 1024))
    C = T.alloc_buffer((1024, 1024))
    D = T.match_buffer(d, (1024, 1024))
    for i in T.serial(0, 1024, annotations={"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}}):
        for j in T.serial(0, 1024, annotations={"test2": 612, "test3": ["aa", 1]}):
            for k in T.serial(0, 1024):
                with T.block("matmul"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    with T.init():
                        C[vi, vj] = 0.0
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    for i, j in T.grid(1024, 1024):
        with T.block("relu"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = T.max(C[vi, vj], 0.0)
# `matmul_relu` with annotations attached to the blocks (via T.block_attr)
# rather than the loops — the reference IR that test_annotate_unannotate_block
# compares against after its sch.annotate(...) calls.
@T.prim_func
def matmul_relu_ann2(a: T.handle, b: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (1024, 1024))
    B = T.match_buffer(b, (1024, 1024))
    C = T.alloc_buffer((1024, 1024))
    D = T.match_buffer(d, (1024, 1024))
    for i, j, k in T.grid(1024, 1024, 1024):
        with T.block("matmul"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            T.block_attr({"test1": "aaa", "test4": {"arr": [0, 0], "key": 3}})
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
    for i, j in T.grid(1024, 1024):
        with T.block("relu"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.block_attr({"test2": 0.22, "test3": ["aa", 1]})
            D[vi, vj] = T.max(C[vi, vj], 0.0)
# IRModule with two identical kernels (each copies A into B, despite the
# "add" names). Both contain a block named "init", which makes an
# unqualified get_block("init") ambiguous — exercised by
# test_tir_schedule_work_on.
@tvm.script.ir_module
class ModuleWithMultipleFuncs:
    @T.prim_func
    def vector_add(
        A: T.Buffer(128, "float32"),
        B: T.Buffer(128, "float32"),
    ) -> None:
        for i in range(128):
            with T.block("init"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]

    @T.prim_func
    def vector_add_2(
        A: T.Buffer(128, "float32"),
        B: T.Buffer(128, "float32"),
    ) -> None:
        for i in range(128):
            with T.block("init"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]
# Multi-value reduction: a single block accumulates both the row sum (v0)
# and the row sum-of-squares (v1) of `data`; "T_add" then emits their
# elementwise sum. Two buffers connect the same pair of blocks, which the
# producer/consumer tests below rely on.
@T.prim_func
def tuple_reduction(data: T.Buffer((4, 32), "float32"), T_add: T.Buffer((4,), "float32")) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    # body
    with T.block("root"):
        T.reads()
        T.writes()
        data_red_temp_v0 = T.alloc_buffer([4], dtype="float32")
        data_red_temp_v1 = T.alloc_buffer([4], dtype="float32")
        for i0, i1 in T.grid(4, 32):
            with T.block("data_red_temp"):
                ax0, k1 = T.axis.remap("SR", [i0, i1])
                T.reads(data[ax0, k1])
                T.writes(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                with T.init():
                    data_red_temp_v0[ax0] = T.float32(0)
                    data_red_temp_v1[ax0] = T.float32(0)
                v_data_red_temp_v0: T.float32 = data_red_temp_v0[ax0] + data[ax0, k1]
                v_data_red_temp_v1: T.float32 = (
                    data_red_temp_v1[ax0] + data[ax0, k1] * data[ax0, k1]
                )
                data_red_temp_v0[ax0] = v_data_red_temp_v0
                data_red_temp_v1[ax0] = v_data_red_temp_v1
        for i0 in range(4):
            with T.block("T_add"):
                ax0 = T.axis.remap("S", [i0])
                T.reads(data_red_temp_v0[ax0], data_red_temp_v1[ax0])
                T.writes(T_add[ax0])
                T_add[ax0] = data_red_temp_v0[ax0] + data_red_temp_v1[ax0]
# pylint: enable=no-member,invalid-name,unused-variable
# Fixture: run each test twice — once passing block objects to the Schedule
# API ("block_obj" -> False) and once passing block names ("block_name" -> True).
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_tir_schedule_creation():
    """A Schedule built from a PrimFunc and one built from an IRModule must
    wrap the same underlying function (covers Schedule.__init__, .mod, .state)."""
    from_func = tir.Schedule(matmul, debug_mask="all")
    from_mod = tir.Schedule(IRModule({"main": matmul}), debug_mask="all")
    pairs = [
        (from_func.mod["main"], from_mod.mod["main"]),
        (from_func.state.mod["main"], from_mod.state.mod["main"]),
    ]
    for lhs, rhs in pairs:
        assert lhs.same_as(rhs)
def test_tir_schedule_get_block():
    """get_block / get_sref / get must all agree on the "update" block."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update_rv = sch.get_block(name="update")
    update_sref = sch.get_sref(update_rv)
    update_stmt = sch.get(update_rv)
    assert update_stmt.name_hint == "update"
    assert update_sref.stmt.same_as(update_stmt)
    assert sch.state.get_sref(update_stmt).same_as(update_sref)
    # "update" is the second statement under the i/j loop nest of `matmul`.
    assert update_stmt.same_as(matmul.body.block.body.body.body[1].body.block)
def test_tir_schedule_work_on():
    """work_on() selects which function name-based block lookups target."""
    schedule = tir.Schedule(ModuleWithMultipleFuncs, debug_mask="all")
    # Both functions contain an "init" block, so an unscoped lookup is ambiguous.
    with pytest.raises(ValueError, match="does not know which function to be working on"):
        schedule.get_block(name="init")
    schedule.work_on(func_name="vector_add")
    schedule.get_block(name="init")  # unambiguous once scoped
    assert schedule.func_working_on == schedule.mod.get_global_var("vector_add")
def test_tir_schedule_get_loops(use_block_name):
    """get_loops returns the i/j/k loops surrounding the "update" block."""
    sch = tir.Schedule(matmul, debug_mask="all")
    target = "update" if use_block_name else sch.get_block(name="update")
    loop_rvs = sch.get_loops(target)
    assert [sch.get(rv).loop_var.name for rv in loop_rvs] == ["i", "j", "k"]
def test_tir_schedule_copy_1(use_block_name):
    """Random variables minted by a schedule stay resolvable through its copy."""
    original = tir.Schedule(matmul, debug_mask="all")
    block_rv = original.get_block(name="update")
    loop_rvs = original.get_loops(block="update" if use_block_name else block_rv)
    expected_names = ["i", "j", "k"]
    assert [original.get(rv).loop_var.name for rv in loop_rvs] == expected_names
    clone = original.copy()
    # Both the block RV and every loop RV resolve identically in the copy.
    assert clone.get(block_rv).name_hint == "update"
    assert [clone.get(rv).loop_var.name for rv in loop_rvs] == expected_names
def test_tir_schedule_copy_2():
    """Copies are fully isolated: the copy owns distinct srefs (initially
    pointing at the same stmts), and a transformation applied to one
    schedule is invisible to — and its new RVs unusable in — the other."""
    sch = tir.Schedule(mod=matmul, debug_mask="all")
    i, j, k = sch.get_loops(sch.get_block("update"))
    sch_copy = sch.copy()
    # Distinct sref objects...
    assert not sch.get_sref(i).same_as(sch_copy.get_sref(i))
    assert not sch.get_sref(j).same_as(sch_copy.get_sref(j))
    assert not sch.get_sref(k).same_as(sch_copy.get_sref(k))
    # ...that initially refer to the same loop statements.
    assert sch.get_sref(i).stmt.same_as(sch_copy.get_sref(i).stmt)
    assert sch.get_sref(j).stmt.same_as(sch_copy.get_sref(j).stmt)
    assert sch.get_sref(k).stmt.same_as(sch_copy.get_sref(k).stmt)
    # Diverge: split i on the original, j on the copy.
    i_0, i_1 = sch.split(i, factors=[None, 64])
    j_0, j_1 = sch_copy.split(j, factors=[None, 32])
    assert sch.get_sref(i_0).stmt.extent == 2
    assert sch.get_sref(i_1).stmt.extent == 64
    # RVs minted by one schedule must not resolve in the other.
    with pytest.raises(IndexError):
        sch_copy.get_sref(i_0)
    with pytest.raises(IndexError):
        sch_copy.get_sref(i_1)
    with pytest.raises(IndexError):
        sch.get_sref(j_0)
    with pytest.raises(IndexError):
        sch.get_sref(j_1)
    assert sch_copy.get_sref(j_0).stmt.extent == 4
    assert sch_copy.get_sref(j_1).stmt.extent == 32
    verify_trace_roundtrip(sch, mod=matmul)
    verify_trace_roundtrip(sch_copy, mod=matmul)
def test_tir_schedule_remove_rv():
    """remove_rv invalidates a random variable: later get() raises IndexError."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update_rv = sch.get_block(name="update")
    assert sch.get(update_rv).name_hint == "update"
    sch.remove_rv(update_rv)
    with pytest.raises(IndexError):
        sch.get(update_rv)
def test_get_child_blocks():
    """"init" and "update" are the child blocks both of the outer loop and
    of the root block of `matmul`."""
    sch = tir.Schedule(matmul, debug_mask="all")
    init = sch.get_block("init")
    update = sch.get_block("update")
    outer_loop = sch.get_loops(init)[0]
    # Same result whether the parent is the loop or the root block.
    for parent in (outer_loop, sch.get_block("root")):
        children = sch.get_child_blocks(parent)
        assert len(children) == 2
        assert sch.get(children[0]) == sch.get(init)
        assert sch.get(children[1]) == sch.get(update)
def test_get_producers(use_block_name):
    """The sole producer of "relu" in `matmul_relu` is the "matmul" block."""
    sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
    consumer = "relu" if use_block_name else sch.get_block("relu")
    producers = sch.get_producers(consumer)
    assert len(producers) == 1
    assert tvm.ir.structural_equal(
        sch.get_sref(producers[0]).stmt,
        sch.get_sref(sch.get_block("matmul")).stmt,
    )
    verify_trace_roundtrip(sch, mod=matmul_relu)
def test_get_producers_multiple_buffer_depdencies(use_block_name):
    """A producer that feeds a consumer through two buffers is reported once."""
    sch = tir.Schedule(mod=tuple_reduction, debug_mask="all")
    consumer = "T_add" if use_block_name else sch.get_block("T_add")
    producers = sch.get_producers(consumer)
    assert len(producers) == 1
    assert tvm.ir.structural_equal(
        sch.get_sref(producers[0]).stmt,
        sch.get_sref(sch.get_block("data_red_temp")).stmt,
    )
def test_get_consumers(use_block_name):
    """The sole consumer of "matmul" in `matmul_relu` is the "relu" block."""
    sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
    producer = "matmul" if use_block_name else sch.get_block("matmul")
    consumers = sch.get_consumers(producer)
    assert len(consumers) == 1
    assert tvm.ir.structural_equal(
        sch.get_sref(consumers[0]).stmt,
        sch.get_sref(sch.get_block("relu")).stmt,
    )
    verify_trace_roundtrip(sch, mod=matmul_relu)
def test_get_consumers_multiple_buffer_depdencies(use_block_name):
    """A consumer that is fed through two buffers is reported exactly once."""
    sch = tir.Schedule(mod=tuple_reduction, debug_mask="all")
    producer = "data_red_temp" if use_block_name else sch.get_block("data_red_temp")
    consumers = sch.get_consumers(producer)
    assert len(consumers) == 1
    assert tvm.ir.structural_equal(
        sch.get_sref(consumers[0]).stmt,
        sch.get_sref(sch.get_block("T_add")).stmt,
    )
def test_annotate_unannotate_loop():
    """Loop annotate/unannotate round-trip on `matmul_relu`.

    After the four annotate() calls the module must be structurally equal to
    the pre-annotated reference `matmul_relu_ann1`; unannotating the same
    keys must leave a schedule whose trace still replays cleanly.
    """
    # Fix: dropped the unused local `relu = sch.get_block("relu")`, which only
    # injected a spurious GetBlock instruction into the recorded trace.
    sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
    matmul = sch.get_block("matmul")
    # Annotate the two outermost loops of the matmul block.
    sch.annotate(sch.get_loops(matmul)[0], "test1", "aaa")
    sch.annotate(sch.get_loops(matmul)[1], "test2", 612)
    sch.annotate(sch.get_loops(matmul)[1], "test3", ["aa", 1])
    sch.annotate(sch.get_loops(matmul)[0], "test4", {"arr": [0, 0], "key": 3})
    tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann1)
    verify_trace_roundtrip(sch=sch, mod=matmul_relu)
    sch.unannotate(sch.get_loops(matmul)[0], "test1")
    sch.unannotate(sch.get_loops(matmul)[1], "test2")
    sch.unannotate(sch.get_loops(matmul)[1], "test3")
    sch.unannotate(sch.get_loops(matmul)[0], "test4")
    verify_trace_roundtrip(sch=sch, mod=matmul_relu)
def test_annotate_unannotate_block():
    """Block annotate/unannotate round-trip: after annotating, the IR must
    equal `matmul_relu_ann2`; unannotating must replay cleanly."""
    sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
    matmul_block = sch.get_block("matmul")
    relu_block = sch.get_block("relu")
    # (block, key, value) triples, applied in this exact order.
    annotations = [
        (matmul_block, "test1", "aaa"),
        (relu_block, "test2", 0.22),
        (relu_block, "test3", ["aa", 1]),
        (matmul_block, "test4", {"arr": [0, 0], "key": 3}),
    ]
    for block, key, value in annotations:
        sch.annotate(block, key, value)
    tvm.ir.assert_structural_equal(sch.mod["main"], matmul_relu_ann2)
    verify_trace_roundtrip(sch=sch, mod=matmul_relu)
    for block, key, _ in annotations:
        sch.unannotate(block, key)
    verify_trace_roundtrip(sch=sch, mod=matmul_relu)
def test_get_output_blocks_single_output():
    """Only the "relu" block writes an output buffer in `matmul_relu`."""
    sch = tir.Schedule(mod=matmul_relu, debug_mask="all")
    outputs = sch.get_output_blocks("root")
    assert len(outputs) == 1, "Unexpected number of blocks when 1 was expected"
    only_stmt = sch.get(outputs[0])
    assert only_stmt.name_hint == "relu"
    assert sch.get(sch.get_block("relu")).same_as(only_stmt)
def test_get_output_blocks_multiple_outputs():
    """Both "init" and "update" write the output buffer C in `matmul`."""
    sch = tir.Schedule(mod=matmul, debug_mask="all")
    outputs = sch.get_output_blocks("root")
    assert len(outputs) == 2, "Unexpected number of blocks when 2 were expected"
    for output_rv, expected_name in zip(outputs, ("init", "update")):
        stmt = sch.get(output_rv)
        assert stmt.name_hint == expected_name
        assert sch.get(sch.get_block(expected_name)).same_as(stmt)
def test_get_output_blocks_nested():
    """Output-block search descends into nested blocks: both the outer
    "blockized_B" wrapper and the inner "B" write the output buffer."""

    @T.prim_func
    def blockized(
        A: T.Buffer((128, 128), "float32"),
        B: T.Buffer((128, 128), "float32"),
    ) -> None:
        with T.block("blockized_B"):
            vio = T.axis.spatial(1, 0)
            vjo = T.axis.spatial(1, 0)
            for i, j in T.grid(128, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i, j])
                    B[vi, vj] = A[vi, vj] * 2.0

    # Searching from the root reports the wrapper and the inner block.
    sch = tir.Schedule(mod=blockized, debug_mask="all")
    output_blocks = sch.get_output_blocks("root")
    assert len(output_blocks) == 2, "Unexpected number of blocks when 2 were expected"
    block_1 = sch.get(output_blocks[0])
    assert block_1.name_hint == "blockized_B"
    block_2 = sch.get(output_blocks[1])
    assert block_2.name_hint == "B"
    blockized_block = sch.get_block("blockized_B")
    assert sch.get(blockized_block).same_as(block_1)
    b_block = sch.get_block("B")
    assert sch.get(b_block).same_as(block_2)
    # Searching from "blockized_B" itself reports only the inner block.
    sch = tir.Schedule(mod=blockized, debug_mask="all")
    output_blocks = sch.get_output_blocks("blockized_B")
    assert len(output_blocks) == 1, "Unexpected number of blocks when 1 were expected"
    block = sch.get(output_blocks[0])
    assert block.name_hint == "B"
    b_block = sch.get_block("B")
    assert sch.get(b_block).same_as(block)
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 15,809 | 36.375887 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_type_annotation_checker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test type checker based on python's type annotations"""
import sys
from typing import Dict, List, Tuple, Union, Callable
import pytest
import _pytest
import tvm
from tvm.tir.schedule._type_checker import type_checked
def int_func(x: int) -> int:
    """Fully annotated int->int sample used as a Callable test case (doubles x)."""
    return x + x
def str_func(x: str) -> str:
    """Fully annotated str->str sample used as a Callable test case (repeats x twice)."""
    return x + x
# Table-driven cases for @type_checked: each entry pairs a type annotation
# with values that must be accepted ("positive_cases") and values that must
# be rejected ("negative_cases").
test_cases = [
    {
        "type_annotation": int,
        "positive_cases": [5],
        "negative_cases": ["5"],
    },
    {
        "type_annotation": List[int],
        "positive_cases": [
            [5],
            [],
            # Tuples are allowed to be used as lists, because both are
            # represented in FFI as tvm::runtime::Array.
            (1, 2, 3),
        ],
        "negative_cases": [
            None,
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Dict[str, int],
        "positive_cases": [
            {"key1": 0, "key2": 1, "key3": -1},
        ],
        "negative_cases": [None, [1], {1: "1"}],
    },
    {
        # Fixed-length tuple of exactly one int.
        "type_annotation": Tuple[int],
        "positive_cases": [
            (5,),
        ],
        "negative_cases": [
            None,
            (1, 2, 3),
            [1],
            5,
            ["5"],
        ],
    },
    {
        "type_annotation": Tuple[str, int],
        "positive_cases": [
            ("x", 5),
        ],
        "negative_cases": [
            42,
            ("x", 5, 6),
            ("x", 5, "y"),
            ("x", 5.0),
            (None, 5),
        ],
    },
    {
        "type_annotation": Union[str, int],
        "positive_cases": [
            "x",
            5,
        ],
        "negative_cases": [
            5.0,
            ("x", 5, 6),
            None,
        ],
    },
    {
        "type_annotation": Callable,
        "positive_cases": [str_func, int_func],
        "negative_cases": [
            None,
            "x",
            42,
        ],
    },
    {
        "type_annotation": Callable[[int], int],
        "positive_cases": [int_func],
        "negative_cases": [
            None,
            "x",
            42,
            # Known limitation: the argument/return types of a Callable are
            # not validated, so a mismatched signature is an expected failure.
            pytest.param(
                str_func,
                marks=pytest.mark.xfail(
                    reason="Signature of Callable arguments not currently checked"
                ),
            ),
        ],
    },
]
def make_parametrization(type_annotation, case):
    """Wrap an (annotation, case) pair into a pytest.param with a readable id,
    preserving any marks carried by an already-wrapped ParameterSet."""
    marks = []
    if isinstance(case, _pytest.mark.structures.ParameterSet):
        marks = case.marks
        (case,) = case.values
    try:
        annotation_name = type_annotation.__name__
    except AttributeError:
        # typing constructs (List[int], Union[...], ...) have no __name__.
        annotation_name = str(type_annotation).replace("typing.", "")
    case_name = case.__name__ if hasattr(case, "__name__") else str(case)
    return pytest.param(
        type_annotation,
        case,
        marks=marks,
        id=f"{annotation_name}, {case_name}",
    )
# Flatten the table into individual pytest parametrizations, one per
# (annotation, value) pair.
positive_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["positive_cases"]
]
negative_cases = [
    make_parametrization(config["type_annotation"], case)
    for config in test_cases
    for case in config["negative_cases"]
]
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    positive_cases,
)
def test_matches_type(type_annotation, case):
    """A value matching the annotation must pass through @type_checked."""

    # The parametrized annotation is captured via the closure and applied to
    # the wrapped function's parameter.
    @type_checked
    def func(_: type_annotation):
        pass

    func(case)
@pytest.mark.parametrize(
    ["type_annotation", "case"],
    negative_cases,
)
def test_not_matches(type_annotation, case):
    """A value violating the annotation must raise TypeError."""

    @type_checked
    def func(_: type_annotation):
        pass

    with pytest.raises(TypeError):
        func(case)
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 4,510 | 22.494792 | 82 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_mlt_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm import meta_schedule as ms
from tvm import te
from tvm.ir import assert_structural_equal
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir.tensor_intrin.arm_cpu import DP4A_INTRIN
from tvm.tir.tensor_intrin.x86 import AVX512_DOT_16x4_INTRIN as AVX512_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
def test_x86_conv2d_nchwc(intrin=VNNI_INTRIN, target="llvm -mcpu=cascadelake -num-cores=4"):
    # int8 NCHWc conv2d workload: 1x1 kernel (kh/kw extents are 1); input
    # channels are split 4 (outer) x 4 (f_inner) x 4 (s_inner), and the
    # 4-wide ic_s_inner axis is the innermost reduction the tested dot-product
    # intrinsic is expected to tensorize.
    @T.prim_func
    def conv2d_nchwc(
        placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
        placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
        conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                (
                    n,
                    oc_chunk,
                    oh,
                    ow,
                    oc_block,
                    kh,
                    kw,
                    ic_outer,
                    ic_f_inner,
                    ic_s_inner,
                ) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                # Accumulate in int32: uint8 activation * int8 weight.
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32"
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )
# fmt: off
@T.prim_func
def x86_conv2d_nchwc_0(placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"), placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"), conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32")) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
# with T.block("root"):
conv2d_NCHWc_int8_global = T.alloc_buffer((1, 16, 56, 56, 16), "int32")
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1 in T.grid(1, 8, 28, 56, 1, 1, 2, 1, 1, 1):
for i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_0 + i3_1 + i3_2 + i3_3)
oc_block_o = T.axis.spatial(1, i4_0_0 + i4_0_1 + i4_0_2 + i4_0_3)
kh = T.axis.reduce(1, i5_0 + i5_1)
kw = T.axis.reduce(1, i6_0 + i6_1)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, i9_0_0 + i9_0_1)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4:ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, 0:16])
T.block_attr({"meta_schedule.auto_tensorize": intrin})
with T.init():
for i4_1 in range(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] + T.Cast("int32", placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i]) * T.Cast("int32", placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 1, 2, 1, 16):
with T.block("conv2d_NCHWc_int8_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(16, i1_0 * 2 + i1_1 + ax1)
v2 = T.axis.spatial(56, i2_0 * 2 + ax2)
v3 = T.axis.spatial(56, i3_0 + ax3)
v4 = T.axis.spatial(16, ax4)
T.reads(conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4])
T.writes(conv2d_NCHWc_int8[v0, v1, v2, v3, v4])
conv2d_NCHWc_int8[v0, v1, v2, v3, v4] = conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4]
@T.prim_func
def x86_conv2d_nchwc_1(placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"), placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"), conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32")) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
# with T.block("root"):
conv2d_NCHWc_int8_global = T.alloc_buffer((1, 16, 56, 56, 16), "int32")
for i0_0, i1_0, i2_0, i3_0, i4_0_0 in T.grid(1, 8, 28, 56, 1):
for i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 2, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_0 + i3_1 + i3_2 + i3_3)
oc_block_o = T.axis.spatial(1, i4_0_0 + i4_0_1 + i4_0_2 + i4_0_3)
kh = T.axis.reduce(1, i5_0 + i5_1)
kw = T.axis.reduce(1, i6_0 + i6_1)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, i9_0_0 + i9_0_1)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4:ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, 0:16])
T.block_attr({"meta_schedule.auto_tensorize": intrin})
with T.init():
for i4_1 in range(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8_global[n, oc_chunk, oh, ow, oc_block_i] + T.Cast("int32", placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i]) * T.Cast("int32", placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 2, 2, 1, 16):
with T.block("conv2d_NCHWc_int8_global"):
v0 = T.axis.spatial(1, ax0)
v1 = T.axis.spatial(16, i1_0 * 2 + ax1)
v2 = T.axis.spatial(56, i2_0 * 2 + ax2)
v3 = T.axis.spatial(56, i3_0 + ax3)
v4 = T.axis.spatial(16, ax4)
T.reads(conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4])
T.writes(conv2d_NCHWc_int8[v0, v1, v2, v3, v4])
conv2d_NCHWc_int8[v0, v1, v2, v3, v4] = conv2d_NCHWc_int8_global[v0, v1, v2, v3, v4]
@T.prim_func
def x86_conv2d_nchwc_2(placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"), placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"), conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32")) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
# with T.block("root"):
for i0_0, i1_0, i2_0, i3_0, i4_0_0, i0_1, i1_1, i2_1, i3_1, i4_0_1, i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 8, 28, 56, 1, 1, 2, 1, 1, 1, 1, 1, 1, 4, 1, 1, 1, 2, 1, 1, 1, 1, 4, 1, 1, 1, 1, 1, 1, 1):
with T.block("conv2d_NCHWc_int8_o"):
n = T.axis.spatial(1, i0_0 + i0_1 + i0_2 + i0_3)
oc_chunk = T.axis.spatial(16, i1_0 * 2 + i1_1 + i1_2 + i1_3)
oh = T.axis.spatial(56, i2_0 * 2 + i2_1 * 2 + i2_2 + i2_3)
ow = T.axis.spatial(56, i3_0 + i3_1 + i3_2 + i3_3)
oc_block_o = T.axis.spatial(1, i4_0_0 + i4_0_1 + i4_0_2 + i4_0_3)
kh = T.axis.reduce(1, i5_0 + i5_1)
kw = T.axis.reduce(1, i6_0 + i6_1)
ic_outer = T.axis.reduce(4, i7_0 * 4 + i7_1)
ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
ic_s_inner_o = T.axis.reduce(1, i9_0_0 + i9_0_1)
T.reads(placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4:ic_f_inner * 4 + 4], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0:16, 0:4])
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0:16])
T.block_attr({"meta_schedule.auto_tensorize": intrin})
with T.init():
for i4_1 in range(16):
with T.block("conv2d_NCHWc_int8_init"):
oc_block_i_init = T.axis.spatial(16, i4_1)
T.reads()
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init])
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init] = 0
for i4_1, i9_1 in T.grid(16, 4):
with T.block("conv2d_NCHWc_int8"):
oc_block_i, ic_s_inner_i = T.axis.remap("SR", [i4_1, i9_1])
T.reads(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i], placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i], placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i])
T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i] = conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i] + T.Cast("int32", placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner_i]) * T.Cast("int32", placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block_i, ic_s_inner_i])
# fmt: on
decision_0 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
decision_1 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
decision_2 = [
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [8, 2, 1, 1]),
("SamplePerfectTile", [28, 1, 2, 1]),
("SamplePerfectTile", [56, 1, 1, 1]),
("SamplePerfectTile", [1, 1, 1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 1]),
("SamplePerfectTile", [1, 4]),
("SamplePerfectTile", [4, 1]),
("SamplePerfectTile", [1, 1]),
]
mod = conv2d_nchwc
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target(target),
types=None,
sch_rules=[
ms.schedule_rule.MultiLevelTilingWithIntrin(
intrin,
structure="SSRSRS",
tile_binds=None,
max_innermost_factor=64,
vector_load_lens=None,
reuse_read=None,
reuse_write=ms.schedule_rule.ReuseType(req="may", levels=[1, 2], scope="global"),
),
],
)
check_sketches(
mod,
sketches=actual,
expected_mods=[x86_conv2d_nchwc_0, x86_conv2d_nchwc_1, x86_conv2d_nchwc_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def _check_dp4a_dense(m, n, k, in_dtype, out_dtype, expected_mods, expected_decisions):
    """Build an (m, k) x (n, k)^T dense workload and check the design space
    produced by MultiLevelTilingWithIntrin with the DP4A intrinsic.

    When expected_mods is None the rule is expected not to apply: the single
    resulting sketch must be the unmodified input module.
    """

    def _dense(m, n, k, in_dtype, out_dtype):
        # compute[i, j] = sum_k X[i, k] * W[j, k], accumulated in out_dtype.
        X = te.placeholder((m, k), name="X", dtype=in_dtype)
        W = te.placeholder((n, k), name="W", dtype=in_dtype)
        ak = te.reduce_axis((0, k), name="k")
        matmul = te.compute(
            (m, n),
            lambda i, j: te.sum(
                X[i, ak].astype(out_dtype) * W[j, ak].astype(out_dtype),
                axis=ak,
            ),
            name="compute",
        )
        return te.create_prim_func([X, W, matmul])

    mod = _dense(m, n, k, in_dtype, out_dtype)
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("cuda --arch=sm_70"),
        types=None,
        sch_rules=[
            ms.schedule_rule.MultiLevelTilingWithIntrin(
                DP4A_INTRIN,
                structure="SSSRRSRS",
                tile_binds=["blockIdx.x", "vthread.x", "threadIdx.x"],
                max_innermost_factor=64,
                vector_load_lens=[1, 2, 3, 4],
                reuse_read=ms.schedule_rule.ReuseType(req="must", levels=[4], scope="shared"),
                reuse_write=ms.schedule_rule.ReuseType(req="must", levels=[3], scope="local"),
            )
        ],
    )
    if expected_mods is None:
        # Rule not applicable: the input must come back untouched.
        assert expected_decisions is None
        assert len(actual) == 1
        assert_structural_equal(mod, actual[0].mod["main"])
    else:
        check_sketches(mod, actual, expected_mods, expected_decisions)
def test_dp4a_dense():
    """int8 dense on CUDA: expect one DP4A-tensorized sketch with these decisions."""

    # Expected sketch: "compute_o" is marked for dp4a auto-tensorization, with
    # X/W staged through shared memory and the result accumulated in a local buffer.
    @T.prim_func
    def dp4a_dense_0(
        X: T.Buffer((128, 128), "int8"),
        W: T.Buffer((128, 128), "int8"),
        compute: T.Buffer((128, 128), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        # with T.block("root"):
        compute_local = T.alloc_buffer((128, 128), "int32", scope="local")
        X_shared = T.alloc_buffer((128, 128), "int8", scope="shared")
        W_shared = T.alloc_buffer((128, 128), "int8", scope="shared")
        for i_0_j_0_fused in T.thread_binding(1, thread="blockIdx.x"):
            for i_1_j_1_fused in T.thread_binding(512, thread="vthread.x"):
                for i_2_j_2_fused in T.thread_binding(2, thread="threadIdx.x"):
                    for k_0_0 in range(1):
                        for ax0_ax1_fused in range(16384):
                            with T.block("X_shared"):
                                v0 = T.axis.spatial(128, ax0_ax1_fused // 128)
                                v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
                                T.reads(X[v0, v1])
                                T.writes(X_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                X_shared[v0, v1] = X[v0, v1]
                        for ax0_ax1_fused in range(16384):
                            with T.block("W_shared"):
                                v0 = T.axis.spatial(128, ax0_ax1_fused // 128)
                                v1 = T.axis.spatial(128, ax0_ax1_fused % 128)
                                T.reads(W[v0, v1])
                                T.writes(W_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                W_shared[v0, v1] = W[v0, v1]
                        for k_0_1, i_3, j_3, k_0_2, i_4, j_4 in T.grid(1, 2, 4, 32, 2, 1):
                            with T.block("compute_o"):
                                v_i = T.axis.spatial(
                                    128, i_1_j_1_fused // 32 * 8 + i_2_j_2_fused * 4 + i_3 * 2 + i_4
                                )
                                v_j = T.axis.spatial(128, i_1_j_1_fused % 32 * 4 + j_3 + j_4)
                                v_k_o = T.axis.reduce(32, k_0_0 * 32 + k_0_1 * 32 + k_0_2)
                                T.reads(
                                    X_shared[v_i, v_k_o * 4 : v_k_o * 4 + 4],
                                    W_shared[v_j, v_k_o * 4 : v_k_o * 4 + 4],
                                )
                                T.writes(compute_local[v_i, v_j])
                                T.block_attr({"meta_schedule.auto_tensorize": "dp4a"})
                                with T.init():
                                    with T.block("compute_init"):
                                        T.reads()
                                        T.writes(compute_local[v_i, v_j])
                                        compute_local[v_i, v_j] = 0
                                # Inner 4-wide reduction: the unit the dp4a intrinsic replaces.
                                for k_1 in range(4):
                                    with T.block("compute"):
                                        v_k_i = T.axis.reduce(4, k_1)
                                        T.reads(
                                            compute_local[v_i, v_j],
                                            X_shared[v_i, v_k_o * 4 + v_k_i],
                                            W_shared[v_j, v_k_o * 4 + v_k_i],
                                        )
                                        T.writes(compute_local[v_i, v_j])
                                        T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"})
                                        compute_local[v_i, v_j] = compute_local[v_i, v_j] + T.Cast(
                                            "int32", X_shared[v_i, v_k_o * 4 + v_k_i]
                                        ) * T.Cast("int32", W_shared[v_j, v_k_o * 4 + v_k_i])
                        # Write the locally accumulated tile back to global memory.
                        for ax0, ax1 in T.grid(4, 4):
                            with T.block("compute_local"):
                                v0 = T.axis.spatial(
                                    128, i_1_j_1_fused // 32 * 8 + i_2_j_2_fused * 4 + ax0
                                )
                                v1 = T.axis.spatial(128, i_1_j_1_fused % 32 * 4 + ax1)
                                T.reads(compute_local[v0, v1])
                                T.writes(compute[v0, v1])
                                compute[v0, v1] = compute_local[v0, v1]

    # Sampled tile-split / categorical choices that reproduce dp4a_dense_0 above.
    decision_0 = [
        ("SamplePerfectTile", [1, 16, 2, 2, 2]),
        ("SamplePerfectTile", [1, 32, 1, 4, 1]),
        ("SamplePerfectTile", [1, 1, 32]),
        ("SampleCategorical", 0),
        ("SampleCategorical", 0),
    ]
    _check_dp4a_dense(
        m=128,
        n=128,
        k=128,
        in_dtype="int8",
        out_dtype="int32",
        expected_mods=[dp4a_dense_0],
        expected_decisions=[decision_0],
    )
def test_dp4a_dense_no_tensorize_1():
    """float32 dense: no DP4A sketch is expected, so the space stays unmodified."""
    # Positional args: m, n, k, in_dtype, out_dtype, expected_mods, expected_decisions.
    _check_dp4a_dense(128, 128, 128, "float32", "float32", None, None)
def test_dp4a_dense_no_tensorize_2():
    """int8 dense with k=127 (not a multiple of 4): no DP4A sketch is expected."""
    # Positional args: m, n, k, in_dtype, out_dtype, expected_mods, expected_decisions.
    _check_dp4a_dense(127, 127, 127, "int8", "int32", None, None)
# Allow running this file directly (outside pytest); exercises every test above.
if __name__ == "__main__":
    test_x86_conv2d_nchwc()
    test_x86_conv2d_nchwc(AVX512_INTRIN, "llvm -mcpu=skylake-avx512 -num-cores=4")
    test_dp4a_dense()
    test_dp4a_dense_no_tensorize_1()
    test_dp4a_dense_no_tensorize_2()
| 23,345 | 54.191489 | 337 | py |
# File: tests/python/unittest/test_object_path.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.runtime import object_path
from tvm.runtime.object_path import ObjectPath
def test_root_path():
    """An unnamed root is a RootPath of length 1 with no parent."""
    unnamed = ObjectPath.root()
    assert isinstance(unnamed, object_path.RootPath)
    assert str(unnamed) == "<root>"
    assert len(unnamed) == 1
    # Two unnamed roots compare equal.
    assert unnamed == ObjectPath.root()
    assert unnamed.parent is None
def test_named_root_path():
    """A named root renders as its name and is distinct from the unnamed root."""
    named = ObjectPath.root("base_name")
    assert isinstance(named, object_path.RootPath)
    assert str(named) == "base_name"
    assert len(named) == 1
    # Equality is by name: differs from the anonymous root, equal to a same-named one.
    assert named != ObjectPath.root()
    assert named == ObjectPath.root("base_name")
    assert named.parent is None
def test_path_attr():
    """attr() appends an AttributeAccessPath node rendered as ``.name``."""
    base = ObjectPath.root()
    attr_path = base.attr("foo")
    assert isinstance(attr_path, object_path.AttributeAccessPath)
    assert str(attr_path) == "<root>.foo"
    assert len(attr_path) == 2
    assert attr_path.parent == ObjectPath.root()
def test_path_attr_unknown():
    """attr(None) yields an UnknownAttributeAccessPath placeholder node."""
    base = ObjectPath.root()
    unknown = base.attr(None)
    assert isinstance(unknown, object_path.UnknownAttributeAccessPath)
    assert str(unknown) == "<root>.<unknown attribute>"
    assert len(unknown) == 2
    assert unknown.parent == ObjectPath.root()
def test_path_array_index():
    """array_index(i) appends an ArrayIndexPath rendered as ``[i]``."""
    base = ObjectPath.root()
    indexed = base.array_index(2)
    assert isinstance(indexed, object_path.ArrayIndexPath)
    assert str(indexed) == "<root>[2]"
    assert len(indexed) == 2
    assert indexed.parent == ObjectPath.root()
def test_path_missing_array_element():
    """missing_array_element(i) marks an absent element at index i."""
    base = ObjectPath.root()
    missing = base.missing_array_element(2)
    assert isinstance(missing, object_path.MissingArrayElementPath)
    assert str(missing) == "<root>[<missing element #2>]"
    assert len(missing) == 2
    assert missing.parent == ObjectPath.root()
def test_path_map_value():
    """map_value(key) appends a MapValuePath rendered as ``["key"]``."""
    base = ObjectPath.root()
    mapped = base.map_value("foo")
    assert isinstance(mapped, object_path.MapValuePath)
    assert str(mapped) == '<root>["foo"]'
    assert len(mapped) == 2
    assert mapped.parent == ObjectPath.root()
def test_path_missing_map_entry():
    """missing_map_entry() marks a lookup whose key is absent from the map."""
    base = ObjectPath.root()
    missing = base.missing_map_entry()
    assert isinstance(missing, object_path.MissingMapEntryPath)
    assert str(missing) == "<root>[<missing entry>]"
    assert len(missing) == 2
    assert missing.parent == ObjectPath.root()
# Each tuple is (a, b, expected): a.is_prefix_of(b). A path is its own prefix,
# and prefix-ness is direction-sensitive (root is a prefix of root.attr, not vice versa).
@pytest.mark.parametrize(
    "a, b, expected",
    [
        (ObjectPath.root(), ObjectPath.root(), True),
        (ObjectPath.root(), ObjectPath.root().attr("foo"), True),
        (ObjectPath.root().attr("foo"), ObjectPath.root(), False),
        (ObjectPath.root().attr("foo"), ObjectPath.root().attr("foo"), True),
        (ObjectPath.root().attr("bar"), ObjectPath.root().attr("foo"), False),
        (ObjectPath.root().attr("foo"), ObjectPath.root().attr("foo").array_index(2), True),
        (ObjectPath.root().attr("foo").array_index(2), ObjectPath.root().attr("foo"), False),
        (ObjectPath.root().attr("foo"), ObjectPath.root().attr("bar").array_index(2), False),
    ],
)
def test_path_is_prefix_of(a, b, expected):
    """Check ObjectPath.is_prefix_of against a table of path pairs."""
    assert a.is_prefix_of(b) == expected
# Canonical list of pairwise-distinct paths; test_path_equal below relies on
# each entry being equal only to itself (compared by list index).
paths_for_equality_test = [
    ObjectPath.root(),
    ObjectPath.root().attr("foo"),
    ObjectPath.root().attr("bar"),
    ObjectPath.root().array_index(3),
    ObjectPath.root().array_index(4),
    ObjectPath.root().missing_array_element(3),
    ObjectPath.root().missing_array_element(4),
    ObjectPath.root().map_value("foo"),
    ObjectPath.root().map_value("bar"),
    ObjectPath.root().missing_map_entry(),
    ObjectPath.root().attr("foo").missing_map_entry(),
]
def make_test_params_for_eq_test():
    """Wrap each canonical path in a pytest.param whose id encodes its index."""
    params = []
    for idx, path in enumerate(paths_for_equality_test):
        params.append(pytest.param(idx, path, id="path{}".format(idx)))
    return params
@pytest.mark.parametrize("a_idx, a_path", make_test_params_for_eq_test())
@pytest.mark.parametrize("b_idx, b_path", make_test_params_for_eq_test())
def test_path_equal(a_idx, a_path, b_idx, b_path):
expected = a_idx == b_idx
result = a_path == b_path
assert result == expected
def test_path_get_prefix():
    """get_prefix(n) returns the first n nodes; out-of-range lengths raise IndexError."""
    p1 = ObjectPath.root()
    p2 = p1.attr("foo")
    p3 = p2.array_index(5)
    # Parent chain mirrors construction order.
    assert p3.parent == p2
    assert p2.parent == p1
    assert p1.parent is None
    assert p2.get_prefix(1) == p1
    assert p3.get_prefix(1) == p1
    assert p3.get_prefix(2) == p2
    assert p3.get_prefix(3) == p3
    # Length 0 is rejected...
    with pytest.raises(IndexError) as e:
        p3.get_prefix(0)
    assert "Prefix length must be at least 1" in str(e.value)
    # ...and so is any length beyond the path itself.
    with pytest.raises(IndexError) as e:
        p3.get_prefix(4)
    assert "Attempted to get a prefix longer than the path itself" in str(e.value)
| 5,270 | 31.94375 | 93 | py |
# File: tests/python/unittest/test_te_schedule.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pickle as pkl
import pytest
import tvm
from tvm import te
def test_schedule_create():
    """Build a schedule with scope/split/compute_at/reorder and round-trip it
    through JSON and pickle, checking the output op body survives unchanged."""
    m = te.size_var("m")
    n = te.size_var("n")
    l = te.size_var("l")
    A = te.placeholder((m, l), name="A")
    B = te.placeholder((n, l), name="B")
    AA = te.compute((m, l), lambda i, j: A[i, j])
    T = te.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
    s = te.create_schedule(T.op)
    s[AA].set_scope("shared")
    xo, xi = s[T].split(T.op.axis[0], factor=10)
    xi1, xi2 = s[T].split(xi, factor=2)
    s[AA].compute_at(s[T], xi1)
    xo, xi = s[AA].split(AA.op.axis[0], factor=10)
    s[T].reorder(xi2, xi1)
    # The untouched axis must still be a leaf itervar after the transforms.
    assert T.op.axis[1] in s[T].leaf_iter_vars
    # Round trip 1: JSON save/load preserves the schedule.
    json_str = tvm.ir.save_json(s)
    s_loaded = tvm.ir.load_json(json_str)
    assert isinstance(s_loaded, tvm.te.schedule.Schedule)
    assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
    # Round trip 2: pickle/unpickle preserves the schedule.
    dump = pkl.dumps(s)
    s_loaded = pkl.loads(dump)
    assert isinstance(s_loaded, tvm.te.schedule.Schedule)
    assert str(s_loaded.outputs[0].body) == str(s.outputs[0].body)
def test_reorder():
    """reorder() rearranges leaf itervars; passing a duplicate IterVar raises."""
    m = te.size_var("m")
    A = te.placeholder((m,), name="A")
    T = te.compute(m, lambda i: A[i + 1])
    s = te.create_schedule(T.op)
    xo, xi = s[T].split(T.op.axis[0], factor=10)
    xi1, xi2 = s[T].split(xi, factor=2)
    order = (xi2, xi1, xo)
    assert tuple(s[T].leaf_iter_vars) != order
    s[T].reorder(*order)
    assert tuple(s[T].leaf_iter_vars) == order
    # A duplicate IterVar must be rejected. Use pytest.raises for consistency
    # with the other negative tests in this file (instead of try/assert False).
    with pytest.raises(tvm.error.TVMError):
        s[T].reorder(xi2, xi1, xi2)
def test_split():
    """split(factor=...) produces (outer, inner) as the new leaf itervars."""
    extent = te.size_var("m")
    src = te.placeholder((extent,), name="A")
    out = te.compute((extent,), lambda i: src[i])
    sch = te.create_schedule(out.op)
    outer, inner = sch[out].split(out.op.axis[0], factor=10)
    assert tuple(sch[out].leaf_iter_vars) == (outer, inner)
def test_tile():
    """tile() splits both axes, leaving (xo, yo, xi, yi) as the leaf order."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    src = te.placeholder((rows, cols), name="A")
    out = te.compute((rows, cols), lambda i, j: src[i, j])
    sch = te.create_schedule(out.op)
    tiled = sch[out].tile(out.op.axis[0], out.op.axis[1], x_factor=10, y_factor=5)
    assert tuple(sch[out].leaf_iter_vars) == tuple(tiled)
def test_fuse():
    """Fusing the two outer tiled axes yields one leaf plus the inner pair."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    src = te.placeholder((rows, cols), name="A")
    out = te.compute((rows, cols), lambda i, j: src[i, j])
    sch = te.create_schedule(out.op)
    xo, yo, xi, yi = sch[out].tile(out.op.axis[0], out.op.axis[1], x_factor=10, y_factor=5)
    fused_axis = sch[out].fuse(xo, yo)
    # A Fuse relation must be recorded on the stage.
    has_fuse_rel = any(isinstance(rel, tvm.te.schedule.Fuse) for rel in sch[out].relations)
    assert has_fuse_rel
    assert tuple(sch[out].leaf_iter_vars) == (fused_axis, xi, yi)
def test_fuse_with_split():
    """Fusing the split inner axis with the next axis leaves (outer, fused)."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    src = te.placeholder((rows, cols), name="A")
    out = te.compute((rows, cols), lambda i, j: src[i, j])
    sch = te.create_schedule(out.op)
    axis_y = out.op.axis[1]
    outer, inner = sch[out].split(out.op.axis[0], factor=10)
    fused_axis = sch[out].fuse(inner, axis_y)
    assert any(isinstance(rel, tvm.te.schedule.Fuse) for rel in sch[out].relations)
    assert tuple(sch[out].leaf_iter_vars) == (outer, fused_axis)
def test_fuse_with_out_of_order_axis():
    """Fusing axes that are not adjacent in leaf order must raise."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    src = te.placeholder((rows, cols), name="A")
    out = te.compute((rows, cols), lambda i, j: src[i, j])
    sch = te.create_schedule(out.op)
    axis_y = out.op.axis[1]
    outer, inner = sch[out].split(out.op.axis[0], factor=10)
    with pytest.raises(RuntimeError):
        sch[out].fuse(outer, axis_y)  # outer and y are not adjacent leaves
def test_fuse_with_out_of_order_axis_with_reorder():
    """After reordering to (y, xo, xi), fusing adjacent (y, xo) works but
    fusing non-adjacent (y, xi) still raises."""
    m = te.size_var("m")
    n = te.size_var("n")
    A = te.placeholder((m, n), name="A")
    T = te.compute((m, n), lambda i, j: A[i, j])
    s = te.create_schedule(T.op)
    y = T.op.axis[1]
    xo, xi = s[T].split(T.op.axis[0], factor=10)
    s[T].reorder(y, xo, xi)
    fused = s[T].fuse(y, xo)  # should be ok
    # Fresh schedule: same reorder, but fuse skips over xo this time.
    s = te.create_schedule(T.op)
    y = T.op.axis[1]
    xo, xi = s[T].split(T.op.axis[0], factor=10)
    s[T].reorder(y, xo, xi)
    with pytest.raises(RuntimeError):
        fused = s[T].fuse(y, xi)  # should throw here
def test_singleton():
    """fuse() with no axes on a 0-d compute creates a Singleton itervar."""
    scalar_in = te.placeholder((), name="A")
    result = te.compute((), lambda: scalar_in() + 1)
    sch = te.create_schedule(result.op)
    only_axis = sch[result].fuse()
    assert any(isinstance(rel, tvm.te.schedule.Singleton) for rel in sch[result].relations)
    assert tuple(sch[result].leaf_iter_vars) == (only_axis,)
    # The schedule must remain picklable after the Singleton is introduced.
    restored = pkl.loads(pkl.dumps(sch))
    assert isinstance(restored, tvm.te.schedule.Schedule)
def test_vectorize():
    """vectorize/unroll record the matching iter types in iter_var_attrs."""
    rows = te.size_var("m")
    cols = te.size_var("n")
    src = te.placeholder((rows, cols), name="A")
    out = te.compute((rows, cols), lambda i, j: src[i, j])
    sch = te.create_schedule(out.op)
    xo, yo, xi, yi = sch[out].tile(out.op.axis[0], out.op.axis[1], x_factor=10, y_factor=5)
    sch[out].vectorize(yi)
    sch[out].unroll(xi)
    assert sch[out].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Unrolled
    assert sch[out].iter_var_attrs[yi].iter_type == tvm.te.schedule.IterVar.Vectorized
def test_vectorize_commreduce():
    """Vectorizing a reduction axis is illegal and must raise."""
    values = te.placeholder((128,), name="V")
    red_axis = te.reduce_axis((0, 128), name="ax")
    total = te.compute((1,), lambda _: te.sum(values[red_axis], axis=[red_axis]))
    sch = te.create_schedule(total.op)
    with pytest.raises(RuntimeError):
        sch[total].vectorize(red_axis)
def test_pragma():
    """pragma() stores keys on the itervar; "vectorize" also sets the iter type."""
    length = 100
    src = te.placeholder((length,), name="A")
    out = te.compute((length,), lambda i: src[i])
    sch = te.create_schedule(out.op)
    outer, inner = sch[out].split(out.op.axis[0], factor=10)
    sch[out].pragma(outer, "pragma1")
    sch[out].pragma(inner, "vectorize")
    assert sch[out].iter_var_attrs[outer].pragma_keys[0].value == "pragma1"
    assert sch[out].iter_var_attrs[inner].iter_type == tvm.te.schedule.IterVar.Vectorized
def test_rfactor():
    """rfactor creates a partial-reduction stage; check its shape and axes for
    the plain, split, and explicit-factor_axis variants."""
    n = te.size_var("n")
    k1 = te.reduce_axis((0, n), name="k1")
    k2 = te.reduce_axis((0, n), name="k2")
    A = te.placeholder((n, n, n), name="A")
    B = te.compute((n,), lambda i: te.sum(A[i, k1, k2], axis=[k1, k2]))
    # Case 1: rfactor over the whole k1 axis.
    s = te.create_schedule(B.op)
    BF = s.rfactor(B, k1)
    assert tuple(BF.shape) == (n, n)
    assert set(BF.op.body[0].axis) == set([k2])
    assert s[B].op.body[0].axis[0].dom.extent == n
    assert len(s[B].all_iter_vars) == 2
    # Case 2: rfactor over the inner half of a split k1 (factor 4).
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(k1, factor=4)
    xo, xi = s[B].split(B.op.axis[0], factor=8)
    BF = s.rfactor(B, ki)
    assert BF.shape[0].value == 4
    assert BF.shape[1] == n
    assert BF.op.body[0].axis[0] == k2
    assert BF.op.body[0].axis[1].var == ko.var
    assert s[B].op.body[0].axis[0].dom.extent.value == 4
    # Case 3: same, but place the factored axis at position 1 (factor_axis=1).
    s = te.create_schedule(B.op)
    ko, ki = s[B].split(k1, factor=4)
    xo, xi = s[B].split(B.op.axis[0], factor=8)
    BF = s.rfactor(B, ki, 1)
    assert n == BF.shape[0]
    assert BF.shape[1].value == 4
    assert BF.op.body[0].axis[0] == k2
    assert BF.op.body[0].axis[1].var == ko.var
    assert s[B].op.body[0].axis[0].dom.extent.value == 4
def test_tensor_intrin():
    """Declare a vadd tensor intrinsic and tensorize a split inner axis with it;
    the lowered body should be a single packed "vadd" call."""
    n = 16
    x = te.placeholder((n,), name="x")
    y = te.placeholder((n,), name="y")
    z = te.compute(x.shape, lambda i: x[i] + y[i], name="z")
    def intrin_func(ins, outs):
        # `ins`/`outs` carry the matched Buffer objects for the intrinsic region.
        assert isinstance(ins[0], tvm.te.schedule.Buffer)
        assert ins[0].shape[0].value == n
        return tvm.tir.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
    intrin = te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
    assert intrin.op == z.op
    assert intrin.reduce_init is None
    assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
    assert intrin.buffers[0].shape[0].value == n
    # Apply the n-wide intrinsic to a larger (m=32) elementwise add.
    m = 32
    X = te.placeholder((m,), name="X")
    Y = te.placeholder((m,), name="Y")
    Z = te.compute(X.shape, lambda i: X[i] + Y[i], name="Z")
    s = te.create_schedule(Z.op)
    xo, xi = s[Z].split(Z.op.axis[0], factor=n)
    s[Z].tensorize(xi, intrin)
    stmt = tvm.lower(s, [X, Y, Z])["main"].body
    # The tensorized inner loop lowers to the packed "vadd" call.
    assert isinstance(stmt.body, tvm.tir.Evaluate)
    assert str(stmt.body.value.args[0]) == '"vadd"'
    assert str(stmt.body.value.args[1]) == "X"
    assert str(stmt.body.value.args[2]) == "Z"
    assert s[Z].iter_var_attrs[xi].tensor_intrin == intrin
    assert s[Z].iter_var_attrs[xi].iter_type == tvm.te.schedule.IterVar.Tensorized
def test_tensor_intrin_scalar_params():
    """A tensor intrinsic with scalar_params: scalars are forwarded to the
    packed call and can be interleaved with tensor inputs at the call site."""
    n = te.size_var("n")
    x = te.placeholder((n,), name="x")
    v = te.size_var("v")
    w = te.size_var("w")
    z = te.compute((n,), lambda i: x[i] * v + w, name="z")
    def intrin_func(ins, outs, sp):
        # `sp` holds the scalar parameters in declaration order (v, w).
        assert isinstance(ins[0], tvm.te.schedule.Buffer)
        assert ins[0].shape[0] == n
        assert sp[0] == v
        assert sp[1] == w
        return tvm.tir.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
    intrin = te.decl_tensor_intrin(
        z.op, intrin_func, scalar_params=[v, w], default_buffer_params={"offset_factor": 1}
    )
    assert intrin.op == z.op
    assert intrin.reduce_init is None
    assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
    assert intrin.buffers[0].shape[0] == n
    assert tuple(intrin.scalar_params) == tuple((v, w))
    A = te.placeholder((10, 10), name="A")
    # Pass scalar inputs to the TensorIntrin, interleaved with tensor inputs
    C = te.compute((10, 10), lambda i, j: intrin(i * i, A[i, j], i + j), name="C")
    s = te.create_schedule(C.op)
    stmt = tvm.lower(s, [A, C])["main"].body
    # Packed call carries the two scalar expressions as trailing arguments.
    assert isinstance(stmt.body.body, tvm.tir.Evaluate)
    assert len(stmt.body.body.value.args) == 5
    assert str(stmt.body.body.value.args[3]) == "i * i"
    assert str(stmt.body.body.value.args[4]) == "i + j"
def test_legalize_invalid_attach():
    """Splitting or fusing an axis that is the target of a compute_at must be
    legalized so lowering still succeeds (attach point is rewritten)."""
    A = te.compute((10, 10), lambda i, j: 1.0, name="A")
    B = te.compute((10, 10), lambda i, j: A[i][j], name="B")
    # Case 1: Split an axis which is the target of a compute_at
    s = te.create_schedule([B.op])
    s[A].compute_at(s[B], B.op.axis[1])
    s[B].split(B.op.axis[1], 2)
    stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
    assert isinstance(stmt.body.body, tvm.tir.stmt.For)
    # Case 2: Fuse an axis which is the target of a compute_at
    s = te.create_schedule([B.op])
    s[A].compute_at(s[B], B.op.axis[1])
    s[B].fuse(B.op.axis[0], B.op.axis[1])
    stmt = tvm.lower(s, [A, B], simple_mode=True)["main"].body
    assert isinstance(stmt, tvm.tir.stmt.For)
def test_compute_at():
    """Invalid compute_at usages (self-attach, attach cycle) must fail at lower."""
    def add():
        # Shared fixture: C = A + B over a 16x16 grid.
        shape = (16, 16)
        A = tvm.te.compute(shape, lambda *i: 1.0, name="A")
        B = tvm.te.compute(shape, lambda *i: 2.0, name="B")
        C = tvm.te.compute(shape, lambda *i: A(*i) + B(*i), name="C")
        return A, B, C
    def invalid_compute_at_self():
        # A stage may not be attached under itself.
        A, B, C = add()
        s = tvm.te.create_schedule(C.op)
        s[C].compute_at(s[C], C.op.axis[0])
        with pytest.raises(RuntimeError):
            tvm.lower(s, [A, B], simple_mode=True)
    def invalid_compute_at_loop():
        # Mutual attachment (A under C and C under A) forms a cycle.
        A, B, C = add()
        s = tvm.te.create_schedule(C.op)
        s[A].compute_at(s[C], C.op.axis[0])
        s[C].compute_at(s[A], A.op.axis[0])
        with pytest.raises(RuntimeError):
            tvm.lower(s, [C], simple_mode=True)
    invalid_compute_at_self()
    invalid_compute_at_loop()
# Allow running this file directly (outside pytest): call every test once.
if __name__ == "__main__":
    test_singleton()
    test_pragma()
    test_tensor_intrin()
    test_tensor_intrin_scalar_params()
    test_rfactor()
    test_schedule_create()
    test_reorder()
    test_tile()
    test_split()
    test_fuse()
    test_fuse_with_split()
    test_fuse_with_out_of_order_axis()
    test_fuse_with_out_of_order_axis_with_reorder()
    test_vectorize()
    test_vectorize_commreduce()
    test_legalize_invalid_attach()
    test_compute_at()
| 12,579 | 32.546667 | 97 | py |
# File: tests/python/unittest/test_tir_transform_remove_undef.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm import TVMError
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Common base: each subclass's `before` is run through RemoveStoreUndef
    and compared against its `expected`."""

    @tvm.testing.fixture
    def transform(self):
        # The pass under test for all TestCase subclasses below.
        return tvm.tir.transform.RemoveStoreUndef()
class TestRemoveStoreUndef(BaseBeforeAfter):
    """Remove a store whose value is T.undef()"""

    def before(A: T.Buffer(1, "int32")):
        A[0] = T.undef(dtype="int32")

    def expected(A: T.Buffer(1, "int32")):
        # The store is dropped entirely; the empty body is a no-op evaluate.
        T.evaluate(0)
class TestRemoveStoreUndefExpression(BaseBeforeAfter):
    """Expressions containing T.undef() are removed"""

    def before(A: T.Buffer(1, "int32")):
        # T.undef() appears only as a sub-expression of the stored value.
        A[0] = 1 + T.undef(dtype="int32")

    def expected(A: T.Buffer(1, "int32")):
        T.evaluate(0)
class TestKeepOtherCallNodes(BaseBeforeAfter):
    """Expressions containing other CallNodes are not removed"""

    def before(A: T.Buffer(1, "int32"), n: T.int32):
        A[0] = T.shift_left(n, 1, dtype="int32")

    # Unchanged: only T.undef() triggers removal, not arbitrary calls.
    expected = before
class TestRemoveLetUndef(BaseBeforeAfter):
    """Remove a store whose value is bound to T.undef()"""

    def before(A: T.Buffer(1, "int32")):
        # The undef value reaches the store through a let binding.
        val = T.undef(dtype="int32")
        A[0] = val

    def expected(A: T.Buffer(1, "int32")):
        T.evaluate(0)
class TestRaiseErrorForUndefAsStoreIndices(BaseBeforeAfter):
    """Use of T.undef() as buffer indices is an error"""

    def before(A: T.Buffer(1, "int32")):
        val = T.undef(dtype="int32")
        A[val] = 5

    # The pass must reject this rather than silently dropping the store.
    expected = TVMError
class TestRaiseErrorForUndefAsLoadIndices(BaseBeforeAfter):
    """Use of T.undef() as buffer indices is an error
    Even though this occurs as part of the BufferStore's value, the
    T.undef() may not appear in a buffer's indices.
    """

    def before(A: T.Buffer(1, "int32"), B: T.Buffer(1, "int32")):
        B[0] = A[T.undef(dtype="int32")]

    expected = TVMError
# Entry point for direct invocation; delegates to TVM's pytest-based runner.
if __name__ == "__main__":
    tvm.testing.main()
| 2,718 | 27.621053 | 67 | py |
# File: tests/python/unittest/test_tir_usmp_analysis_extract_bufferinfo.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import tvm
from tvm import tir, script
from tvm.ir import Range
from tvm.script import tir as T
from tvm.tir import stmt_functor
from tvm.tir import PrimFunc
from tvm.tir.usmp import utils as usmp_utils
from tvm.target import Target
from tvm import WorkspacePoolInfo, ConstantPoolInfo
def _replace_stmt_with_buf_var_names(buffer_info_map):
"""helper to replace tir.allocates with buffer names"""
new_buffer_info_map = dict()
for k, v in buffer_info_map.items():
new_buffer_info_map[k.name_hint] = k
return new_buffer_info_map
def _verify_conflicts(main_buf_name, conflicting_buf_names, buffer_info_map):
"""helper to check expected liveness conflicts"""
buf_info = buffer_info_map[main_buf_name]
for conflict in buf_info.conflicts:
assert conflict.name_hint in conflicting_buf_names
def _get_allocates(primfunc):
    """Collect every tir.Allocate in the PrimFunc body, keyed by buffer-var name."""
    found = {}

    def _visit(node):
        if isinstance(node, tvm.tir.Allocate):
            found[str(node.buffer_var.name)] = node

    stmt_functor.post_order_visit(primfunc.body, _visit)
    return found
def _assign_poolinfos_to_allocates_in_primfunc(primfunc, pool_infos, constant_pool_infos):
    """helper to assign poolinfos to allocate nodes in a tir.PrimFunc"""
    def set_poolinfos(stmt):
        # Rebuild each (const) allocate with the candidate-pool annotation attached;
        # returning None for other nodes leaves them unchanged in ir_transform.
        if isinstance(stmt, tvm.tir.Allocate):
            return tvm.tir.Allocate(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                condition=stmt.condition,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: pool_infos},
            )
        elif isinstance(stmt, tvm.tir.AllocateConst):
            # Constant allocations get the (separate) constant pool candidates.
            return tvm.tir.AllocateConst(
                buffer_var=stmt.buffer_var,
                dtype=stmt.dtype,
                extents=stmt.extents,
                data_or_idx=stmt.data,
                body=stmt.body,
                annotations={tvm.tir.usmp.utils.CANDIDATE_MEMORY_POOL_ATTR: constant_pool_infos},
            )
    return primfunc.with_body(stmt_functor.ir_transform(primfunc.body, None, set_poolinfos))
def _assign_poolinfos_to_allocates_in_irmodule(mod, pool_infos, constant_pool_infos=None):
    """Copy the module, attaching candidate pool infos to allocates in each PrimFunc."""
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if not isinstance(func, tvm.tir.PrimFunc):
            continue
        updated[gvar] = _assign_poolinfos_to_allocates_in_primfunc(
            func, pool_infos, constant_pool_infos
        )
    return updated
def _assign_targets_to_primfuncs_irmodule(mod, target):
    """Copy the module with every PrimFunc carrying the given "target" attribute."""
    updated = tvm.IRModule()
    for gvar, func in mod.functions.items():
        if not isinstance(func, tvm.tir.PrimFunc):
            continue
        updated[gvar] = func.with_attr("target", target)
    return updated
# These are test IRModules that contains varied topologies of operator graphs
# that includes a main TIR function that includes call to such operators.
# fmt: off
# Linear operator chain (cast/subtract -> conv2d -> max_pool) plus a run_model
# that threads intermediates sid_9/sid_8 between the calls.
@tvm.script.ir_module
class LinearStructure:
    # Elementwise: cast uint8 input to int16 and subtract a scalar.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    # 7x7 conv2d with padding, bias add and fixed-point requantize clipped to uint8.
    # Scratch buffers: PaddedInput_7 (padded input) and Conv2dOutput_7 (accumulator).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    # 3x3 max pool (window indexed via rv0_rv1_fused_1) then cast to int16;
    # `tensor_2` is the pooled intermediate.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    # Top-level runner: sid_9 and sid_8 are the inter-operator workspace tensors
    # whose liveness/conflicts the USMP analysis tests inspect.
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_9 = T.allocate([301056], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, output, dtype="int32"))
__tvm_meta__ = None
# fmt: on
def test_linear():
    """Buffer-info extraction on LinearStructure: verify memory pressure,
    liveness conflicts, buffer sizes, and candidate pool assignment."""
    target = Target("c")
    fast_memory_pool = WorkspacePoolInfo(pool_name="fast_memory", targets=[target])
    slow_memory_pool = WorkspacePoolInfo(pool_name="slow_memory", targets=[target])
    tir_mod = LinearStructure
    tir_mod = _assign_targets_to_primfuncs_irmodule(tir_mod, target)
    tir_mod = _assign_poolinfos_to_allocates_in_irmodule(
        tir_mod, [fast_memory_pool, slow_memory_pool]
    )
    buffer_info_analysis = tvm.tir.usmp.analysis.extract_buffer_info(tir_mod["run_model"], tir_mod)
    assert buffer_info_analysis.memory_pressure == 1117718
    buffer_info_map = _replace_stmt_with_buf_var_names(buffer_info_analysis.buffer_info_stmts)
    # check conflicts
    _verify_conflicts("PaddedInput_7", ["sid_9", "sid_8", "Conv2dOutput_7"], buffer_info_map)
    _verify_conflicts("tensor_2", ["sid_8"], buffer_info_map)
    _verify_conflicts("sid_9", ["PaddedInput_7"], buffer_info_map)
    _verify_conflicts("sid_8", ["PaddedInput_7", "Conv2dOutput_7", "tensor_2"], buffer_info_map)
    _verify_conflicts("Conv2dOutput_7", ["sid_8", "PaddedInput_7"], buffer_info_map)
    # check sizes
    assert buffer_info_map["sid_8"].size_bytes == 802816
    assert buffer_info_map["Conv2dOutput_7"].size_bytes == 256
    assert buffer_info_map["PaddedInput_7"].size_bytes == 314646
    assert buffer_info_map["tensor_2"].size_bytes == 200704
    assert buffer_info_map["sid_9"].size_bytes == 301056
    # check_pool_candidates
    assert [
        pool_info.pool_name for pool_info in list(buffer_info_map["sid_8"].pool_candidates)
    ] == ["fast_memory", "slow_memory"]
# fmt: off
# Fixture: a single conv2d primfunc whose outer spatial loop is PARALLEL.
# The allocates declared inside that parallel loop ("dummy_allocate" and
# "Conv2dOutput_8") are expected to be skipped by USMP's buffer-info
# extraction (see test_parallel_serial_mixed_for_loops).
@tvm.script.ir_module
class ParallelSerialMixedForLoops:
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        # Zero-pad the input (58x58 halo) ahead of the 3x3 convolution.
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        # NOTE: this loop is parallel — allocates declared inside it are not
        # picked up by USMP (yet); this is what the test below exercises.
        for ax0_ax1_fused_ax2_fused_8 in T.parallel(0, 3136):
            dummy_allocate = T.decl_buffer([1], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                Conv2dOutput_8 = T.decl_buffer([64], "int32")
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")
    # Runner: a single extern call into the conv primfunc above.
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
# fmt: off
# Fixture: identical conv2d workload to ParallelSerialMixedForLoops, except
# the outer spatial loop is SERIAL. With only serial loops, every allocate
# (including "dummy_allocate") is visible to USMP's buffer-info extraction
# (see test_parallel_serial_mixed_for_loops).
@tvm.script.ir_module
class AllSerialForLoops:
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        # Zero-pad the input (58x58 halo) ahead of the 3x3 convolution.
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        # Serial outer loop: "dummy_allocate" and "Conv2dOutput_8" below are
        # both expected to appear in the USMP buffer-info map.
        for ax0_ax1_fused_ax2_fused_8 in T.serial(0, 3136):
            dummy_allocate = T.decl_buffer([1], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                Conv2dOutput_8 = T.decl_buffer([64], "int32")
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")
    # Runner: a single extern call into the conv primfunc above.
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", input, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_parallel_serial_mixed_for_loops():
    """USMP must see allocates in serial loops but skip those in parallel loops.

    Runs buffer-info extraction on two otherwise-identical modules and checks
    that "dummy_allocate" (declared inside the outer spatial loop) only shows
    up when that loop is serial.
    """
    target = Target("c")
    global_ws_pool = WorkspacePoolInfo(
        pool_name="global_workspace",
        targets=[target],
    )

    def _extract(mod):
        # Attach the target and pool infos, then run the USMP analysis pass.
        mod = _assign_targets_to_primfuncs_irmodule(mod, target)
        mod = _assign_poolinfos_to_allocates_in_irmodule(mod, [global_ws_pool])
        return tvm.tir.usmp.analysis.extract_buffer_info(mod["run_model"], mod)

    serial_analysis = _extract(AllSerialForLoops)
    assert serial_analysis.memory_pressure == 430848
    serial_map = _replace_stmt_with_buf_var_names(serial_analysis.buffer_info_stmts)
    # When all loops are serial all allocates are touched by USMP
    assert len(serial_map) == 3
    for buf_name in serial_map:
        assert buf_name in ["dummy_allocate", "Conv2dOutput_8", "PaddedInput_8"]

    mixed_analysis = _extract(ParallelSerialMixedForLoops)
    assert mixed_analysis.memory_pressure == 430848
    mixed_map = _replace_stmt_with_buf_var_names(mixed_analysis.buffer_info_stmts)
    # USMP will not touch (yet) the allocates inside parallel for loops
    assert len(mixed_map) == 2
    for buf_name in mixed_map:
        assert buf_name in ["Conv2dOutput_8", "PaddedInput_8"]
# fmt: off
# Fixture: an inception-style graph (branching + concatenate) used to
# exercise USMP buffer-info extraction on a non-linear call structure.
# Fix: the func_attr of tvmgen_default_fused_nn_conv2d_add_fixed_point_
# multiply_clip_cast used the key "T.noalias"; every other prim_func in this
# file uses the actual TIR attribute key "tir.noalias", so it is corrected
# for consistency.
@tvm.script.ir_module
class InceptionStructure:
    # 3x3 stride-2 max pool, 56x56x192 -> 28x28x192.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d(placeholder: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d", "tir.noalias": True})
        placeholder_1 = T.match_buffer(placeholder, [602112], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [249], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused in T.serial(0, 28):
            for ax2 in T.serial(0, 28):
                for ax3_outer_init, ax3_inner_init in T.grid(3, 64):
                    tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer_init*64)) + ax3_inner_init)] = T.uint8(0)
                for rv0_rv1_fused, ax3_outer, ax3_inner in T.grid(9, 3, 64):
                    tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer*64)) + ax3_inner)] = T.max(tensor_1[((((ax0_ax1_fused*5376) + (ax2*192)) + (ax3_outer*64)) + ax3_inner)], T.if_then_else(((((ax0_ax1_fused*2) + T.floordiv(rv0_rv1_fused, 3)) < 56) and (((ax2*2) + T.floormod(rv0_rv1_fused, 3)) < 56)), placeholder_1[((((((ax0_ax1_fused*21504) + (T.floordiv(rv0_rv1_fused, 3)*10752)) + (ax2*384)) + (T.floormod(rv0_rv1_fused, 3)*192)) + (ax3_outer*64)) + ax3_inner)], T.uint8(0), dtype="uint8"))
    # Cast uint8 input to int16 and subtract a scalar offset.
    @T.prim_func
    def tvmgen_default_fused_cast_subtract(placeholder_2: T.handle, placeholder_3: T.handle, T_subtract: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast_subtract", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_5 = T.match_buffer(placeholder_3, [1], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        T_subtract_1 = T.match_buffer(T_subtract, [452], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_1 in T.serial(0, 224):
            for ax2_1, ax3_inner_1 in T.grid(224, 3):
                T_subtract_1[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)] = (T.cast(placeholder_4[(((ax0_ax1_fused_1*672) + (ax2_1*3)) + ax3_inner_1)], "int16") - placeholder_5[0])
    # Plain uint8 -> int16 cast.
    @T.prim_func
    def tvmgen_default_fused_cast(placeholder_6: T.handle, T_cast: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_cast", "tir.noalias": True})
        placeholder_7 = T.match_buffer(placeholder_6, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_1 = T.match_buffer(T_cast, [249], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_2 in T.serial(0, 28):
            for ax2_2, ax3_outer_1, ax3_inner_2 in T.grid(28, 12, 16):
                T_cast_1[((((ax0_ax1_fused_2*5376) + (ax2_2*192)) + (ax3_outer_1*16)) + ax3_inner_2)] = T.cast(placeholder_7[((((ax0_ax1_fused_2*5376) + (ax2_2*192)) + (ax3_outer_1*16)) + ax3_inner_2)], "int16")
    # Channel-wise concatenation of the four inception branches (64+128+32+32).
    @T.prim_func
    def tvmgen_default_fused_concatenate(placeholder_8: T.handle, placeholder_9: T.handle, placeholder_10: T.handle, placeholder_11: T.handle, T_concat: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_concatenate", "tir.noalias": True})
        placeholder_12 = T.match_buffer(placeholder_8, [50176], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_concat_1 = T.match_buffer(T_concat, [313], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_13 = T.match_buffer(placeholder_9, [100352], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_14 = T.match_buffer(placeholder_11, [25088], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        placeholder_15 = T.match_buffer(placeholder_10, [25088], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        for ax0_ax1_fused_3 in T.serial(0, 28):
            for ax2_3, ax3 in T.grid(28, 256):
                T_concat_1[(((ax0_ax1_fused_3*7168) + (ax2_3*256)) + ax3)] = T.if_then_else((224 <= ax3), placeholder_14[((((ax0_ax1_fused_3*896) + (ax2_3*32)) + ax3) - 224)], T.if_then_else((192 <= ax3), placeholder_15[((((ax0_ax1_fused_3*896) + (ax2_3*32)) + ax3) - 192)], T.if_then_else((64 <= ax3), placeholder_13[((((ax0_ax1_fused_3*3584) + (ax2_3*128)) + ax3) - 64)], placeholder_12[(((ax0_ax1_fused_3*1792) + (ax2_3*64)) + ax3)], dtype="uint8"), dtype="uint8"), dtype="uint8")
    # 1x1 conv (64 ch) + bias + requantize, int16 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast(placeholder_16: T.handle, placeholder_17: T.handle, placeholder_18: T.handle, T_cast_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", "tir.noalias": True})
        placeholder_19 = T.match_buffer(placeholder_16, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_20 = T.match_buffer(placeholder_17, [4096], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_21 = T.match_buffer(placeholder_18, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_3 = T.match_buffer(T_cast_2, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput = T.decl_buffer([200704], "int16")
        for i0_i1_fused in T.serial(0, 56):
            for i2, i3 in T.grid(56, 64):
                PaddedInput[(((i0_i1_fused*3584) + (i2*64)) + i3)] = placeholder_19[(((i0_i1_fused*3584) + (i2*64)) + i3)]
        for ax0_ax1_fused_ax2_fused in T.serial(0, 3136):
            Conv2dOutput = T.decl_buffer([64], "int32")
            for ff in T.serial(0, 64):
                Conv2dOutput[ff] = 0
                for rc in T.serial(0, 64):
                    Conv2dOutput[ff] = (Conv2dOutput[ff] + (T.cast(PaddedInput[((ax0_ax1_fused_ax2_fused*64) + rc)], "int32")*T.cast(placeholder_20[((rc*64) + ff)], "int32")))
            for ax3_inner_3 in T.serial(0, 64):
                T_cast_3[((ax0_ax1_fused_ax2_fused*64) + ax3_inner_3)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput[ax3_inner_3] + placeholder_21[ax3_inner_3]), 1191576922, 31, -4, dtype="int32"), 255), 0), "uint8"), "int16")
    # 1x1 conv (96 ch) + bias + requantize, int16 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1(placeholder_22: T.handle, placeholder_23: T.handle, placeholder_24: T.handle, T_cast_4: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", "tir.noalias": True})
        placeholder_25 = T.match_buffer(placeholder_22, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_26 = T.match_buffer(placeholder_23, [18432], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_27 = T.match_buffer(placeholder_24, [96], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_5 = T.match_buffer(T_cast_4, [153], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_1 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_1 in T.serial(0, 28):
            for i2_1, i3_1 in T.grid(28, 192):
                PaddedInput_1[(((i0_i1_fused_1*5376) + (i2_1*192)) + i3_1)] = placeholder_25[(((i0_i1_fused_1*5376) + (i2_1*192)) + i3_1)]
        for ax0_ax1_fused_ax2_fused_1 in T.serial(0, 784):
            Conv2dOutput_1 = T.decl_buffer([1], "int32")
            for ax3_1 in T.serial(0, 96):
                Conv2dOutput_1[0] = 0
                for rc_1 in T.serial(0, 192):
                    Conv2dOutput_1[0] = (Conv2dOutput_1[0] + (T.cast(PaddedInput_1[((ax0_ax1_fused_ax2_fused_1*192) + rc_1)], "int32")*T.cast(placeholder_26[((rc_1*96) + ax3_1)], "int32")))
                T_cast_5[((ax0_ax1_fused_ax2_fused_1*96) + ax3_1)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_1[0] + placeholder_27[ax3_1]), 1201322342, 31, -6, dtype="int32"), 255), 0), "uint8"), "int16")
    # 3x3 stride-2 max pool (112 -> 56) followed by a cast to int16.
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [177], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2 = T.decl_buffer([200704], "uint8")
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    # 1x1 conv (64 ch) + bias + requantize, uint8 output (branch 1).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2(placeholder_30: T.handle, placeholder_31: T.handle, placeholder_32: T.handle, T_cast_8: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2", "tir.noalias": True})
        placeholder_33 = T.match_buffer(placeholder_30, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_34 = T.match_buffer(placeholder_31, [12288], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_35 = T.match_buffer(placeholder_32, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_9 = T.match_buffer(T_cast_8, [121], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_2 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_2 in T.serial(0, 28):
            for i2_2, i3_2 in T.grid(28, 192):
                PaddedInput_2[(((i0_i1_fused_2*5376) + (i2_2*192)) + i3_2)] = placeholder_33[(((i0_i1_fused_2*5376) + (i2_2*192)) + i3_2)]
        for ax0_ax1_fused_ax2_fused_2 in T.serial(0, 784):
            Conv2dOutput_2 = T.decl_buffer([64], "int32")
            for ff_1 in T.serial(0, 64):
                Conv2dOutput_2[ff_1] = 0
                for rc_2 in T.serial(0, 192):
                    Conv2dOutput_2[ff_1] = (Conv2dOutput_2[ff_1] + (T.cast(PaddedInput_2[((ax0_ax1_fused_ax2_fused_2*192) + rc_2)], "int32")*T.cast(placeholder_34[((rc_2*64) + ff_1)], "int32")))
            for ax3_inner_4 in T.serial(0, 64):
                T_cast_9[((ax0_ax1_fused_ax2_fused_2*64) + ax3_inner_4)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_2[ax3_inner_4] + placeholder_35[ax3_inner_4]), 1663316467, 31, -7, dtype="int32"), 255), 0), "uint8")
    # 3x3 stride-1 (padded) max pool followed by a cast to int16 (pool branch).
    @T.prim_func
    def tvmgen_default_fused_nn_max_pool2d_cast_1(placeholder_36: T.handle, T_cast_10: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast_1", "tir.noalias": True})
        placeholder_37 = T.match_buffer(placeholder_36, [150528], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_11 = T.match_buffer(T_cast_10, [249], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_3 = T.decl_buffer([150528], "uint8")
        for ax0_ax1_fused_6 in T.serial(0, 28):
            for ax2_6 in T.serial(0, 28):
                for ax3_outer_init_1, ax3_inner_init_1 in T.grid(3, 64):
                    tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_init_1*64)) + ax3_inner_init_1)] = T.uint8(0)
                for rv0_rv1_fused_2, ax3_outer_2, ax3_inner_5 in T.grid(9, 3, 64):
                    tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_2*64)) + ax3_inner_5)] = T.max(tensor_3[((((ax0_ax1_fused_6*5376) + (ax2_6*192)) + (ax3_outer_2*64)) + ax3_inner_5)], T.if_then_else(((((1 <= (T.floordiv(rv0_rv1_fused_2, 3) + ax0_ax1_fused_6)) and ((T.floordiv(rv0_rv1_fused_2, 3) + ax0_ax1_fused_6) < 29)) and (1 <= (ax2_6 + T.floormod(rv0_rv1_fused_2, 3)))) and ((ax2_6 + T.floormod(rv0_rv1_fused_2, 3)) < 29)), placeholder_37[(((((((T.floordiv(rv0_rv1_fused_2, 3)*5376) + (ax0_ax1_fused_6*5376)) + (ax2_6*192)) + (T.floormod(rv0_rv1_fused_2, 3)*192)) + (ax3_outer_2*64)) + ax3_inner_5) - 5568)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_7 in T.serial(0, 28):
            for ax2_7, ax3_4 in T.grid(28, 192):
                T_cast_11[(((ax0_ax1_fused_7*5376) + (ax2_7*192)) + ax3_4)] = T.cast(tensor_3[(((ax0_ax1_fused_7*5376) + (ax2_7*192)) + ax3_4)], "int16")
    # 1x1 conv (32 ch) + bias + double requantize, uint8 output (pool-proj branch).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2(placeholder_38: T.handle, placeholder_39: T.handle, placeholder_40: T.handle, T_cast_12: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2", "tir.noalias": True})
        placeholder_41 = T.match_buffer(placeholder_38, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_42 = T.match_buffer(placeholder_39, [6144], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_43 = T.match_buffer(placeholder_40, [32], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_13 = T.match_buffer(T_cast_12, [89], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_3 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_3 in T.serial(0, 28):
            for i2_3, i3_3 in T.grid(28, 192):
                PaddedInput_3[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)] = placeholder_41[(((i0_i1_fused_3*5376) + (i2_3*192)) + i3_3)]
        for ax0_ax1_fused_ax2_fused_3 in T.serial(0, 784):
            Conv2dOutput_3 = T.decl_buffer([1], "int32")
            for ax3_5 in T.serial(0, 32):
                Conv2dOutput_3[0] = 0
                for rc_3 in T.serial(0, 192):
                    Conv2dOutput_3[0] = (Conv2dOutput_3[0] + (T.cast(PaddedInput_3[((ax0_ax1_fused_ax2_fused_3*192) + rc_3)], "int32")*T.cast(placeholder_42[((rc_3*32) + ax3_5)], "int32")))
                T_cast_13[((ax0_ax1_fused_ax2_fused_3*32) + ax3_5)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_3[0] + placeholder_43[ax3_5]), 1811141736, 31, -6, dtype="int32"), 255), 0), "uint8"), "int32"), 1136333842, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 1x1 conv (16 ch) + bias + requantize, int16 output (5x5 reduce).
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2(placeholder_44: T.handle, placeholder_45: T.handle, placeholder_46: T.handle, T_cast_14: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", "tir.noalias": True})
        placeholder_47 = T.match_buffer(placeholder_44, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_48 = T.match_buffer(placeholder_45, [3072], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_49 = T.match_buffer(placeholder_46, [16], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_15 = T.match_buffer(T_cast_14, [73], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_4 = T.decl_buffer([150528], "int16")
        for i0_i1_fused_4 in T.serial(0, 28):
            for i2_4, i3_4 in T.grid(28, 192):
                PaddedInput_4[(((i0_i1_fused_4*5376) + (i2_4*192)) + i3_4)] = placeholder_47[(((i0_i1_fused_4*5376) + (i2_4*192)) + i3_4)]
        for ax0_ax1_fused_ax2_fused_4 in T.serial(0, 784):
            Conv2dOutput_4 = T.decl_buffer([1], "int32")
            for ax3_6 in T.serial(0, 16):
                Conv2dOutput_4[0] = 0
                for rc_4 in T.serial(0, 192):
                    Conv2dOutput_4[0] = (Conv2dOutput_4[0] + (T.cast(PaddedInput_4[((ax0_ax1_fused_ax2_fused_4*192) + rc_4)], "int32")*T.cast(placeholder_48[((rc_4*16) + ax3_6)], "int32")))
                T_cast_15[((ax0_ax1_fused_ax2_fused_4*16) + ax3_6)] = T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_4[0] + placeholder_49[ax3_6]), 1764006585, 31, -7, dtype="int32"), 255), 0), "uint8"), "int16")
    # 3x3 padded conv (32 ch) + bias + double requantize, uint8 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1(placeholder_50: T.handle, placeholder_51: T.handle, placeholder_52: T.handle, T_cast_16: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1", "tir.noalias": True})
        placeholder_53 = T.match_buffer(placeholder_50, [12544], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_54 = T.match_buffer(placeholder_51, [4608], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_55 = T.match_buffer(placeholder_52, [32], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_17 = T.match_buffer(T_cast_16, [89], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_5 = T.decl_buffer([14400], "int16")
        for i0_i1_fused_5 in T.serial(0, 30):
            for i2_5, i3_5 in T.grid(30, 16):
                PaddedInput_5[(((i0_i1_fused_5*480) + (i2_5*16)) + i3_5)] = T.if_then_else(((((1 <= i0_i1_fused_5) and (i0_i1_fused_5 < 29)) and (1 <= i2_5)) and (i2_5 < 29)), placeholder_53[((((i0_i1_fused_5*448) + (i2_5*16)) + i3_5) - 464)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_5 in T.serial(0, 784):
            Conv2dOutput_5 = T.decl_buffer([1], "int32")
            for ax3_7 in T.serial(0, 32):
                Conv2dOutput_5[0] = 0
                for ry, rx, rc_5 in T.grid(3, 3, 16):
                    Conv2dOutput_5[0] = (Conv2dOutput_5[0] + (T.cast(PaddedInput_5[(((((T.floordiv(ax0_ax1_fused_ax2_fused_5, 28)*480) + (ry*480)) + (rx*16)) + (T.floormod(ax0_ax1_fused_ax2_fused_5, 28)*16)) + rc_5)], "int32")*T.cast(placeholder_54[((((ry*1536) + (rx*512)) + (rc_5*32)) + ax3_7)], "int32")))
                T_cast_17[((ax0_ax1_fused_ax2_fused_5*32) + ax3_7)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_5[0] + placeholder_55[ax3_7]), 1131968888, 31, -6, dtype="int32"), 255), 0), "uint8"), "int32"), 1900719667, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 3x3 padded conv (128 ch) + bias + double requantize, uint8 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_(placeholder_56: T.handle, placeholder_57: T.handle, placeholder_58: T.handle, T_cast_18: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_", "tir.noalias": True})
        placeholder_59 = T.match_buffer(placeholder_56, [75264], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_60 = T.match_buffer(placeholder_57, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_61 = T.match_buffer(placeholder_58, [128], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_19 = T.match_buffer(T_cast_18, [185], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_6 = T.decl_buffer([86400], "int16")
        for i0_i1_fused_6 in T.serial(0, 30):
            for i2_6, i3_6 in T.grid(30, 96):
                PaddedInput_6[(((i0_i1_fused_6*2880) + (i2_6*96)) + i3_6)] = T.if_then_else(((((1 <= i0_i1_fused_6) and (i0_i1_fused_6 < 29)) and (1 <= i2_6)) and (i2_6 < 29)), placeholder_59[((((i0_i1_fused_6*2688) + (i2_6*96)) + i3_6) - 2784)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_6 in T.serial(0, 784):
            Conv2dOutput_6 = T.decl_buffer([64], "int32")
            for ax3_outer_3 in T.serial(0, 2):
                for ff_2 in T.serial(0, 64):
                    Conv2dOutput_6[ff_2] = 0
                    for ry_1, rx_1, rc_6 in T.grid(3, 3, 96):
                        Conv2dOutput_6[ff_2] = (Conv2dOutput_6[ff_2] + (T.cast(PaddedInput_6[(((((T.floordiv(ax0_ax1_fused_ax2_fused_6, 28)*2880) + (ry_1*2880)) + (rx_1*96)) + (T.floormod(ax0_ax1_fused_ax2_fused_6, 28)*96)) + rc_6)], "int32")*T.cast(placeholder_60[(((((ry_1*36864) + (rx_1*12288)) + (rc_6*128)) + (ax3_outer_3*64)) + ff_2)], "int32")))
                for ax3_inner_6 in T.serial(0, 64):
                    T_cast_19[(((ax0_ax1_fused_ax2_fused_6*128) + (ax3_outer_3*64)) + ax3_inner_6)] = T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_6[ax3_inner_6] + placeholder_61[((ax3_outer_3*64) + ax3_inner_6)]), 1374050734, 31, -7, dtype="int32"), 255), 0), "uint8"), "int32"), 1544713713, 31, 0, dtype="int32"), 255), 0), "uint8")
    # 7x7 stride-2 stem conv (64 ch) + bias + requantize, uint8 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast(placeholder_62: T.handle, placeholder_63: T.handle, placeholder_64: T.handle, T_cast_20: T.handle) -> None:
        # function attr dict
        # Fixed: attribute key was "T.noalias"; the TIR attribute used by every
        # other prim_func in this module is "tir.noalias".
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", "tir.noalias": True})
        placeholder_65 = T.match_buffer(placeholder_62, [150528], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_66 = T.match_buffer(placeholder_63, [9408], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_67 = T.match_buffer(placeholder_64, [64], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_21 = T.match_buffer(T_cast_20, [289], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_7 = T.decl_buffer([157323], "int16")
        for i0_i1_fused_7 in T.serial(0, 229):
            for i2_7, i3_7 in T.grid(229, 3):
                PaddedInput_7[(((i0_i1_fused_7*687) + (i2_7*3)) + i3_7)] = T.if_then_else(((((2 <= i0_i1_fused_7) and (i0_i1_fused_7 < 226)) and (2 <= i2_7)) and (i2_7 < 226)), placeholder_65[((((i0_i1_fused_7*672) + (i2_7*3)) + i3_7) - 1350)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_7 in T.serial(0, 12544):
            Conv2dOutput_7 = T.decl_buffer([64], "int32")
            for ff_3 in T.serial(0, 64):
                Conv2dOutput_7[ff_3] = 0
                for ry_2, rx_2, rc_7 in T.grid(7, 7, 3):
                    Conv2dOutput_7[ff_3] = (Conv2dOutput_7[ff_3] + (T.cast(PaddedInput_7[(((((T.floordiv(ax0_ax1_fused_ax2_fused_7, 112)*1374) + (ry_2*687)) + (T.floormod(ax0_ax1_fused_ax2_fused_7, 112)*6)) + (rx_2*3)) + rc_7)], "int32")*T.cast(placeholder_66[((((ry_2*1344) + (rx_2*192)) + (rc_7*64)) + ff_3)], "int32")))
            for ax3_inner_7 in T.serial(0, 64):
                T_cast_21[((ax0_ax1_fused_ax2_fused_7*64) + ax3_inner_7)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_7[ax3_inner_7] + placeholder_67[ax3_inner_7]), 1939887962, 31, -9, dtype="int32"), 255), 0), "uint8")
    # 3x3 padded conv (192 ch) + bias + requantize, uint8 output.
    @T.prim_func
    def tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1(placeholder_68: T.handle, placeholder_69: T.handle, placeholder_70: T.handle, T_cast_22: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", "tir.noalias": True})
        placeholder_71 = T.match_buffer(placeholder_68, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_72 = T.match_buffer(placeholder_69, [110592], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        placeholder_73 = T.match_buffer(placeholder_70, [192], dtype="int32", elem_offset=0, align=64, offset_factor=1)
        T_cast_23 = T.match_buffer(T_cast_22, [305], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        # body
        PaddedInput_8 = T.decl_buffer([215296], "int16")
        for i0_i1_fused_8 in T.serial(0, 58):
            for i2_8, i3_8 in T.grid(58, 64):
                PaddedInput_8[(((i0_i1_fused_8*3712) + (i2_8*64)) + i3_8)] = T.if_then_else(((((1 <= i0_i1_fused_8) and (i0_i1_fused_8 < 57)) and (1 <= i2_8)) and (i2_8 < 57)), placeholder_71[((((i0_i1_fused_8*3584) + (i2_8*64)) + i3_8) - 3648)], T.int16(0), dtype="int16")
        for ax0_ax1_fused_ax2_fused_8 in T.serial(0, 3136):
            Conv2dOutput_8 = T.decl_buffer([64], "int32")
            for ax3_outer_4 in T.serial(0, 3):
                for ff_4 in T.serial(0, 64):
                    Conv2dOutput_8[ff_4] = 0
                    for ry_3, rx_3, rc_8 in T.grid(3, 3, 64):
                        Conv2dOutput_8[ff_4] = (Conv2dOutput_8[ff_4] + (T.cast(PaddedInput_8[(((((T.floordiv(ax0_ax1_fused_ax2_fused_8, 56)*3712) + (ry_3*3712)) + (rx_3*64)) + (T.floormod(ax0_ax1_fused_ax2_fused_8, 56)*64)) + rc_8)], "int32")*T.cast(placeholder_72[(((((ry_3*36864) + (rx_3*12288)) + (rc_8*192)) + (ax3_outer_4*64)) + ff_4)], "int32")))
                for ax3_inner_8 in T.serial(0, 64):
                    T_cast_23[(((ax0_ax1_fused_ax2_fused_8*192) + (ax3_outer_4*64)) + ax3_inner_8)] = T.cast(T.max(T.min(T.q_multiply_shift((Conv2dOutput_8[ax3_inner_8] + placeholder_73[((ax3_outer_4*64) + ax3_inner_8)]), 1139793473, 31, -6, dtype="int32"), 255), 0), "uint8")
    # Runner: allocates intermediate tensors (sid_*) and wires the inception
    # graph together (stem -> branches -> concatenate).
    @T.prim_func
    def run_model(input: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        # body
        T.attr("default", "device_id", 0)
        T.attr("default", "device_type", 1)
        sid_32 = T.allocate([301056], "int8", "global")
        sid_20 = T.allocate([150528], "int8", "global")
        sid_6 = T.allocate([401408], "int8", "global")
        sid_9 = T.allocate([301056], "int8", "global")
        sid_7 = T.allocate([401408], "int8", "global")
        sid_8 = T.allocate([802816], "int8", "global")
        sid_2 = T.allocate([50176], "int8", "global")
        sid_3 = T.allocate([301056], "int8", "global")
        sid_19 = T.allocate([100352], "int8", "global")
        sid_4 = T.allocate([150528], "int8", "global")
        sid_5 = T.allocate([602112], "int8", "global")
        sid_25 = T.allocate([25088], "int8", "global")
        sid_26 = T.allocate([25088], "int8", "global")
        sid_31 = T.allocate([25088], "int8", "global")
        T.evaluate(T.call_extern("tvmgen_default_fused_cast_subtract", input, T.lookup_param("p0", dtype="handle"), sid_9, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast", sid_9, T.lookup_param("p1", dtype="handle"), T.lookup_param("p2", dtype="handle"), sid_8, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast", sid_8, sid_7, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast", sid_7, T.lookup_param("p3", dtype="handle"), T.lookup_param("p4", dtype="handle"), sid_6, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_1", sid_6, T.lookup_param("p5", dtype="handle"), T.lookup_param("p6", dtype="handle"), sid_5, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d", sid_5, sid_4, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_cast", sid_4, sid_3, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_2", sid_3, T.lookup_param("p7", dtype="handle"), T.lookup_param("p8", dtype="handle"), sid_2, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_1", sid_3, T.lookup_param("p9", dtype="handle"), T.lookup_param("p10", dtype="handle"), sid_20, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320_", sid_20, T.lookup_param("p11", dtype="handle"), T.lookup_param("p12", dtype="handle"), sid_19, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_2", sid_3, T.lookup_param("p13", dtype="handle"), T.lookup_param("p14", dtype="handle"), sid_26, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__1", sid_26, T.lookup_param("p15", dtype="handle"), T.lookup_param("p16", dtype="handle"), sid_25, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_max_pool2d_cast_1", sid_4, sid_32, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_nn_conv2d_add_fixed_point_multiply_clip_cast_cast_fixed_point_multiply_cli_4464294615199028320__2", sid_32, T.lookup_param("p17", dtype="handle"), T.lookup_param("p18", dtype="handle"), sid_31, dtype="int32"))
        T.evaluate(T.call_extern("tvmgen_default_fused_concatenate", sid_2, sid_19, sid_25, sid_31, output, dtype="int32"))
    __tvm_meta__ = None
# fmt: on
def test_inception_structure():
    """Check USMP buffer-info extraction on the inception-style module.

    Verifies the reported memory pressure, the liveness-conflict set of every
    buffer, and the byte size of every buffer found in InceptionStructure's
    run_model.
    """
    target = Target("c")
    workspace_pool = WorkspacePoolInfo(
        pool_name="global_workspace",
        targets=[target],
    )
    mod = _assign_targets_to_primfuncs_irmodule(InceptionStructure, target)
    mod = _assign_poolinfos_to_allocates_in_irmodule(mod, [workspace_pool])
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["run_model"], mod)
    assert analysis.memory_pressure == 1117718
    buffer_info_map = _replace_stmt_with_buf_var_names(analysis.buffer_info_stmts)

    # buffer name -> buffers it must be recorded as live-at-the-same-time with
    # (dict insertion order matches the original sequence of checks)
    expected_conflicts = {
        "sid_3": [
            "sid_4", "PaddedInput_2", "sid_2", "Conv2dOutput_2", "PaddedInput_1",
            "Conv2dOutput_1", "sid_20", "PaddedInput_6", "Conv2dOutput_6",
            "sid_19", "PaddedInput_4",
        ],
        "Conv2dOutput": ["sid_6", "PaddedInput"],
        "Conv2dOutput_7": ["PaddedInput_7", "sid_8"],
        "sid_4": [
            "sid_5", "sid_3", "PaddedInput_2", "sid_2", "Conv2dOutput_2",
            "PaddedInput_1", "Conv2dOutput_1", "sid_20", "PaddedInput_6",
            "Conv2dOutput_6", "sid_19", "PaddedInput_4", "Conv2dOutput_4",
            "sid_26", "PaddedInput_5", "Conv2dOutput_5", "sid_25", "tensor_3",
        ],
        "sid_2": [
            "PaddedInput_2", "sid_3", "sid_4", "Conv2dOutput_2", "PaddedInput_1",
            "Conv2dOutput_1", "sid_20", "PaddedInput_6", "Conv2dOutput_6",
            "sid_19", "PaddedInput_4", "Conv2dOutput_4", "sid_26",
            "PaddedInput_5", "Conv2dOutput_5", "sid_25", "tensor_3", "sid_32",
            "PaddedInput_3", "Conv2dOutput_3", "sid_31",
        ],
        "sid_19": [
            "Conv2dOutput_6", "sid_2", "PaddedInput_6", "sid_3", "sid_4",
            "PaddedInput_4", "Conv2dOutput_4", "sid_26", "PaddedInput_5",
            "Conv2dOutput_5", "sid_25", "tensor_3", "sid_32", "PaddedInput_3",
            "Conv2dOutput_3", "sid_31",
        ],
        "PaddedInput_2": ["sid_3", "sid_4", "sid_2", "Conv2dOutput_2"],
        "Conv2dOutput_6": ["sid_2", "PaddedInput_6", "sid_3", "sid_4", "sid_19"],
        "sid_9": ["PaddedInput_7"],
        "sid_7": ["tensor_2", "PaddedInput"],
        "PaddedInput_4": ["sid_2", "sid_19", "sid_3", "sid_4", "Conv2dOutput_4", "sid_26"],
        "PaddedInput_3": ["sid_2", "sid_32", "sid_25", "sid_19", "Conv2dOutput_3", "sid_31"],
        "sid_5": ["PaddedInput_8", "Conv2dOutput_8", "sid_4"],
        "sid_31": ["Conv2dOutput_3", "PaddedInput_3", "sid_2", "sid_25", "sid_19"],
        "PaddedInput": ["sid_7", "sid_6", "Conv2dOutput"],
        "Conv2dOutput_2": ["sid_2", "PaddedInput_2", "sid_3", "sid_4"],
        "sid_32": ["tensor_3", "sid_2", "sid_25", "sid_19", "PaddedInput_3"],
        "tensor_2": ["sid_8", "sid_7"],
        "sid_26": ["Conv2dOutput_4", "PaddedInput_4", "sid_2", "sid_19", "sid_4", "PaddedInput_5"],
        "Conv2dOutput_3": ["PaddedInput_3", "sid_2", "sid_25", "sid_19", "sid_31"],
        "PaddedInput_6": ["sid_2", "sid_3", "sid_20", "sid_4", "Conv2dOutput_6", "sid_19"],
        "sid_6": ["PaddedInput", "Conv2dOutput", "PaddedInput_8"],
        "PaddedInput_8": ["sid_6", "sid_5", "Conv2dOutput_8"],
        "Conv2dOutput_5": ["PaddedInput_5", "sid_2", "sid_19", "sid_4", "sid_25"],
        "Conv2dOutput_1": ["PaddedInput_1", "sid_2", "sid_3", "sid_4", "sid_20"],
        "tensor_3": ["sid_2", "sid_25", "sid_19", "sid_4", "sid_32"],
        "sid_8": ["Conv2dOutput_7", "PaddedInput_7", "tensor_2"],
        "sid_20": ["Conv2dOutput_1", "PaddedInput_1", "sid_2", "sid_3", "sid_4", "PaddedInput_6"],
        "Conv2dOutput_8": ["sid_5", "PaddedInput_8"],
        "PaddedInput_1": ["sid_2", "sid_3", "sid_4", "Conv2dOutput_1", "sid_20"],
        "Conv2dOutput_4": ["PaddedInput_4", "sid_2", "sid_19", "sid_4", "sid_26"],
        "sid_25": [
            "PaddedInput_5", "Conv2dOutput_5", "sid_2", "sid_19", "sid_4",
            "tensor_3", "sid_32", "PaddedInput_3", "Conv2dOutput_3", "sid_31",
        ],
        "PaddedInput_7": ["sid_9", "Conv2dOutput_7", "sid_8"],
        "PaddedInput_5": ["sid_2", "sid_19", "sid_26", "sid_4", "Conv2dOutput_5", "sid_25"],
    }
    for buf_name, conflicting_bufs in expected_conflicts.items():
        _verify_conflicts(buf_name, conflicting_bufs, buffer_info_map)

    # buffer name -> expected allocation size in bytes
    expected_sizes = {
        "sid_20": 150528,
        "tensor_2": 200704,
        "sid_5": 602112,
        "sid_9": 301056,
        "Conv2dOutput_3": 4,
        "sid_26": 25088,
        "Conv2dOutput_2": 256,
        "PaddedInput_5": 28800,
        "sid_8": 802816,
        "Conv2dOutput_5": 4,
        "sid_3": 301056,
        "Conv2dOutput": 256,
        "PaddedInput_3": 301056,
        "sid_32": 301056,
        "PaddedInput_8": 430592,
        "sid_4": 150528,
        "PaddedInput_7": 314646,
        "sid_6": 401408,
        "Conv2dOutput_8": 256,
        "sid_25": 25088,
        "PaddedInput": 401408,
        "sid_7": 401408,
        "Conv2dOutput_1": 4,
        "Conv2dOutput_4": 4,
        "PaddedInput_2": 301056,
        "sid_31": 25088,
        "PaddedInput_1": 301056,
        "Conv2dOutput_6": 256,
        "PaddedInput_4": 301056,
        "sid_2": 50176,
        "tensor_3": 150528,
        "Conv2dOutput_7": 256,
        "sid_19": 100352,
        "PaddedInput_6": 172800,
    }
    for buf_name, size_bytes in expected_sizes.items():
        assert buffer_info_map[buf_name].size_bytes == size_bytes
# fmt: off
# TVMScript fixture for test_multiple_calls_to_same_primfunc below: run_model
# invokes several of the prim_funcs twice each (layout_transform_1,
# conv2d_NCHWc, dense_pack_nn_relu, reshape_1, reshape), so USMP buffer-info
# extraction has to account for the prim_funcs' internal buffers at each call
# site, and the allocate_const buffers (sid_21/22/23) exercise constant pools.
@tvm.script.ir_module
class MultipleCallsToSamePrimFuncModule:
    # Layout transform; called twice from run_model (on the input and on sid_5).
    @T.prim_func
    def tvmgen_default_fused_layout_transform_1(placeholder: T.handle, T_layout_trans: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_layout_transform_1", "tir.noalias": True})
        placeholder_1 = T.match_buffer(placeholder, [864], dtype="float32")
        T_layout_trans_1 = T.match_buffer(T_layout_trans, [41], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused, ax3, ax4_inner in T.grid(24, 12, 3):
            T_layout_trans_1[ax0_ax1_fused_ax2_fused * 36 + ax3 * 3 + ax4_inner] = placeholder_1[ax4_inner * 288 + ax0_ax1_fused_ax2_fused * 12 + ax3]
    # Convolution with function-local scratch buffers (data_pad and the
    # per-row accumulator conv2d_NCHWc_global); called twice from run_model.
    @T.prim_func
    def tvmgen_default_fused_nn_contrib_conv2d_NCHWc(placeholder_2: T.handle, placeholder_3: T.handle, conv2d_NCHWc: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_contrib_conv2d_NCHWc", "tir.noalias": True})
        placeholder_4 = T.match_buffer(placeholder_2, [864], dtype="float32")
        placeholder_5 = T.match_buffer(placeholder_3, [81], dtype="float32")
        conv2d_NCHWc_1 = T.match_buffer(conv2d_NCHWc, [41], dtype="float32")
        # body
        data_pad = T.decl_buffer([1092], "float32")
        for i0_i1_fused_i2_fused, i3, i4 in T.grid(26, 14, 3):
            data_pad[i0_i1_fused_i2_fused * 42 + i3 * 3 + i4] = T.if_then_else(1 <= i0_i1_fused_i2_fused and i0_i1_fused_i2_fused < 25 and 1 <= i3 and i3 < 13, placeholder_4[i0_i1_fused_i2_fused * 36 + i3 * 3 + i4 - 39], T.float32(0), dtype="float32")
        for n_oc_chunk_fused_oh_fused in T.serial(0, 24):
            conv2d_NCHWc_global = T.decl_buffer([36], "float32")
            # zero-initialise the 12 output positions (fully unrolled schedule)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 3] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 6] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 9] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 12] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 15] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 18] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 21] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 24] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 27] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 30] = T.float32(0)
            for oc_block_c_init in T.serial(0, 3):
                conv2d_NCHWc_global[oc_block_c_init + 33] = T.float32(0)
            for kh, kw, ic_inner in T.grid(3, 3, 3):
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c] = conv2d_NCHWc_global[oc_block_c] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 3] = conv2d_NCHWc_global[oc_block_c + 3] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 3] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 6] = conv2d_NCHWc_global[oc_block_c + 6] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 6] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 9] = conv2d_NCHWc_global[oc_block_c + 9] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 9] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 12] = conv2d_NCHWc_global[oc_block_c + 12] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 12] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 15] = conv2d_NCHWc_global[oc_block_c + 15] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 15] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 18] = conv2d_NCHWc_global[oc_block_c + 18] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 18] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 21] = conv2d_NCHWc_global[oc_block_c + 21] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 21] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 24] = conv2d_NCHWc_global[oc_block_c + 24] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 24] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 27] = conv2d_NCHWc_global[oc_block_c + 27] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 27] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 30] = conv2d_NCHWc_global[oc_block_c + 30] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 30] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
                for oc_block_c in T.serial(0, 3):
                    conv2d_NCHWc_global[oc_block_c + 33] = conv2d_NCHWc_global[oc_block_c + 33] + data_pad[kh * 42 + n_oc_chunk_fused_oh_fused * 42 + kw * 3 + ic_inner + 33] * placeholder_5[kh * 27 + kw * 9 + ic_inner * 3 + oc_block_c]
            for ow_inner, oc_block in T.grid(12, 3):
                conv2d_NCHWc_1[n_oc_chunk_fused_oh_fused * 36 + ow_inner * 3 + oc_block] = conv2d_NCHWc_global[ow_inner * 3 + oc_block]
    # Softmax followed by add/multiply/add, with per-row scratch buffers
    # (T_softmax_maxelem/exp/expsum/norm) declared inside the row loop.
    @T.prim_func
    def tvmgen_default_fused_nn_softmax_add_add_multiply_add(placeholder_6: T.handle, placeholder_7: T.handle, placeholder_8: T.handle, placeholder_9: T.handle, placeholder_10: T.handle, T_add: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_softmax_add_add_multiply_add", "tir.noalias": True})
        placeholder_11 = T.match_buffer(placeholder_6, [864], dtype="float32")
        placeholder_12 = T.match_buffer(placeholder_7, [864], dtype="float32")
        placeholder_13 = T.match_buffer(placeholder_8, [3], dtype="float32")
        placeholder_14 = T.match_buffer(placeholder_9, [3], dtype="float32")
        placeholder_15 = T.match_buffer(placeholder_10, [3], dtype="float32")
        T_add_1 = T.match_buffer(T_add, [864], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused in T.serial(0, 72):
            T_softmax_norm = T.decl_buffer([12], "float32")
            with T.decl_buffer([1], "float32") as T_softmax_maxelem:
                T_softmax_maxelem[0] = T.float32(-3.4028234663852886e+38)
                for k in T.serial(0, 12):
                    T_softmax_maxelem[0] = T.max(T_softmax_maxelem[0], placeholder_11[ax0_ax1_fused_ax2_fused * 12 + k])
                T_softmax_exp = T.decl_buffer([12], "float32")
                for i3 in T.serial(0, 12):
                    T_softmax_exp[i3] = T.exp(placeholder_11[ax0_ax1_fused_ax2_fused * 12 + i3] - T_softmax_maxelem[0], dtype="float32")
                T_softmax_expsum = T.decl_buffer([1], "float32")
                T_softmax_expsum[0] = T.float32(0)
                for k in T.serial(0, 12):
                    T_softmax_expsum[0] = T_softmax_expsum[0] + T_softmax_exp[k]
                for i3 in T.serial(0, 12):
                    T_softmax_norm[i3] = T_softmax_exp[i3] / T_softmax_expsum[0]
            for ax3 in T.serial(0, 12):
                T_add_1[ax0_ax1_fused_ax2_fused * 12 + ax3] = (placeholder_12[ax0_ax1_fused_ax2_fused * 12 + ax3] + T_softmax_norm[ax3] + placeholder_13[T.floordiv(ax0_ax1_fused_ax2_fused, 24)]) * placeholder_14[T.floordiv(ax0_ax1_fused_ax2_fused, 24)] + placeholder_15[T.floordiv(ax0_ax1_fused_ax2_fused, 24)]
    # Packed dense + ReLU with local scratch (compute / compute_global);
    # called twice from run_model.
    @T.prim_func
    def tvmgen_default_fused_nn_contrib_dense_pack_nn_relu(placeholder_16: T.handle, placeholder_17: T.handle, T_relu: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", "tir.noalias": True})
        placeholder_18 = T.match_buffer(placeholder_16, [864], dtype="float32")
        placeholder_19 = T.match_buffer(placeholder_17, [144], dtype="float32")
        T_relu_1 = T.match_buffer(T_relu, [864], dtype="float32")
        # body
        for ax1_outer_ax0_outer_fused in T.serial(0, 18):
            compute = T.decl_buffer([48], "float32")
            with T.decl_buffer([48], "float32") as compute_global:
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 6] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 12] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 18] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 24] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 30] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 36] = T.float32(0)
                for x_c_init in T.serial(0, 6):
                    compute_global[x_c_init + 42] = T.float32(0)
                for k_outer in T.serial(0, 12):
                    for x_c in T.serial(0, 6):
                        compute_global[x_c] = compute_global[x_c] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 6] = compute_global[x_c + 6] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 12] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 12] = compute_global[x_c + 12] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 24] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 18] = compute_global[x_c + 18] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 36] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 24] = compute_global[x_c + 24] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 48] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 30] = compute_global[x_c + 30] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 60] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 36] = compute_global[x_c + 36] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 72] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                    for x_c in T.serial(0, 6):
                        compute_global[x_c + 42] = compute_global[x_c + 42] + placeholder_18[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + k_outer + 84] * placeholder_19[T.floordiv(ax1_outer_ax0_outer_fused, 9) * 72 + k_outer * 6 + x_c]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner] = compute_global[x_inner_inner]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 6] = compute_global[x_inner_inner + 6]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 12] = compute_global[x_inner_inner + 12]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 18] = compute_global[x_inner_inner + 18]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 24] = compute_global[x_inner_inner + 24]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 30] = compute_global[x_inner_inner + 30]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 36] = compute_global[x_inner_inner + 36]
                for x_inner_inner in T.serial(0, 6):
                    compute[x_inner_inner + 42] = compute_global[x_inner_inner + 42]
            for ax0_inner_inner, ax1_inner_inner in T.grid(8, 6):
                T_relu_1[T.floormod(ax1_outer_ax0_outer_fused, 9) * 96 + ax0_inner_inner * 12 + T.floordiv(ax1_outer_ax0_outer_fused, 9) * 6 + ax1_inner_inner] = T.max(compute[ax0_inner_inner * 6 + ax1_inner_inner], T.float32(0))
    # Flat element-for-element copy; called twice from run_model.
    @T.prim_func
    def tvmgen_default_fused_reshape_1(placeholder_20: T.handle, T_reshape: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_reshape_1", "tir.noalias": True})
        placeholder_21 = T.match_buffer(placeholder_20, [864], dtype="float32")
        T_reshape_1 = T.match_buffer(T_reshape, [864], dtype="float32")
        # body
        for ax0, ax1_inner in T.grid(72, 12):
            T_reshape_1[ax0 * 12 + ax1_inner] = placeholder_21[ax0 * 12 + ax1_inner]
    # Layout transform with the opposite axis order to layout_transform_1.
    @T.prim_func
    def tvmgen_default_fused_layout_transform(placeholder_22: T.handle, T_layout_trans_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_layout_transform", "tir.noalias": True})
        placeholder_23 = T.match_buffer(placeholder_22, [864], dtype="float32")
        T_layout_trans_3 = T.match_buffer(T_layout_trans_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused, ax2, ax3_inner in T.grid(3, 24, 12):
            T_layout_trans_3[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner] = placeholder_23[ax2 * 36 + ax3_inner * 3 + ax0_ax1_fused]
    # Identity copy (reshape with unchanged flat index); called twice.
    @T.prim_func
    def tvmgen_default_fused_reshape(placeholder_24: T.handle, T_reshape_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_reshape", "tir.noalias": True})
        placeholder_25 = T.match_buffer(placeholder_24, [864], dtype="float32")
        T_reshape_3 = T.match_buffer(T_reshape_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused, ax2, ax3_inner in T.grid(3, 24, 12):
            T_reshape_3[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner] = placeholder_25[ax0_ax1_fused * 288 + ax2 * 12 + ax3_inner]
    # Softmax + add, same scratch-buffer structure as the softmax prim_func
    # above but with only two inputs.
    @T.prim_func
    def tvmgen_default_fused_nn_softmax_add(placeholder_26: T.handle, placeholder_27: T.handle, T_add_2: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "tvmgen_default_fused_nn_softmax_add", "tir.noalias": True})
        placeholder_28 = T.match_buffer(placeholder_26, [864], dtype="float32")
        placeholder_29 = T.match_buffer(placeholder_27, [864], dtype="float32")
        T_add_3 = T.match_buffer(T_add_2, [864], dtype="float32")
        # body
        for ax0_ax1_fused_ax2_fused in T.serial(0, 72):
            T_softmax_norm = T.decl_buffer([12], "float32")
            with T.decl_buffer([1], "float32") as T_softmax_maxelem:
                T_softmax_maxelem[0] = T.float32(-3.4028234663852886e+38)
                for k in T.serial(0, 12):
                    T_softmax_maxelem[0] = T.max(T_softmax_maxelem[0], placeholder_28[ax0_ax1_fused_ax2_fused * 12 + k])
                T_softmax_exp= T.decl_buffer([12], "float32")
                for i3 in T.serial(0, 12):
                    T_softmax_exp[i3] = T.exp(placeholder_28[ax0_ax1_fused_ax2_fused * 12 + i3] - T_softmax_maxelem[0], dtype="float32")
                T_softmax_expsum = T.decl_buffer([1], "float32")
                T_softmax_expsum[0] = T.float32(0)
                for k in T.serial(0, 12):
                    T_softmax_expsum[0] = T_softmax_expsum[0] + T_softmax_exp[k]
                for i3 in T.serial(0, 12):
                    T_softmax_norm[i3] = T_softmax_exp[i3] / T_softmax_expsum[0]
            for ax3 in T.serial(0, 12):
                T_add_3[ax0_ax1_fused_ax2_fused * 12 + ax3] = placeholder_29[ax0_ax1_fused_ax2_fused * 12 + ax3] + T_softmax_norm[ax3]
    # Top-level driver: workspace allocates (sid_*) plus three allocate_const
    # buffers; most prim_funcs above are called at two different sites.
    @T.prim_func
    def run_model(data: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_run_model", "runner_function": True})
        data_buffer = T.match_buffer(data, [864], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [864], dtype="float32", align=16)
        # body
        sid_11 = T.allocate([3456], "int8", "global.workspace")
        sid_5 = T.allocate([3456], "int8", "global.workspace")
        sid_10 = T.allocate([3456], "int8", "global.workspace")
        sid_6 = T.allocate([3456], "int8", "global.workspace")
        sid_8 = T.allocate([3456], "int8", "global.workspace")
        sid_2 = T.allocate([3456], "int8", "global.workspace")
        sid_7 = T.allocate([3456], "int8", "global.workspace")
        sid_3 = T.allocate([3456], "int8", "global.workspace")
        sid_12 = T.allocate([3456], "int8", "global.workspace")
        sid_4 = T.allocate([3456], "int8", "global.workspace")
        sid_18 = T.allocate([3456], "int8", "global.workspace")
        sid_19 = T.allocate([3456], "int8", "global.workspace")
        sid_20 = T.allocate([3456], "int8", "global.workspace")
        sid_21 = T.allocate_const([0,1,2,3,4,5,6,7,8,9], "int8", [10])
        sid_22 = T.allocate_const([1], "int8", [1])
        sid_23 = T.allocate_const([2,1], "int8", [3456])
        # NOTE(review): sid_23 (an allocate_const buffer) is the *destination*
        # of the first call below — presumably intentional for exercising the
        # constant-pool conflict analysis; confirm against the USMP tests.
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform_1", data_buffer.data, sid_23, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_conv2d_NCHWc", sid_8, T.cast(T.lookup_param("p0", dtype="handle"), "handle"), sid_7, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform", sid_7, sid_6, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape_1", data_buffer.data, sid_12, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", sid_12, T.cast(T.lookup_param("p1", dtype="handle"), "handle"), sid_11, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape", sid_11, sid_10, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_softmax_add_add_multiply_add", sid_6, sid_10, T.cast(T.lookup_param("p2", dtype="handle"), "handle"), T.cast(T.lookup_param("p3", dtype="handle"), "handle"), T.cast(T.lookup_param("p4", dtype="handle"), "handle"), sid_5, dtype="int32"))
        # second round of calls into the same prim_funcs, on fresh buffers
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform_1", sid_5, sid_4, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_conv2d_NCHWc", sid_4, T.cast(T.lookup_param("p5", dtype="handle"), "handle"), sid_3, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_layout_transform", sid_3, sid_2, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape_1", sid_5, sid_20, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_contrib_dense_pack_nn_relu", sid_20, T.cast(T.lookup_param("p6", dtype="handle"), "handle"), sid_19, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_reshape", sid_19, sid_18, dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("tvmgen_default_fused_nn_softmax_add", sid_2, sid_18, output_buffer.data, dtype="int32"))
# fmt: on
def test_multiple_calls_to_same_primfunc():
    """Check USMP buffer-info extraction when prim_funcs are called twice.

    MultipleCallsToSamePrimFuncModule invokes several prim_funcs from two
    call sites, so the analysis must report the prim_funcs' internal buffers
    (data_pad, compute, T_softmax_* ...) with conflicts from both calls.
    """
    target = Target("c")
    workspace_pool = WorkspacePoolInfo(
        pool_name="global_workspace",
        targets=[target],
    )
    constant_pool = ConstantPoolInfo(
        pool_name="global_constants",
        targets=[target],
    )
    mod = _assign_targets_to_primfuncs_irmodule(MultipleCallsToSamePrimFuncModule, target)
    mod = _assign_poolinfos_to_allocates_in_irmodule(mod, [workspace_pool], [constant_pool])
    analysis = tvm.tir.usmp.analysis.extract_buffer_info(mod["run_model"], mod)
    assert analysis.memory_pressure == 11424
    buffer_info_map = _replace_stmt_with_buf_var_names(analysis.buffer_info_stmts)

    # buffer name -> buffers it must conflict with; some lists intentionally
    # repeat a name (one entry per call site), matching the original checks.
    expected_conflicts = {
        "sid_23": ["sid_22", "sid_21"],
        "sid_6": [
            "sid_7", "sid_12", "compute", "compute_global", "sid_11", "sid_10",
            "T_softmax_exp", "T_softmax_maxelem", "sid_5", "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_exp": [
            "sid_10", "sid_6", "T_softmax_maxelem", "sid_5", "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_expsum2": [
            "T_softmax_exp2", "T_softmax_norm2", "sid_18", "T_softmax_maxelem2",
            "sid_2",
        ],
        "compute": [
            "sid_12", "sid_6", "compute_global", "sid_11", "sid_19", "sid_20",
            "sid_2", "compute_global",
        ],
        "compute_global": [
            "compute", "sid_12", "sid_6", "sid_11", "compute", "sid_19",
            "sid_20", "sid_2",
        ],
        "sid_10": [
            "sid_11", "sid_6", "T_softmax_exp", "T_softmax_maxelem", "sid_5",
            "T_softmax_norm", "T_softmax_expsum",
        ],
        "sid_2": [
            "sid_3", "sid_5", "sid_20", "sid_19", "compute", "compute_global",
            "sid_18", "T_softmax_norm2", "T_softmax_exp2", "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_5": [
            "T_softmax_maxelem", "sid_10", "T_softmax_exp", "sid_6",
            "T_softmax_norm", "T_softmax_expsum", "sid_4", "data_pad", "sid_3",
            "conv2d_NCHWc_global", "sid_2", "sid_20",
        ],
        "T_softmax_norm2": [
            "sid_18", "sid_2", "T_softmax_exp2", "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_20": ["sid_2", "sid_5", "sid_19", "compute", "compute_global"],
        "T_softmax_expsum": [
            "sid_5", "T_softmax_norm", "T_softmax_maxelem", "sid_10",
            "T_softmax_exp", "sid_6",
        ],
        "data_pad": [
            "sid_8", "conv2d_NCHWc_global", "sid_7", "sid_4", "sid_5", "sid_3",
            "conv2d_NCHWc_global",
        ],
        "sid_19": ["sid_20", "sid_2", "compute", "compute_global", "sid_18"],
        "conv2d_NCHWc_global": ["data_pad", "sid_7", "sid_3", "data_pad", "sid_5"],
        "sid_18": [
            "sid_19", "sid_2", "T_softmax_norm2", "T_softmax_exp2",
            "T_softmax_maxelem2", "T_softmax_expsum2",
        ],
        "sid_7": ["conv2d_NCHWc_global", "data_pad", "sid_6"],
        "T_softmax_exp2": [
            "T_softmax_norm2", "sid_18", "sid_2", "T_softmax_maxelem2",
            "T_softmax_expsum2",
        ],
        "sid_4": ["sid_5", "data_pad"],
        "T_softmax_maxelem": [
            "sid_10", "T_softmax_exp", "sid_6", "sid_5", "T_softmax_norm",
            "T_softmax_expsum",
        ],
        "T_softmax_maxelem2": [
            "T_softmax_exp2", "T_softmax_norm2", "sid_18", "sid_2",
            "T_softmax_expsum2",
        ],
        "sid_11": ["compute", "sid_12", "compute_global", "sid_6", "sid_10"],
        "sid_12": ["sid_6", "compute", "compute_global", "sid_11"],
        "T_softmax_norm": [
            "sid_5", "T_softmax_maxelem", "sid_10", "T_softmax_exp", "sid_6",
            "T_softmax_expsum",
        ],
        "sid_8": ["data_pad"],
    }
    for buf_name, conflicting_bufs in expected_conflicts.items():
        _verify_conflicts(buf_name, conflicting_bufs, buffer_info_map)
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 88,475 | 51.229044 | 659 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_reorder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
# Baseline 4-D elementwise (multiply-by-2) workload with four independent
# data-parallel loops; the common starting fixture for the reorder tests.
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Variant whose last block var binding is strided (vl = l * 16 over an extent-8
# loop) rather than a trivial affine remap — presumably a negative case for
# reorder(); confirm against the tests that consume it.
@T.prim_func
def elementwise_not_affine(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 8):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            vl = T.axis.S(128, l * 16)
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Variant where the extent of loop k is the outer loop var i (a data-dependent
# loop nest) — presumably a negative case for reorder(); confirm against the
# tests that consume it.
@T.prim_func
def elementwise_dependent_loop(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i in T.serial(0, 128):
        for j, k, l in T.grid(128, i, 128):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
                B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Same elementwise workload but guarded by a T.where predicate over the
# flattened index, used to check reorder() in the presence of a predicate.
@T.prim_func
def elementwise_predicate(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for i, j, k, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
# Variant with two sibling k-loops (blocks "C" then "B") under the shared i/j
# loops, so the loop nest is not a single branch — presumably a negative case
# for reorder(); confirm against the tests that consume it.
@T.prim_func
def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128, 128))
    C = T.alloc_buffer((128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        for k in T.serial(0, 128):
            with T.block("C"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                C[vi, vj, vk] = A[vi, vj, vk] * 2.0
        for k in T.serial(0, 128):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSS", [i, j, k])
                B[vi, vj, vk] = C[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_loops_not_same_scope(a: T.handle, b: T.handle) -> None:
    # Negative fixture: loop k lives inside block "A" while loops i/j are
    # outside it, so the two loops belong to different block scopes and
    # reorder mixing them must be rejected.
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j in T.grid(128, 128):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            for k in T.serial(0, 128):
                with T.block("B"):
                    vk = T.axis.S(128, k)
                    T.reads([A[vi, vj, vk]])
                    T.writes([B[vi, vj, vk]])
                    B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_with_wrong_block_var_type(a: T.handle, b: T.handle) -> None:
    # Negative fixture: vk is declared with T.axis.scan (an opaque iter type,
    # neither spatial nor reduce), which reorder does not allow.
    A = T.match_buffer(a, (128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128))
    for i, j, k in T.grid(128, 128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            vk = T.axis.scan(128, k)
            T.reads([A[vi, vj, vk]])
            T.writes([B[vi, vj, vk]])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def elementwise_reordered(a: T.handle, b: T.handle) -> None:
    # Expected result of sch.reorder(l, i) applied to `elementwise`:
    # the loop order becomes (l, j, k, i).
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for l, j, k, i in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_reordered2(a: T.handle, b: T.handle) -> None:
    # Expected result of sch.reorder(k, i, l) applied to `elementwise`:
    # the loop order becomes (k, j, i, l).
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for k, j, i, l in T.grid(128, 128, 128, 128):
        with T.block("B"):
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def elementwise_reordered_with_predicate(a: T.handle, b: T.handle) -> None:
    # Reordered variant of `elementwise_predicate`. NOTE(review): none of the
    # tests visible in this chunk reference it (reorder on a predicated block
    # is expected to fail) -- presumably kept for reference/comparison.
    A = T.match_buffer(a, (128, 128, 128, 128))
    B = T.match_buffer(b, (128, 128, 128, 128))
    for l, j, k, i in T.grid(128, 128, 128, 128):
        with T.block("B"):
            T.where(i * 2097152 + j * 16384 + k * 128 + l < 100)
            vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l])
            B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0
@T.prim_func
def opaque_access(a: T.handle, b: T.handle) -> None:
    # Fixture with explicitly declared (opaque) read/write regions: block "B"
    # writes through the tvm_fill_fragment intrinsic rather than a plain
    # buffer store. Reorder should still work on both blocks.
    A = T.match_buffer(a, [16, 16], "float32")
    B = T.match_buffer(b, [16, 16], "float32")
    for i, j in T.grid(16, 16):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for i, j in T.grid(16, 16):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
@T.prim_func
def opaque_access_reorder(a: T.handle, b: T.handle) -> None:
    # Expected result of reordering (j, i) in both blocks of `opaque_access`.
    A = T.match_buffer(a, [16, 16], "float32")
    B = T.match_buffer(b, [16, 16], "float32")
    for j, i in T.grid(16, 16):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([A[0:16, 0:16]])
            A[vi, vj] = 1
    for j, i in T.grid(16, 16):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([])
            T.writes([B[0:16, 0:16]])
            T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle"))
# pylint: enable=no-member,invalid-name,unused-variable
def test_reorder():
    """reorder(l, i) on `elementwise` yields `elementwise_reordered`."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    sch.reorder(loops[3], loops[0])
    tvm.ir.assert_structural_equal(elementwise_reordered, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder2():
    """reorder(k, i, l) on `elementwise` yields `elementwise_reordered2`."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    sch.reorder(loops[2], loops[0], loops[3])
    tvm.ir.assert_structural_equal(elementwise_reordered2, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reorder_with_opaque_access():
    """Reorder succeeds on blocks with opaque (declared) read/write regions."""
    sch = tir.Schedule(opaque_access, debug_mask="all")
    # Swap the (i, j) loops of block "A", then of block "B".
    for block_name in ("A", "B"):
        loop_i, loop_j = sch.get_loops(sch.get_block(block_name))
        sch.reorder(loop_j, loop_i)
    tvm.ir.assert_structural_equal(opaque_access_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=opaque_access)
def test_reorder_overlapped_access():
    # Reorder is legal even when the block binding i = v0 * 2 + v1 makes
    # iterations overlap (the same buffer row is written more than once).
    @T.prim_func
    def overlapped_access(A: T.Buffer((14, 4), "float32"), B: T.Buffer((14, 4), "float32")):
        # example to write first axis multiple times
        for v0, v1, v2 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * 2 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    @T.prim_func
    def overlapped_access_reorder(A: T.Buffer((14, 4), "float32"), B: T.Buffer((14, 4), "float32")):
        # example to write first axis multiple times
        for v0, v2, v1 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * 2 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    sch = tir.Schedule(overlapped_access, debug_mask="all")
    v0, v1, v2 = sch.get_loops(sch.get_block("block"))
    sch.reorder(v0, v2, v1)
    tvm.ir.assert_structural_equal(overlapped_access_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=overlapped_access)
def test_reorder_with_partial_affineness():
    # With a non-affine binding i = v0 * v0 + v1, reordering that involves v0
    # must fail, but reordering only the affine loops (v2, v1) still works.
    @T.prim_func
    def non_affine_func(A: T.Buffer((14, 4), "float32"), B: T.Buffer((14, 4), "float32")):
        for v0, v1, v2 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * v0 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    @T.prim_func
    def non_affine_func_reorder(A: T.Buffer((14, 4), "float32"), B: T.Buffer((14, 4), "float32")):
        for v0, v2, v1 in T.grid(6, 4, 4):
            with T.block("block"):
                i = T.axis.spatial(14, v0 * v0 + v1)
                j = T.axis.spatial(4, v2)
                B[i, j] = A[i, j] + 1.0

    sch = tir.Schedule(non_affine_func, debug_mask="all")
    v0, v1, v2 = sch.get_loops(sch.get_block("block"))
    # Touching the non-affine loop v0 is rejected...
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(v0, v2, v1)
    # ...but reordering the affine pair succeeds.
    sch.reorder(v2, v1)
    tvm.ir.assert_structural_equal(non_affine_func_reorder, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=non_affine_func)
def test_reorder_with_cascade_tiled_ops():
    # Reorder inside a cascade of two pooling ops after split + compute_at:
    # pool_0 is computed at pool_1's outer h loop, then its (h_i, w) loops
    # are swapped to (w, h_i).
    @T.prim_func
    def cascade_pool_ops(
        x: T.Buffer((1, 16, 112, 112), "float32"), y2: T.Buffer((1, 16, 108, 108), "float32")
    ) -> None:
        y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
        for n, c, h, w, kh, kw in T.grid(1, 16, 110, 110, 3, 3):
            with T.block("pool_0"):
                ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
                with T.init():
                    y1[ax0, ax1, ax2, ax3] = 0.0
                y1[ax0, ax1, ax2, ax3] = y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
        for n, c, h, w, kh, kw in T.grid(1, 16, 108, 108, 3, 3):
            with T.block("pool_1"):
                ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [n, c, h, w, kh, kw])
                with T.init():
                    y2[ax0, ax1, ax2, ax3] = 0.0
                y2[ax0, ax1, ax2, ax3] = y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]

    # Expected module after split(h, [None, 4]), compute_at and reorder(w, h_i).
    @T.prim_func
    def cascade_pool_ops_tile_reordered(
        x: T.Buffer((1, 16, 112, 112), "float32"), y2: T.Buffer((1, 16, 108, 108), "float32")
    ) -> None:
        y1 = T.alloc_buffer([1, 16, 110, 110], dtype="float32")
        for n, c, h_o in T.grid(1, 16, 27):
            for w, h_i, kh, kw in T.grid(110, 6, 3, 3):
                with T.block("pool_0"):
                    ax0 = T.axis.spatial(1, 0)
                    ax1 = T.axis.spatial(16, c)
                    ax2 = T.axis.spatial(110, h_o * 4 + h_i)
                    ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
                    with T.init():
                        y1[ax0, ax1, ax2, ax3] = 0.0
                    y1[ax0, ax1, ax2, ax3] = (
                        y1[ax0, ax1, ax2, ax3] + x[ax0, ax1, ax2 + rv0, ax3 + rv1]
                    )
            for h_i, w, kh, kw in T.grid(4, 108, 3, 3):
                with T.block("pool_1"):
                    ax0 = T.axis.spatial(1, n)
                    ax1 = T.axis.spatial(16, c)
                    ax2 = T.axis.spatial(108, h_o * 4 + h_i)
                    ax3, rv0, rv1 = T.axis.remap("SRR", [w, kh, kw])
                    with T.init():
                        y2[ax0, ax1, ax2, ax3] = 0.0
                    y2[ax0, ax1, ax2, ax3] = (
                        y2[ax0, ax1, ax2, ax3] + y1[ax0, ax1, ax2 + rv0, ax3 + rv1]
                    )

    sch = tvm.tir.schedule.Schedule(cascade_pool_ops)
    pool_0 = sch.get_block("pool_0")
    pool_1 = sch.get_block("pool_1")
    _, _, h, w, _, _ = sch.get_loops(pool_1)
    ho, _ = sch.split(h, factors=[None, 4])
    sch.compute_at(pool_0, ho)
    _, _, _, h_i, w, _, _ = sch.get_loops(pool_0)
    sch.reorder(w, h_i)
    tvm.ir.assert_structural_equal(cascade_pool_ops_tile_reordered, sch.mod["main"], True)
    verify_trace_roundtrip(sch=sch, mod=cascade_pool_ops)
def test_reorder_with_predicate():
    """Reorder is rejected when the target block carries a T.where predicate."""
    sch = tir.Schedule(elementwise_predicate, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
def test_reorder_fail_with_multi_appearance_loops():
    """Passing the same loop twice to reorder must raise."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[2], loops[0], loops[0])
def test_reorder_fail_with_non_single_branch_loop():
    """Reorder across a loop whose body has two sibling blocks must raise."""
    # Case 1: reorder within one block, across the branching (i, j) nest.
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    loop_i, _, loop_k = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loop_k, loop_i)
    # Case 2: mixing k-loops taken from the two sibling blocks must fail too.
    sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all")
    loop_i, _, k_of_b = sch.get_loops(sch.get_block("B"))
    k_of_c = sch.get_loops(sch.get_block("C"))[2]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(k_of_b, loop_i, k_of_c)
def test_reorder_fail_with_loops_not_under_same_scope():
    """Reorder mixing loops from different block scopes must raise."""
    sch = tir.Schedule(elementwise_with_loops_not_same_scope, debug_mask="all")
    outer_i = sch.get_loops(sch.get_block("A"))[0]
    inner_k = sch.get_loops(sch.get_block("B"))[0]
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(inner_k, outer_i)
def test_reorder_fail_with_wrong_block_var_type():
    """Reorder involving a loop bound to an opaque (scan) block var must raise."""
    sch = tir.Schedule(elementwise_with_wrong_block_var_type, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[2], loops[0])
def test_reorder_fail_with_dependent_loops():
    """Reorder across loops whose extents depend on each other must raise."""
    sch = tir.Schedule(elementwise_dependent_loop, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
def test_reorder_fail_not_affine_bindings():
    """Reorder on a block whose bindings are not an affine bijection must raise."""
    sch = tir.Schedule(elementwise_not_affine, debug_mask="all")
    loops = sch.get_loops(sch.get_block("B"))
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reorder(loops[3], loops[0])
if __name__ == "__main__":
    # Allow running this file directly; tvm.testing.main dispatches to pytest.
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
    """Return the CUDA target shared by the tests in this file."""
    gpu_tag = "nvidia/geforce-rtx-3080"
    return Target(gpu_tag)
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postprocessor is VerifyGPUCode."""
    space = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.VerifyGPUCode()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space,
        task_name="test",
    )
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,not-callable,misplaced-comparison-constant
# fmt: off
@tvm.script.ir_module
class Conv2dCuda0:
    # Well-formed CUDA conv workload: local/shared buffer sizes and thread
    # extents are all modest, so presumably VerifyGPUCode accepts it (the
    # driving tests are outside this chunk -- confirm against them).
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "T.noalias": True})
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        threadIdx_y = T.env_thread("threadIdx.y")
        blockIdx_x = T.env_thread("blockIdx.x")
        blockIdx_y = T.env_thread("blockIdx.y")
        blockIdx_z = T.env_thread("blockIdx.z")
        A = T.match_buffer(a, [14*14*256*256], dtype="float32")
        B = T.match_buffer(b, [14*14*512*256], dtype="float32")
        # body
        T.launch_thread(blockIdx_z, 196)
        B_local = T.decl_buffer([64], "float32", scope="local")
        Apad_shared = T.decl_buffer([512], "float32", scope="shared")
        Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
        T.launch_thread(blockIdx_y, 8)
        T.launch_thread(blockIdx_x, 4)
        T.launch_thread(threadIdx_y, 8)
        T.launch_thread(threadIdx_x, 8)
        for ff_c_init, nn_c_init in T.grid(8, 8):
            B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
        for rc_outer, ry, rx in T.grid(32, 3, 3):
            for ax3_inner_outer in T.serial(0, 2):
                Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
                    1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= rx + blockIdx_z % 14 and rx + blockIdx_z % 14 < 15,
                    A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
                    T.broadcast(T.float32(0), 4),
                    dtype="float32x4",
                )
            for rc_inner in T.serial(0, 8):
                for ax3 in T.serial(0, 8):
                    Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
                for ff_c, nn_c in T.grid(8, 8):
                    B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
        for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
            B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner] # fmt: on
@tvm.script.ir_module
class Conv2dCuda1:
    # Variant of Conv2dCuda0 whose per-thread local buffer B_local is inflated
    # to 6,400,000 float32 elements (and its last element is touched so buffer
    # compaction cannot shrink it). Presumably this exceeds the target's local
    # memory limit and is rejected by VerifyGPUCode -- the driving tests are
    # outside this chunk.
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "T.noalias": True})
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        threadIdx_y = T.env_thread("threadIdx.y")
        blockIdx_x = T.env_thread("blockIdx.x")
        blockIdx_y = T.env_thread("blockIdx.y")
        blockIdx_z = T.env_thread("blockIdx.z")
        A = T.match_buffer(a, [14*14*256*256], dtype="float32")
        B = T.match_buffer(b, [14*14*512*256], dtype="float32")
        # body
        T.launch_thread(blockIdx_z, 196)
        B_local = T.decl_buffer([6400000], "float32", scope="local")
        Apad_shared = T.decl_buffer([512], "float32", scope="shared")
        Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
        T.launch_thread(blockIdx_y, 8)
        T.launch_thread(blockIdx_x, 4)
        T.launch_thread(threadIdx_y, 8)
        T.launch_thread(threadIdx_x, 8)
        for ff_c_init, nn_c_init in T.grid(8, 8):
            B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
            # Access of the last element of B_local prevents buffer
            # compacting from reducing the amount of shared memory
            # used.
            B_local[6400000-1 + ff_c_init*8] = 0.0
        for rc_outer, ry, rx in T.grid(32, 3, 3):
            for ax3_inner_outer in T.serial(0, 2):
                Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
                    1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= rx + blockIdx_z % 14 and rx + blockIdx_z % 14 < 15,
                    A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
                    T.broadcast(T.float32(0), 4),
                    dtype="float32x4",
                )
            for rc_inner in T.serial(0, 8):
                for ax3 in T.serial(0, 8):
                    Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
                for ff_c, nn_c in T.grid(8, 8):
                    B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
        for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
            B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]# fmt: on
@tvm.script.ir_module
class Conv2dCuda2:
    # Variant of Conv2dCuda0 whose shared buffer Apad_shared is inflated to
    # 512,000 float32 elements (~2 MB, with its last element touched so buffer
    # compaction cannot shrink it). Presumably this exceeds the target's shared
    # memory limit and is rejected by VerifyGPUCode -- the driving tests are
    # outside this chunk.
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "T.noalias": True})
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        threadIdx_y = T.env_thread("threadIdx.y")
        blockIdx_x = T.env_thread("blockIdx.x")
        blockIdx_y = T.env_thread("blockIdx.y")
        blockIdx_z = T.env_thread("blockIdx.z")
        A = T.match_buffer(a, [14*14*256*256], dtype="float32")
        B = T.match_buffer(b, [14*14*512*256], dtype="float32")
        # body
        T.launch_thread(blockIdx_z, 196)
        B_local = T.decl_buffer([64], "float32", scope="local")
        Apad_shared = T.decl_buffer([512000], "float32", scope="shared")
        Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
        T.launch_thread(blockIdx_y, 8)
        T.launch_thread(blockIdx_x, 4)
        T.launch_thread(threadIdx_y, 8)
        T.launch_thread(threadIdx_x, 8)
        for ff_c_init, nn_c_init in T.grid(8, 8):
            B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
        for rc_outer, ry, rx in T.grid(32, 3, 3):
            for ax3_inner_outer in T.serial(0, 2):
                Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
                    1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= rx + blockIdx_z % 14 and rx + blockIdx_z % 14 < 15,
                    A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
                    T.broadcast(T.float32(0), 4),
                    dtype="float32x4",
                )
                # Access of the last element of Apad_shared prevents
                # buffer compacting from reducing the amount of shared
                # memory used.
                Apad_shared[512000-1] = 0.0
            for rc_inner in T.serial(0, 8):
                for ax3 in T.serial(0, 8):
                    Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
                for ff_c, nn_c in T.grid(8, 8):
                    B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
        for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
            B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]# fmt: on
@tvm.script.ir_module
class Conv2dCuda3:
    # Variant of Conv2dCuda0 that launches threadIdx.x with extent 800,000.
    # Presumably this exceeds the target's max-threads-per-block limit and is
    # rejected by VerifyGPUCode -- the driving tests are outside this chunk.
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "T.noalias": True})
        # var definition
        threadIdx_x = T.env_thread("threadIdx.x")
        threadIdx_y = T.env_thread("threadIdx.y")
        blockIdx_x = T.env_thread("blockIdx.x")
        blockIdx_y = T.env_thread("blockIdx.y")
        blockIdx_z = T.env_thread("blockIdx.z")
        A = T.match_buffer(a, [14*14*256*256], dtype="float32")
        B = T.match_buffer(b, [14*14*512*256], dtype="float32")
        # body
        T.launch_thread(blockIdx_z, 196)
        B_local = T.decl_buffer([64], "float32", scope="local")
        Apad_shared = T.decl_buffer([512], "float32", scope="shared")
        Apad_shared_local = T.decl_buffer([8], "float32", scope="local")
        T.launch_thread(blockIdx_y, 8)
        T.launch_thread(blockIdx_x, 4)
        T.launch_thread(threadIdx_y, 8)
        T.launch_thread(threadIdx_x, 800000)
        for ff_c_init, nn_c_init in T.grid(8, 8):
            B_local[ff_c_init * 8 + nn_c_init] = T.float32(0)
        for rc_outer, ry, rx in T.grid(32, 3, 3):
            for ax3_inner_outer in T.serial(0, 2):
                Apad_shared[T.ramp(threadIdx_y * 64 + threadIdx_x * 8 + ax3_inner_outer * 4, 1, 4)] = T.if_then_else(
                    1 <= blockIdx_z // 14 + ry and blockIdx_z // 14 + ry < 15 and 1 <= rx + blockIdx_z % 14 and rx + blockIdx_z % 14 < 15,
                    A[T.ramp(ry * 917504 + blockIdx_z * 65536 + rx * 65536 + rc_outer * 2048 + threadIdx_y * 256 + blockIdx_x * 64 + threadIdx_x * 8 + ax3_inner_outer * 4 - 983040, 1, 4)],
                    T.broadcast(T.float32(0), 4),
                    dtype="float32x4",
                )
            for rc_inner in T.serial(0, 8):
                for ax3 in T.serial(0, 8):
                    Apad_shared_local[ax3] = Apad_shared[rc_inner * 64 + threadIdx_x * 8 + ax3]
                for ff_c, nn_c in T.grid(8, 8):
                    B_local[ff_c * 8 + nn_c] = B_local[ff_c * 8 + nn_c] + Apad_shared_local[nn_c]
        for ff_inner_inner_inner, nn_inner_inner_inner in T.grid(8, 8):
            B[blockIdx_z * 131072 + blockIdx_y * 16384 + threadIdx_y * 2048 + ff_inner_inner_inner * 256 + blockIdx_x * 64 + threadIdx_x * 8 + nn_inner_inner_inner] = B_local[ff_inner_inner_inner * 8 + nn_inner_inner_inner]# fmt: on
@T.prim_func
def GmmCuda0(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None:
    # Batched 128x128x128 GEMM tiled for CUDA: shared-memory staging of X/Y
    # tiles, a local accumulator, and no thread-extent block_attr constraints.
    Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
    X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
        for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"):
            for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
                for i1_3_init, i2_4_init in T.grid(4, 2):
                    with T.block("Z_init"):
                        b = T.axis.spatial(1, 0)
                        i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
                        j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
                        T.reads()
                        T.writes(Z_local[b, i, j])
                        Z_local[b, i, j] = T.float32(0)
                for i3_0 in T.serial(4):
                    for ax0_ax1_ax2_fused_0 in T.serial(4):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            for ax0_ax1_ax2_fused_2 in T.vectorized(2):
                                with T.block("X_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
                                    v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
                                    T.reads(X[v0, v1, v2])
                                    T.writes(X_shared[v0, v1, v2])
                                    X_shared[v0, v1, v2] = X[v0, v1, v2]
                    for ax0_ax1_ax2_fused_0 in T.serial(8):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            with T.block("Y_shared"):
                                v0 = T.axis.spatial(1, 0)
                                v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
                                v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
                                T.reads(Y[v0, v1, v2])
                                T.writes(Y_shared[v0, v1, v2])
                                Y_shared[v0, v1, v2] = Y[v0, v1, v2]
                    for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
                        with T.block("Z_update"):
                            b = T.axis.spatial(1, 0)
                            i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
                            j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
                            k = T.axis.reduce(128, i3_0 * 32 + i3_2)
                            T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
                            T.writes(Z_local[b, i, j])
                            Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
                for ax0, ax1, ax2 in T.grid(1, 4, 2):
                    with T.block("Z_local"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
                        v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
                        T.reads(Z_local[v0, v1, v2])
                        T.writes(Z[v0, v1, v2])
                        Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GmmCuda1(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None:
    # Same GEMM as GmmCuda0, except the "Z_update" block carries a block_attr
    # restricting the allowed thread extent to [0, 32] while threadIdx.x is
    # launched with extent 128. Presumably rejected by VerifyGPUCode -- the
    # driving tests are outside this chunk.
    Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
    X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
        for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"):
            for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
                for i1_3_init, i2_4_init in T.grid(4, 2):
                    with T.block("Z_init"):
                        b = T.axis.spatial(1, 0)
                        i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
                        j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
                        T.reads()
                        T.writes(Z_local[b, i, j])
                        Z_local[b, i, j] = T.float32(0)
                for i3_0 in T.serial(4):
                    for ax0_ax1_ax2_fused_0 in T.serial(4):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            for ax0_ax1_ax2_fused_2 in T.vectorized(2):
                                with T.block("X_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
                                    v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
                                    T.reads(X[v0, v1, v2])
                                    T.writes(X_shared[v0, v1, v2])
                                    X_shared[v0, v1, v2] = X[v0, v1, v2]
                    for ax0_ax1_ax2_fused_0 in T.serial(8):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            with T.block("Y_shared"):
                                v0 = T.axis.spatial(1, 0)
                                v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
                                v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
                                T.reads(Y[v0, v1, v2])
                                T.writes(Y_shared[v0, v1, v2])
                                Y_shared[v0, v1, v2] = Y[v0, v1, v2]
                    for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
                        with T.block("Z_update"):
                            b = T.axis.spatial(1, 0)
                            i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
                            j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
                            k = T.axis.reduce(128, i3_0 * 32 + i3_2)
                            # Thread-extent constraint that the launched extent
                            # (128) falls outside of.
                            T.block_attr({
                                "meta_schedule.thread_extent_low_inclusive": 0,
                                "meta_schedule.thread_extent_high_inclusive": 32,
                            })
                            T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
                            T.writes(Z_local[b, i, j])
                            Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
                for ax0, ax1, ax2 in T.grid(1, 4, 2):
                    with T.block("Z_local"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
                        v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
                        T.reads(Z_local[v0, v1, v2])
                        T.writes(Z[v0, v1, v2])
                        Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GmmCuda2(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None:
    # Same GEMM as GmmCuda0, except the "Z_update" block carries a block_attr
    # requiring the thread extent to be exactly [1024, 1024] while threadIdx.x
    # is launched with extent 128. Presumably rejected by VerifyGPUCode -- the
    # driving tests are outside this chunk.
    Z_local = T.alloc_buffer([1, 128, 128], dtype="float32", scope="local")
    X_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    Y_shared = T.alloc_buffer([1, 128, 128], dtype="float32", scope="shared")
    for i0_0_i1_0_i2_0_fused in T.thread_binding(16, thread="blockIdx.x"):
        for i0_1_i1_1_i2_1_fused in T.thread_binding(1, thread="vthread.x"):
            for i0_2_i1_2_i2_2_fused in T.thread_binding(128, thread="threadIdx.x"):
                for i1_3_init, i2_4_init in T.grid(4, 2):
                    with T.block("Z_init"):
                        b = T.axis.spatial(1, 0)
                        i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3_init)
                        j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4_init)
                        T.reads()
                        T.writes(Z_local[b, i, j])
                        Z_local[b, i, j] = T.float32(0)
                for i3_0 in T.serial(4):
                    for ax0_ax1_ax2_fused_0 in T.serial(4):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            for ax0_ax1_ax2_fused_2 in T.vectorized(2):
                                with T.block("X_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) // 32)
                                    v2 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 256 + ax0_ax1_ax2_fused_1 * 2 + ax0_ax1_ax2_fused_2) % 32)
                                    T.reads(X[v0, v1, v2])
                                    T.writes(X_shared[v0, v1, v2])
                                    X_shared[v0, v1, v2] = X[v0, v1, v2]
                    for ax0_ax1_ax2_fused_0 in T.serial(8):
                        for ax0_ax1_ax2_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                            with T.block("Y_shared"):
                                v0 = T.axis.spatial(1, 0)
                                v1 = T.axis.spatial(128, i3_0 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) // 32)
                                v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + (ax0_ax1_ax2_fused_0 * 128 + ax0_ax1_ax2_fused_1) % 32)
                                T.reads(Y[v0, v1, v2])
                                T.writes(Y_shared[v0, v1, v2])
                                Y_shared[v0, v1, v2] = Y[v0, v1, v2]
                    for i3_1, i0_3, i1_3, i2_3, i3_2, i0_4, i1_4, i2_4 in T.grid(1, 1, 4, 1, 32, 1, 1, 2):
                        with T.block("Z_update"):
                            b = T.axis.spatial(1, 0)
                            i = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + i1_3)
                            j = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + i2_4)
                            k = T.axis.reduce(128, i3_0 * 32 + i3_2)
                            # Thread-extent constraint that the launched extent
                            # (128) falls outside of.
                            T.block_attr({
                                "meta_schedule.thread_extent_low_inclusive": 1024,
                                "meta_schedule.thread_extent_high_inclusive": 1024,
                            })
                            T.reads(Z_local[b, i, j], X_shared[b, i, k], Y_shared[b, k, j])
                            T.writes(Z_local[b, i, j])
                            Z_local[b, i, j] = Z_local[b, i, j] + X_shared[b, i, k] * Y_shared[b, k, j]
                for ax0, ax1, ax2 in T.grid(1, 4, 2):
                    with T.block("Z_local"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused // 4 * 32 + i0_2_i1_2_i2_2_fused // 16 * 4 + ax1)
                        v2 = T.axis.spatial(128, i0_0_i1_0_i2_0_fused % 4 * 32 + i0_2_i1_2_i2_2_fused % 16 * 2 + ax2)
                        T.reads(Z_local[v0, v1, v2])
                        T.writes(Z[v0, v1, v2])
                        Z[v0, v1, v2] = Z_local[v0, v1, v2]
@T.prim_func
def GMMCUDATensorCore(
X: T.Buffer((1024, 1024), "float16"),
Y: T.Buffer((1024, 1024), "float16"),
Z: T.Buffer((1024, 1024), "float32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
s0 = T.int32()
s0_1 = T.int32()
s0_2 = T.int32()
s1 = T.int32()
s1_1 = T.int32()
s1_2 = T.int32()
# body
# with T.block("root")
Z_wmma_accumulator = T.alloc_buffer([1024, 1024], dtype="float32", scope="wmma.accumulator")
X_shared = T.alloc_buffer([1024, 1024], dtype="float16", scope="shared")
Y_shared = T.alloc_buffer([1024, 1024], dtype="float16", scope="shared")
X_shared_wmma_matrix_a = T.alloc_buffer([1024, 1024], dtype="float16", scope="wmma.matrix_a")
Y_shared_wmma_matrix_b = T.alloc_buffer([1024, 1024], dtype="float16", scope="wmma.matrix_b")
for ax0_0_ax1_0_0_ax2_0_0_fused in T.thread_binding(64, thread="blockIdx.x"):
for ax0_1_ax1_0_1_ax2_0_1_fused in T.thread_binding(2, thread="blockIdx.y"):
for ax0_2_ax1_0_2_ax2_0_2_fused in T.thread_binding(2, thread="threadIdx.y"):
for ax1_0_3_init, ax2_0_3_init, ax1_0_4_init, ax2_0_4_init in T.grid(2, 1, 2, 4):
with T.block("Z_o_init"):
v0 = T.axis.spatial(1, 0)
v1_o = T.axis.spatial(
64,
ax0_0_ax1_0_0_ax2_0_0_fused % 64 // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused % 2 * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused % 2 * 4
+ ax1_0_3_init * 2
+ ax1_0_4_init,
)
v2_o = T.axis.spatial(
64,
(ax0_0_ax1_0_0_ax2_0_0_fused % 16 + 0 + 0 + ax2_0_3_init) * 4
+ ax2_0_4_init,
)
T.reads()
T.writes(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
]
)
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_low_inclusive": 32,
"warp_execution": 1,
}
)
C = T.match_buffer(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
[16, 16],
dtype="float32",
scope="wmma.accumulator",
offset_factor=16,
)
T.evaluate(
T.tvm_fill_fragment(
C.data,
16,
16,
16,
C.elem_offset // 256 + C.elem_offset % 256 // 16,
T.float32(0),
dtype="handle",
)
)
for ax3_0_0 in T.serial(32):
for ax0_ax1_fused_0 in T.serial(16):
for ax0_ax1_fused_1 in T.thread_binding(2, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("X_shared"):
v0 = T.axis.spatial(
1024,
ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 256
+ ax0_1_ax1_0_1_ax2_0_1_fused * 128
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
// 32,
)
v1 = T.axis.spatial(
1024,
ax3_0_0 * 32
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
% 32,
)
T.reads(X[v0, v1])
T.writes(X_shared[v0, v1])
T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]]})
X_shared[v0, v1] = X[v0, v1]
for ax0_ax1_fused_0 in T.serial(8):
for ax0_ax1_fused_1 in T.thread_binding(2, thread="threadIdx.y"):
for ax0_ax1_fused_2 in T.thread_binding(32, thread="threadIdx.x"):
for ax0_ax1_fused_3 in T.vectorized(4):
with T.block("Y_shared"):
v0 = T.axis.spatial(
1024,
ax3_0_0 * 32
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
// 64,
)
v1 = T.axis.spatial(
1024,
ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 64
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 128
+ ax0_ax1_fused_2 * 4
+ ax0_ax1_fused_3
)
% 64,
)
T.reads(Y[v0, v1])
T.writes(Y_shared[v0, v1])
T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]]})
Y_shared[v0, v1] = Y[v0, v1]
for ax3_0_1 in T.serial(2):
for ax0_0, ax1_0 in T.grid(4, 1):
with T.block("X_shared_wmma.matrix_a_o"):
v0_o = T.axis.spatial(
64,
ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused * 4
+ ax0_0,
)
v1_o = T.axis.spatial(64, ax3_0_0 * 2 + ax3_0_1)
T.reads(
X_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]
)
T.writes(
X_shared_wmma_matrix_a[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
A = T.match_buffer(
X_shared[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
strides=[s1, s0],
scope="shared",
offset_factor=16,
)
C_1 = T.match_buffer(
X_shared_wmma_matrix_a[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_a",
offset_factor=16,
)
T.evaluate(
T.tvm_load_matrix_sync(
C_1.data,
16,
16,
16,
C_1.elem_offset // 256 + C_1.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A.data,
A.elem_offset,
s1 * 16,
1,
dtype="handle",
),
s1,
"row_major",
dtype="handle",
)
)
for ax0_0, ax1_0 in T.grid(1, 4):
with T.block("Y_shared_wmma.matrix_b_o"):
v0_o = T.axis.spatial(64, ax3_0_0 * 2 + ax3_0_1)
v1_o = T.axis.spatial(
64, ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 4 + ax1_0
)
T.reads(
Y_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]
)
T.writes(
Y_shared_wmma_matrix_b[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
A_1 = T.match_buffer(
Y_shared[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
strides=[s1_1, s0_1],
scope="shared",
offset_factor=16,
)
C_2 = T.match_buffer(
Y_shared_wmma_matrix_b[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_b",
offset_factor=16,
)
T.evaluate(
T.tvm_load_matrix_sync(
C_2.data,
16,
16,
16,
C_2.elem_offset // 256 + C_2.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float16"),
A_1.data,
A_1.elem_offset,
s1_1 * 16,
1,
dtype="handle",
),
s1_1,
"row_major",
dtype="handle",
)
)
for ax0_3, ax1_0_3, ax2_0_3, ax3_0_2, ax0_4, ax1_0_4, ax2_0_4 in T.grid(
1, 2, 1, 1, 1, 2, 4
):
with T.block("Z_o_update"):
v0 = T.axis.spatial(1, 0)
v1_o = T.axis.spatial(
64,
ax0_0_ax1_0_0_ax2_0_0_fused % 64 // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused % 2 * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused % 2 * 4
+ ax1_0_3 * 2
+ ax1_0_4,
)
v2_o = T.axis.spatial(
64,
(ax0_0_ax1_0_0_ax2_0_0_fused % 16 + 0 + 0 + ax2_0_3) * 4
+ ax2_0_4,
)
v3_o = T.axis.reduce(64, ax3_0_0 * 2 + ax3_0_1 + ax3_0_2)
T.reads(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
X_shared_wmma_matrix_a[
v1_o * 16 : v1_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16
],
Y_shared_wmma_matrix_b[
v3_o * 16 : v3_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
)
T.writes(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
]
)
T.block_attr(
{
"meta_schedule.thread_extent_high_inclusive": 1024,
"meta_schedule.thread_extent_low_inclusive": 32,
"warp_execution": 1,
}
)
A_2 = T.match_buffer(
X_shared_wmma_matrix_a[
v1_o * 16 : v1_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_a",
offset_factor=16,
)
B = T.match_buffer(
Y_shared_wmma_matrix_b[
v3_o * 16 : v3_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
[16, 16],
dtype="float16",
scope="wmma.matrix_b",
offset_factor=16,
)
C_3 = T.match_buffer(
Z_wmma_accumulator[
v1_o * 16 : v1_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16
],
[16, 16],
dtype="float32",
scope="wmma.accumulator",
offset_factor=16,
)
T.evaluate(
T.tvm_mma_sync(
C_3.data,
C_3.elem_offset // 256 + C_3.elem_offset % 256 // 16,
A_2.data,
A_2.elem_offset // 256,
B.data,
B.elem_offset // 256,
C_3.data,
C_3.elem_offset // 256 + C_3.elem_offset % 256 // 16,
dtype="handle",
)
)
for ax0_0, ax1_0 in T.grid(4, 4):
with T.block("Z_wmma.accumulator_o"):
v0_o = T.axis.spatial(
64,
ax0_0_ax1_0_0_ax2_0_0_fused // 16 * 16
+ ax0_1_ax1_0_1_ax2_0_1_fused * 8
+ ax0_2_ax1_0_2_ax2_0_2_fused * 4
+ ax0_0,
)
v1_o = T.axis.spatial(64, ax0_0_ax1_0_0_ax2_0_0_fused % 16 * 4 + ax1_0)
T.reads(
Z_wmma_accumulator[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
]
)
T.writes(Z[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
A_3 = T.match_buffer(
Z_wmma_accumulator[
v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16
],
[16, 16],
dtype="float32",
scope="wmma.accumulator",
offset_factor=16,
)
C_4 = T.match_buffer(
Z[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16],
[16, 16],
dtype="float32",
strides=[s1_2, s0_2],
offset_factor=16,
)
T.evaluate(
T.tvm_store_matrix_sync(
A_3.data,
16,
16,
16,
A_3.elem_offset // 256 + A_3.elem_offset % 256 // 16,
T.tvm_access_ptr(
T.type_annotation(dtype="float32"),
C_4.data,
C_4.elem_offset,
s1_2 * 16,
2,
dtype="handle",
),
s1_2,
"row_major",
dtype="handle",
)
)
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,not-callable,misplaced-comparison-constant
@pytest.mark.parametrize("mod", [Conv2dCuda0, Conv2dCuda1, GmmCuda0, GMMCUDATensorCore])
def test_postproc_check_pass(mod):
    """Modules within the GPU resource limits must be accepted by the postproc."""
    context = _create_context(mod, target=_target())
    schedule = tir.Schedule(mod, debug_mask="all")
    verifier = context.space_generator.postprocs[0]
    assert verifier.apply(schedule)
@pytest.mark.parametrize(
    "mod",
    [
        # Too much local memory per block (large Apad_shared allocation).
        Conv2dCuda2,
        # Too many threads per block (large threadIdx.x extent).
        Conv2dCuda3,
        GmmCuda1,
        GmmCuda2,
    ],
)
def test_postproc_check_fail(mod):
    """Modules exceeding the GPU resource limits must be rejected by the postproc."""
    context = _create_context(mod, target=_target())
    schedule = tir.Schedule(mod, debug_mask="all")
    verifier = context.space_generator.postprocs[0]
    assert not verifier.apply(schedule)
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 46,264 | 56.400744 | 233 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_graph_debug.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import sys
import time
from distutils.log import debug
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import rpc, te
from tvm._ffi.base import TVMError
from tvm.contrib import utils
from tvm.contrib.debugger import debug_executor
from tvm import relay
# Constants for creating simple graphs, fixtures to avoid free globals
@pytest.fixture
def n():
    """Length of the 1-D tensors used by the simple "myadd" graph fixtures."""
    return 4
@pytest.fixture
def A(n):
    """Input placeholder tensor of shape (n,), default float32 dtype."""
    return te.placeholder((n,), name="A")
@pytest.fixture
def B(A):
    """Elementwise computation B = A + 1.0, same shape as A."""
    return te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
@pytest.fixture
def s(B):
    """Default (unmodified) schedule for the B computation."""
    return te.create_schedule(B.op)
@pytest.fixture
def mlib(s, A, B):
    """LLVM-compiled module exporting the "myadd" kernel."""
    return tvm.build(s, [A, B], "llvm", name="myadd")
@pytest.fixture
def myadd(mlib):
    """Wrap the compiled "myadd" kernel with a 0.25 s sleep.

    The artificial delay gives the debug-executor profiling tests a
    predictable lower bound on the measured per-node runtime.
    """

    def _delayed_add(*args):
        result = mlib["myadd"](*args)
        time.sleep(0.25)
        return result

    return _delayed_add
@pytest.fixture
def graph():
    """JSON graph with one input tensor "x" feeding a single "myadd" op."""
    shape = (4,)
    nodes = [
        {"op": "null", "name": "x", "inputs": []},
        {
            "op": "tvm_op",
            "name": "add",
            "inputs": [[0, 0, 0]],
            "attrs": {
                "func_name": "myadd",
                "flatten_data": "1",
                "num_inputs": "1",
                "num_outputs": "1",
            },
        },
    ]
    attrs = {
        "shape": ["list_shape", [shape, shape]],
        "dltype": ["list_str", ["float32", "float32"]],
        "storage_id": ["list_int", [0, 1]],
    }
    # Key order matches the graph-executor JSON convention.
    return json.dumps(
        {
            "nodes": nodes,
            "arg_nodes": [0],
            "node_row_ptr": [0, 1, 2],
            "heads": [[1, 0, 0]],
            "attrs": attrs,
        }
    )
@tvm.testing.requires_llvm
@tvm.testing.requires_rpc
@pytest.mark.skipif(
    tvm.support.libinfo()["USE_PROFILER"] != "ON", reason="TVM was not built with profiler support"
)
def test_end_to_end_graph_simple(graph, n, A, B, s, myadd):
    """Run the single-op graph under the debug executor, locally and over RPC.

    Locally: checks the dump directory, the dumped graph JSON, the textual
    profile table, the Chrome trace file, the numeric output, and cleanup.
    Over RPC: checks that the graph still produces the right output (the
    debug runtime may not be enabled on the remote, in which case it skips).
    """
    def check_verify():
        mlib_proxy = tvm.support.FrontendTestModule()
        mlib_proxy["myadd"] = myadd
        mod = debug_executor.create(graph, mlib_proxy, tvm.cpu(0))
        a = np.random.uniform(size=(n,)).astype(A.dtype)
        mod.set_input(x=a)
        # verify dumproot created
        directory = mod._dump_path
        assert os.path.exists(directory)
        # verify graph is there
        GRAPH_DUMP_FILE_NAME = "_tvmdbg_graph_dump.json"
        assert len(os.listdir(directory)) == 1
        # verify the file name is proper
        graph_dump_path = os.path.join(directory, GRAPH_DUMP_FILE_NAME)
        assert os.path.exists(graph_dump_path)
        # verify the graph contains some expected keys
        with open(graph_dump_path) as graph_f:
            dumped_graph = json.load(graph_f)
        assert isinstance(dumped_graph, dict)
        for k in ("nodes", "arg_nodes", "node_row_ptr", "heads", "attrs"):
            assert k in dumped_graph, f"key {k} not in dumped graph {graph!r}"
        mod.run()
        # Verify the tensors are dumped alongside the graph JSON
        assert len(os.listdir(directory)) > 1
        debug_lines = mod.debug_datum.get_debug_result().split("\n")
        def split_debug_line(i):
            # Profile columns are separated by runs of 2+ spaces.
            to_return = re.split(r" [ ]*", debug_lines[i])
            assert to_return[-1] == ""
            to_return = to_return[:-1]  # strip empty trailing part
            return to_return
        assert split_debug_line(0) == [
            "Node Name",
            "Ops",
            "Time(us)",
            "Time(%)",
            "Shape",
            "Inputs",
            "Outputs",
            "Measurements(us)",
        ]
        myadd_lines = split_debug_line(2)
        assert myadd_lines[0] == "add"
        assert myadd_lines[1] == "myadd"
        runtime_sec = float(myadd_lines[2]) / 1e6  # printed in us
        # Ensure runtime is at least the sleep time and less than a unit prefix order of magnitude.
        # Here we just care that the prefix is correct.
        assert runtime_sec > 0.25 and runtime_sec < 0.25 * 1000
        total_lines = split_debug_line(3)
        assert total_lines[0] == "Total_time"
        assert total_lines[2] == myadd_lines[2]
        CHROME_TRACE_FILE_NAME = "_tvmdbg_execution_trace.json"
        assert os.path.exists(os.path.join(directory, CHROME_TRACE_FILE_NAME))
        with open(os.path.join(directory, CHROME_TRACE_FILE_NAME)) as f:
            trace = json.load(f)
        assert trace["displayTimeUnit"] == "ns"
        events = trace["traceEvents"]
        # Two nodes ("x" and "add"), one Begin + one End event each.
        assert len(events) == 4
        assert all(event["ph"] in ("B", "E") for event in events)
        assert all(event["pid"] == 1 for event in events)
        assert all(event["tid"] == 1 for event in events)
        assert all(event["name"] == "x" for event in events[:2])
        assert all(event["name"] == "add" for event in events[2:])
        assert events[0]["ts"] == 0
        assert events[0]["ph"] == "B"
        # verify the output is correct
        out = mod.get_output(0, tvm.nd.empty((n,)))
        np.testing.assert_equal(out.numpy(), a + 1)
        mod.exit()
        # verify dump root delete after cleanup
        assert not os.path.exists(directory)
    def check_remote(server):
        mlib = tvm.build(s, [A, B], "llvm", name="myadd")
        remote = rpc.connect(server.host, server.port)
        temp = utils.tempdir()
        dev = remote.cpu(0)
        path_dso = temp.relpath("dev_lib.so")
        mlib.export_library(path_dso)
        remote.upload(path_dso)
        mlib = remote.load_module("dev_lib.so")
        try:
            mod = debug_executor.create(graph, mlib, remote.cpu(0))
        except ValueError:
            print("Skip because debug runtime not enabled")
            return
        a = np.random.uniform(size=(n,)).astype(A.dtype)
        mod.run(x=tvm.nd.array(a, dev))
        out = tvm.nd.empty((n,), device=dev)
        out = mod.get_output(0, out)
        np.testing.assert_equal(out.numpy(), a + 1)
    check_verify()
    check_remote(rpc.Server("127.0.0.1"))
@tvm.testing.requires_llvm
@pytest.mark.skipif(
    tvm.support.libinfo()["USE_PROFILER"] != "ON", reason="TVM was not built with profiler support"
)
def test_run_single_node(graph, n, A, myadd):
    """Exercise GraphModuleDebug.run_individual_node: timing, repeats, cooldown."""
    mlib_proxy = tvm.support.FrontendTestModule()
    mlib_proxy["myadd"] = myadd
    mod: debug_executor.GraphModuleDebug = debug_executor.create(graph, mlib_proxy, tvm.cpu(0))
    a = np.random.uniform(size=(n,)).astype(A.dtype)
    mod.set_input(x=a)
    assert len(mod.debug_datum.get_graph_nodes()) == 2
    assert mod.debug_datum.get_graph_nodes()[0]["op"] == "param"
    assert mod.debug_datum.get_graph_nodes()[1]["op"] == "myadd"
    # Running a node with no associated function should return instantly and have 0 runtime
    assert mod.run_individual_node(0, number=1).mean == 0
    # Meanwhile the actual function should take some time, more time if you run it more times
    repeat_1_result = mod.run_individual_node(1, repeat=1)
    assert repeat_1_result.mean > 0
    # Running with repeat=3 should take longer (in total) than a single repeat
    repeat_3_results = mod.run_individual_node(1, repeat=3)
    assert sum(repeat_3_results.results) > sum(repeat_1_result.results)
    # Increasing the number of repeats should give you the number of results asked for
    assert len(mod.run_individual_node(1, repeat=10).results) == 10
    # Doing repeat_ms should have the run time greater than the asked amount
    start = time.time()
    mod.run_individual_node(1, min_repeat_ms=500)
    end = time.time()
    elapsed_time_in_seconds = end - start
    assert elapsed_time_in_seconds >= 0.5
    # Using `cooldown_interval_ms` should increase the total wall-clock time
    start = time.time()
    mod.run_individual_node(1, repeat=2, min_repeat_ms=500, cooldown_interval_ms=1000)
    end = time.time()
    elapsed_time_in_seconds_with_def_rep = end - start
    assert elapsed_time_in_seconds_with_def_rep >= 3
    # With `repeats_to_cooldown` != 1, cooldown should not trigger
    # after every single repeat
    start = time.time()
    mod.run_individual_node(
        1, repeat=2, min_repeat_ms=500, cooldown_interval_ms=1000, repeats_to_cooldown=2
    )
    end = time.time()
    elapsed_time_in_seconds_with_rep_2 = end - start
    assert elapsed_time_in_seconds_with_rep_2 >= 2 and (
        elapsed_time_in_seconds_with_rep_2 < elapsed_time_in_seconds_with_def_rep
    )
    # Going out of bounds of node index throws a tvm error
    with pytest.raises(TVMError):
        mod.run_individual_node(2)
@tvm.testing.requires_llvm
def test_multiple_output():
    """Debug executor must report correct per-node shapes in a tuple-output graph."""
    data = relay.var("x", shape=(1, 3, 48, 16), dtype="float32")
    splits = relay.split(data, [12, 16, 32], 2).astuple()
    pieces = [relay.TupleGetItem(splits, idx) for idx in range(4)]
    weight = relay.const(np.random.uniform(-1, 1, (3, 3, 1, 1)).astype("float32"))
    conv = relay.nn.conv2d(
        pieces[2], weight, kernel_size=(1, 1), kernel_layout="OIHW", out_dtype="float32"
    )
    func = relay.Function([data], relay.Tuple([pieces[0], pieces[1], conv + pieces[3]]))
    mod = relay.transform.InferType()(tvm.IRModule.from_expr(func))
    target = tvm.target.Target("llvm")
    device = tvm.cpu()
    lib = relay.build(mod, target=target)
    m = debug_executor.GraphModuleDebug(
        lib["debug_create"]("default", device), [device], lib.get_graph_json(), None
    )
    nodes = m.debug_datum.get_graph_nodes()
    # Node 2 is the constant conv weight; its shape must round-trip intact.
    assert nodes[2]["shape"] == [3, 3, 1, 1]
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 10,302 | 32.891447 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_post_order_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import math
import sys
from typing import List
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.ir.module import IRModule
from tvm._ffi import register_func
from tvm.error import TVMError
from tvm.meta_schedule import TuneContext
from tvm.meta_schedule.schedule_rule import PyScheduleRule
from tvm.meta_schedule.space_generator import PostOrderApply
from tvm.meta_schedule.utils import derived_object
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir.schedule import BlockRV, Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,
# fmt: off
def get_matmul_packed(m, n, k, lhs_type="int8", rhs_dtype="int8", acc_dtype="int32"):
    """Create a PrimFunc computing matmul[i, j] = sum_k X[i, k] * W[j, k].

    The weight W is laid out transposed, with shape (n, k), so both operands
    are read along their innermost axis.  Both operands are cast to
    ``acc_dtype`` before the multiply-accumulate.
    NOTE(review): the parameter names mix ``*_type`` and ``*_dtype``; kept
    as-is because callers may pass them by keyword.
    """
    X = te.placeholder((m, k), name="X", dtype=lhs_type)
    W = te.placeholder((n, k), name="W", dtype=rhs_dtype)
    ak = te.reduce_axis((0, k), name="k")
    matmul = te.compute(
        (m, n),
        lambda i, j: te.sum(
            X[i, ak].astype(acc_dtype) * W[j, ak].astype(acc_dtype),
            axis=ak,
        ),
        name="compute",
    )
    return te.create_prim_func([X, W, matmul])
# Plain 1024x1024x1024 fp32 GEMM with a single reduction block named "matmul".
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# The same GEMM twice, with BOTH blocks deliberately named "matmul".  Used by
# test_meta_schedule_post_order_apply_duplicate_matmul to check that
# PostOrderApply rejects duplicated block names.
@tvm.script.ir_module
class DuplicateMatmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# Three chained elementwise stages: B = A * 2, C = B + 3, D = C * 5.
# Despite the name there is no matmul here; B and C are intermediate buffers
# that schedule rules may inline.
@tvm.script.ir_module
class TrinityMatmul:
    @T.prim_func
    def main(a: T.handle, d: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.alloc_buffer((1024, 1024), "float32")
        C = T.alloc_buffer((1024, 1024), "float32")
        D = T.match_buffer(d, (1024, 1024), "float32")
        for i, j in T.grid(1024, 1024):
            with T.block("A"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0
        for i, j in T.grid(1024, 1024):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + 3.0
        for i, j in T.grid(1024, 1024):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                D[vi, vj] = C[vi, vj] * 5.0
# Reference IR for TrinityMatmul after block "B" has been inlined into "C"
# and both remaining blocks have been tiled with 16x64 / 64x16 splits.
@tvm.script.ir_module
class TrinityMatmulProcessedForReference:
    @T.prim_func
    def main(a: T.handle, d: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, [1024, 1024], dtype="float32")
        D = T.match_buffer(d, [1024, 1024], dtype="float32")
        # body
        # with tir.block("root")
        B = T.alloc_buffer([1024, 1024], dtype="float32")
        for i0_0, i1_0, i0_1, i1_1 in T.grid(16, 64, 64, 16):
            with T.block("A"):
                vi = T.axis.S(1024, i0_0 * 64 + i0_1)
                vj = T.axis.S(1024, i1_0 * 16 + i1_1)
                T.reads([A[vi, vj]])
                T.writes([B[vi, vj]])
                B[vi, vj] = A[vi, vj] * T.float32(2)
        for i0_0, i1_0, i0_1, i1_1 in T.grid(16, 64, 64, 16):
            with T.block("C"):
                vi = T.axis.S(1024, i0_0 * 64 + i0_1)
                vj = T.axis.S(1024, i1_0 * 16 + i1_1)
                T.reads([B[vi, vj]])
                T.writes([D[vi, vj]])
                # "+3" and "*5" stages fused after inlining block "B".
                D[vi, vj] = (B[vi, vj] + T.float32(3)) * T.float32(5)
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def _is_root(sch: Schedule, block: BlockRV) -> bool:
    """A block is the root iff its schedulable reference has no parent."""
    parent = sch.get_sref(block).parent
    return parent is None
def _check_correct(schedule: Schedule):
    """Every sampled split decision in the trace must multiply back to 1024."""
    decisions = schedule.trace.decisions
    for inst in decisions:
        assert math.prod(decisions[inst]) == 1024
@derived_object
class WowSoFancyScheduleRule(PyScheduleRule):
    # Deterministic rule: tiles a 3-loop (i, j, k) block one fixed way and
    # returns exactly one new schedule per non-root block.
    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        pass
    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        # Leave the root block untouched so the rule only fires on compute blocks.
        if _is_root(sch, block):
            return [sch]
        new_sch = sch.copy()
        i, j, k = new_sch.get_loops(block=block)
        # Split factors multiply back to the 1024 loop extents (see _check_correct).
        i_0, i_1, i_2, i_3 = new_sch.split(loop=i, factors=[2, 4, 64, 2])
        j_0, j_1, j_2, j_3 = new_sch.split(loop=j, factors=[4, 64, 2, 2])
        k_0, k_1 = new_sch.split(loop=k, factors=[32, 32])
        new_sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
        return [new_sch]
@derived_object
class DoubleScheduleRule(PyScheduleRule):
    """Schedule rule that emits TWO identically tiled candidates per block.

    The original implementation copy-pasted the same 8-line tiling
    transformation twice; it is factored into a single nested helper so the
    two candidates cannot drift apart.
    """

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        pass

    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        # The root block is returned unchanged.
        if _is_root(sch, block):
            return [sch]

        def _tiled(base: Schedule) -> Schedule:
            # 4x64x2x2 (i) / 2x4x64x2 (j) / 32x32 (k) split, then reorder
            # into the canonical outer-to-inner tiling order.
            new_sch = base.copy()
            i, j, k = new_sch.get_loops(block=block)
            i_0, i_1, i_2, i_3 = new_sch.split(loop=i, factors=[4, 64, 2, 2])
            j_0, j_1, j_2, j_3 = new_sch.split(loop=j, factors=[2, 4, 64, 2])
            k_0, k_1 = new_sch.split(loop=k, factors=[32, 32])
            new_sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3)
            return new_sch

        # Two independent copies of the same transformation: downstream tests
        # rely on receiving two distinct (but structurally equal) schedules.
        return [_tiled(sch), _tiled(sch)]
@derived_object
class TrinityDoubleRule(PyScheduleRule):
    """Schedule rule that emits two alternative 2-D tilings per block.

    The original implementation duplicated the split/reorder sequence,
    differing only in the split factors; it is folded into a loop over the
    two factor configurations (same output order as before).
    """

    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        pass

    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        # The root block is returned unchanged.
        if _is_root(sch, block):
            return [sch]
        result = []
        # Two alternative splits of the 1024x1024 iteration space.
        for i_factors, j_factors in ([16, 64], [64, 16]), ([2, 512], [2, 512]):
            new_sch = sch.copy()
            i, j = new_sch.get_loops(block=block)
            i_0, i_1 = new_sch.split(loop=i, factors=i_factors)
            j_0, j_1 = new_sch.split(loop=j, factors=j_factors)
            new_sch.reorder(i_0, j_0, i_1, j_1)
            result.append(new_sch)
        return result
@derived_object
class ReorderScheduleRule(PyScheduleRule):
    # Expects a block already tiled into 10 loops (as produced by
    # DoubleScheduleRule) and emits two different loop orders for it.
    def _initialize_with_tune_context(self, context: "TuneContext") -> None:
        pass
    def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
        if _is_root(sch, block):
            return [sch]
        new_sch = sch.copy()
        i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3 = new_sch.get_loops(block=block)
        # First candidate: rotate the outermost i_0/j_0 to the innermost position.
        new_sch.reorder(i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3, i_0, j_0)
        result = [new_sch]
        new_sch = sch.copy()
        i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3 = new_sch.get_loops(block=block)
        # Second candidate: a different permutation, hoisting j_3 near the top.
        new_sch.reorder(i_1, j_3, i_0, j_0, j_1, k_0, i_2, j_2, k_1, i_3)
        result.append(new_sch)
        return result
def test_meta_schedule_post_order_apply():
    """A single-candidate rule yields exactly one transformed design space."""
    mod = Matmul
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Test Task",
        space_generator=PostOrderApply(
            sch_rules=[WowSoFancyScheduleRule()],
            postprocs=[],
            mutator_probs={},
        ),
    )
    schs = context.space_generator.generate_design_space(mod)
    assert len(schs) == 1
    # The tiling must have actually changed the module.
    assert not tvm.ir.structural_equal(schs[0].mod, mod)
    _check_correct(schs[0])
def test_meta_schedule_post_order_apply_double():
    """A rule that returns two schedules doubles the design space."""
    mod = Matmul
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Double Rules Task",
        space_generator=PostOrderApply(
            sch_rules=[DoubleScheduleRule()],
            postprocs=[],
            mutator_probs={},
        ),
    )
    schs = context.space_generator.generate_design_space(mod)
    assert len(schs) == 2
    for candidate in schs:
        assert not tvm.ir.structural_equal(candidate.mod, mod)
        _check_correct(candidate)
def test_meta_schedule_post_order_apply_multiple():
    """Two rules of two candidates each compose into 2 * 2 = 4 design spaces."""
    mod = Matmul
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Double Rules Task",
        space_generator=PostOrderApply(
            sch_rules=[DoubleScheduleRule(), ReorderScheduleRule()],
            postprocs=[],
            mutator_probs={},
        ),
    )
    schs = context.space_generator.generate_design_space(mod)
    assert len(schs) == 4
    for candidate in schs:
        assert not tvm.ir.structural_equal(candidate.mod, mod)
        _check_correct(candidate)
def test_meta_schedule_post_order_apply_duplicate_matmul():
    """Duplicated block names within one PrimFunc must be rejected up front."""
    mod = DuplicateMatmul
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Duplicate Matmul Task",
        space_generator=PostOrderApply(
            sch_rules=[WowSoFancyScheduleRule()],
            postprocs=[],
            mutator_probs={},
        ),
    )
    post_order_apply = context.space_generator
    # The C++ side raises as soon as it sees the second block named "matmul".
    with pytest.raises(
        TVMError,
        match=r".*TVMError: Check failed: \(block_names_.count\(block->name_hint\) == 0\)"
        r" is false: Duplicated block name matmul in function main not supported!",
    ):
        post_order_apply.generate_design_space(mod)
def test_meta_schedule_post_order_apply_remove_block():
    """A rule may remove (inline) a block; later rules must not see it."""
    @derived_object
    class RemoveBlock(PyScheduleRule):
        def _initialize_with_tune_context(self, context: "TuneContext") -> None:
            pass
        def apply(self, sch: Schedule, block: BlockRV) -> List[Schedule]:
            if _is_root(sch, block):
                return [sch]
            sch = sch.copy()
            # Inline only the middle stage "B" of TrinityMatmul.
            if sch.get(block).name_hint == "B":
                sch.compute_inline(block)
            return [sch]
    def correct_trace(a, b, c, d):
        # Expected simplified trace: inline "B", then tile "C" (factors a, b)
        # and "A" (factors c, d).
        return "\n".join(
            [
                "# from tvm import tir",
                "def apply_trace(sch: tir.Schedule) -> None:",
                '    b0 = sch.get_block(name="A", func_name="main")',
                '    b1 = sch.get_block(name="B", func_name="main")',
                '    b2 = sch.get_block(name="C", func_name="main")',
                "    sch.compute_inline(block=b1)",
                "    l3, l4 = sch.get_loops(block=b2)",
                "    l5, l6 = sch.split(loop=l3, factors=" + str(a) + ", preserve_unit_iters=True)",
                "    l7, l8 = sch.split(loop=l4, factors=" + str(b) + ", preserve_unit_iters=True)",
                "    sch.reorder(l5, l7, l6, l8)",
                "    l9, l10 = sch.get_loops(block=b0)",
                "    l11, l12 = sch.split(loop=l9, factors=" + str(c) + ", preserve_unit_iters=True)",
                "    l13, l14 = sch.split(loop=l10, factors="
                + str(d)
                + ", preserve_unit_iters=True)",
                "    sch.reorder(l11, l13, l12, l14)",
            ]
        )
    mod = TrinityMatmul
    context = TuneContext(
        mod=mod,
        target=Target("llvm"),
        task_name="Remove Block Task",
        space_generator=PostOrderApply(
            sch_rules=[RemoveBlock(), TrinityDoubleRule()],
            postprocs=[],
            mutator_probs={},
        ),
    )
    post_order_apply = context.space_generator
    schs = post_order_apply.generate_design_space(mod)
    # Blocks "A" and "C" each get two tilings; "B" is gone -> 2 * 2 spaces.
    assert len(schs) == 4
    for sch in schs:
        # Block "B" must no longer be addressable after compute_inline.
        with pytest.raises(
            tvm.tir.schedule.schedule.ScheduleError,
            match="ScheduleError: An error occurred in the schedule primitive 'get-block'.",
        ):
            sch.get_block("B", "main")
        sch_trace = sch.trace.simplified(True)
        assert (
            str(sch_trace) == correct_trace([16, 64], [64, 16], [2, 512], [2, 512])
            or str(sch_trace) == correct_trace([2, 512], [2, 512], [2, 512], [2, 512])
            or str(sch_trace) == correct_trace([16, 64], [64, 16], [16, 64], [64, 16])
            or str(sch_trace) == correct_trace([2, 512], [2, 512], [16, 64], [64, 16])
        )
def test_target_blocks_search_space():
    """PostOrderApply's f_block_filter restricts which blocks get schedules."""

    def _design_spaces(block_filter):
        # Build a fresh context over TrinityMatmul with the given filter.
        mod = TrinityMatmul
        context = TuneContext(
            mod=mod,
            target=Target("llvm"),
            task_name="Custom Search Space Task",
            space_generator=PostOrderApply(
                f_block_filter=block_filter,
                sch_rules=[TrinityDoubleRule()],
                postprocs=[],
                mutator_probs={},
            ),
        )
        return context.space_generator.generate_design_space(mod)

    # No filter: each of the three blocks contributes a factor of two.
    assert len(_design_spaces(None)) == 8
    # A single targeted block leaves only its two tilings.
    assert len(_design_spaces(lambda block: block.name_hint in ["B"])) == 2
    # Two targeted blocks -> 2 * 2 combinations.
    assert len(_design_spaces(lambda block: block.name_hint in ["A", "C"])) == 4
    # Naming every block is equivalent to no filter at all.
    assert len(_design_spaces(lambda block: block.name_hint in ["A", "B", "C"])) == 8
@pytest.mark.parametrize(
    "target,mod,expected_intr",
    [
        (
            Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon -num-cores 2"),
            IRModule({"main": get_matmul_packed(128, 128, 128, "int8", "int8", "int32")}),
            "dot_4x4_i8i8s32_neon",
        ),
        (
            Target(
                "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod -num-cores 2"
            ),
            IRModule({"main": get_matmul_packed(128, 128, 128, "int8", "int8", "int32")}),
            "dot_4x4_i8i8s32_sdot",
        ),
        (
            Target(
                "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod -num-cores 2"
            ),
            IRModule({"main": get_matmul_packed(128, 128, 128, "uint8", "uint8", "uint32")}),
            "dot_4x4_u8u8u32_udot",
        ),
        (
            Target(
                "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod -num-cores 2"
            ),
            IRModule({"main": get_matmul_packed(128, 128, 128, "uint8", "uint8", "int32")}),
            "dot_4x4_u8u8i32_hdot",
        ),
    ],
)
def test_meta_schedule_post_order_apply_arm_intrin(target, mod, expected_intr):
    """The default design space should pick the Arm dot-product intrinsic
    matching the target's features and the operand dtypes."""
    context = TuneContext(
        mod=mod,
        target=target,
        task_name="Arm Intrinsic Task",
        space_generator=PostOrderApply(),  # Triggers default generator
        rand_state=1,  # Fixed seed so the sampled design space is reproducible
    )
    post_order_apply = context.space_generator
    schs = post_order_apply.generate_design_space(mod)
    assert len(schs) != 0
    # Apply the default postprocessors so tensorization is materialized.
    for sch in schs:
        sch.enter_postproc()
        for proc in context.space_generator.postprocs:
            proc.apply(sch)
    assert any(["call_llvm_pure_intrin" in sch.mod.script() for sch in schs])
    assert any([expected_intr in str(sch.trace) for sch in schs])
def test_meta_schedule_derived_object():
    """@derived_object classes must construct via __init__, classmethod, and staticmethod."""

    @derived_object
    class RemoveBlock(PyScheduleRule):
        @classmethod
        def class_construct(cls):
            return cls()

        @staticmethod
        def static_construct():
            return RemoveBlock()

    # All three construction paths must produce a RemoveBlock instance.
    for instance in (
        RemoveBlock(),
        RemoveBlock.class_construct(),
        RemoveBlock.static_construct(),
    ):
        assert isinstance(instance, RemoveBlock)
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 18,150 | 35.085487 | 107 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_device.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.contrib import utils
import numpy as np
import tvm.testing
@tvm.testing.requires_gpu
def test_large_uint_imm():
    """A uint64 immediate larger than 2**63 must survive GPU codegen intact."""
    big_value = (1 << 63) + 123
    addend = tvm.tir.const(3, "uint64")
    length = 12
    threads = 2
    out = te.compute(
        (length,), lambda *i: tvm.tir.const(big_value, "uint64") + addend, name="A"
    )
    sched = te.create_schedule(out.op)
    outer, inner = sched[out].split(out.op.axis[0], factor=threads)
    sched[out].bind(inner, te.thread_axis("threadIdx.x"))
    sched[out].bind(outer, te.thread_axis("blockIdx.x"))

    def _run_on(device):
        if not tvm.testing.device_enabled(device):
            return
        dev = tvm.device(device, 0)
        kernel = tvm.build(sched, [out], device)
        # launch the kernel.
        result = tvm.nd.empty((length,), dtype=out.dtype, device=dev)
        kernel(result)
        # Would differ if the immediate had been truncated to 63 bits.
        assert result.numpy()[0] == big_value + 3

    _run_on("cuda")
    _run_on("vulkan -from_device=0")
@tvm.testing.requires_gpu
def test_add_pipeline():
    """Two-stage elementwise pipeline (C = A + B scalar, D = C + 1) on GPU targets."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A")
    B = te.placeholder((), name="B")  # 0-d (scalar) placeholder
    C = te.compute(A.shape, lambda *i: A(*i) + B(), name="C")
    D = te.compute(A.shape, lambda *i: C(*i) + 1, name="D")
    s = te.create_schedule(D.op)
    # GPU schedules have to split by blockIdx and threadIdx
    num_thread = 256
    xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
    s[C].bind(xi, te.thread_axis("threadIdx.x"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    xo, xi = s[D].split(D.op.axis[0], factor=num_thread)
    s[D].bind(xi, te.thread_axis("threadIdx.x"))
    s[D].bind(xo, te.thread_axis("blockIdx.x"))
    def check_target(device, host="stackvm"):
        # Skip silently when either the device or host backend is unavailable.
        if not tvm.testing.device_enabled(device) or not tvm.testing.device_enabled(host):
            return
        dev = tvm.device(device, 0)
        mhost = tvm.driver.build(s, [A, B, D], target=tvm.target.Target(device, host))
        f = mhost.entry_func
        # launch the kernel.
        n = 1027  # deliberately not a multiple of num_thread
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=()).astype(B.dtype), dev)
        d = tvm.nd.array(np.zeros(n, dtype=D.dtype), dev)
        f(a, b, d)
        tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + 1)
    check_target("cuda", host="llvm")
    check_target("nvptx", host="llvm")
    check_target("vulkan", host="llvm")
    check_target("rocm", host="llvm")
if __name__ == "__main__":
    # Allow running this file directly as a script.
    test_large_uint_imm()
    test_add_pipeline()
| 3,279 | 34.268817 | 90 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_estimate_tir_flops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import pytest
import tvm.testing
from tvm.ir import IRModule
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.tir.analysis import estimate_tir_flops
@pytest.mark.parametrize(
    "workload, flops",
    [
        ("C1D", 6291456),
        ("C2D", 236027904),
        ("C3D", 13217562624),
        ("CAP", 75497472),
        ("DEP", 7225344),
        ("DIL", 223552896),
        ("GMM", 4194304),
        ("GRP", 28901376),
        ("T2D", 268435456),
        ("CBR", 239239168),
        ("TBG", 25165824),
        ("NRM", 131072),
        ("SFM", 262144),
    ],
)
def test_te_workload(workload, flops):
    """Estimated FLOPs for each named TE workload must match its reference count."""
    te_workload = create_te_workload(workload, 0)
    mod = IRModule({"main": te_workload})
    assert float(flops) == estimate_tir_flops(mod)
@T.prim_func
def flops_with_let(a: T.Buffer(16, "float32")):
    # 8 element copies addressed through a let-bound index (j = i + 8).
    for i in range(8):
        j = i + 8
        a[j] = a[i]
def test_flops_with_let():
    """A let-bound index must not hide the 8 buffer copies from the estimator."""
    mod = IRModule({"main": flops_with_let})
    assert estimate_tir_flops(mod) == 8
@T.prim_func
def flops_with_if(a: T.Buffer(16, "float32"), b: T.Buffer(16, "float32")):
    # Mixed control flow: even indices copy, odd multiples of 3 add.
    for i in range(16):
        if i % 2 == 0:
            a[i] = b[i]
        else:
            if i % 3 == 0:
                a[i] = b[i - 1] + b[i - 2]
def test_flops_with_if():
    """FLOP estimation must account for if/else branches inside the loop."""
    mod = IRModule({"main": flops_with_if})
    assert estimate_tir_flops(mod) == 16
@T.prim_func
def flops_with_forloop_as_expression(A: T.Buffer(1)):
    # The inner extent depends on the outer loop variable, so the trip
    # count is not a compile-time constant — estimation must fail on this.
    for i in T.serial(0, 16):
        for k in T.serial(0, i):
            A[0] = A[0] + 1
@T.prim_func
def flops_override(A: T.Buffer(16, "float32")):
    # The "estimated_flops" attribute overrides the analyzed count (16).
    T.func_attr({"estimated_flops": 32})
    for i in range(16):
        A[0] = A[0] + 1
def test_estimate_flops_forloop_as_experssion():
    """A user-supplied "estimated_flops" attribute wins over analysis."""
    flops = estimate_tir_flops(
        IRModule({"main": flops_with_forloop_as_expression.with_attr("estimated_flops", 32)})
    )
    assert flops == 32
    # test whether the user estimated flop would over ride
    flops = estimate_tir_flops(IRModule({"main": flops_override}))
    assert flops == 32
def test_exception():
    """Without an override attribute, a data-dependent loop extent must raise."""
    mod = IRModule({"main": flops_with_forloop_as_expression})
    with pytest.raises(tvm.TVMError):
        estimate_tir_flops(mod)
def test_estimate_flops_with_decl_buffer():
    """FLOP count must be identical whether the buffer comes from
    T.decl_buffer or from a T.Buffer binding."""

    def make_func(use_decl_buffer):
        # Build the same 16-iteration accumulation, varying only how the
        # buffer object is created from the raw data handle.
        buffer_func = T.decl_buffer if use_decl_buffer else T.Buffer

        @T.prim_func
        def func(A_data: T.handle("float32")):
            A = buffer_func(16, "float32", data=A_data)
            for i in range(16):
                A[0] = A[0] + 1

        return func

    flops_with_decl_buffer = estimate_tir_flops(IRModule.from_expr(make_func(True)))
    # Bug fix: this previously also passed True, making the comparison
    # vacuous; the second variant must use T.Buffer (use_decl_buffer=False).
    flops_without_decl_buffer = estimate_tir_flops(IRModule.from_expr(make_func(False)))
    assert flops_with_decl_buffer == flops_without_decl_buffer
if __name__ == "__main__":
    # Discover and run all tests in this file when executed as a script.
    tvm.testing.main()
| 3,681 | 27.542636 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_te_hybrid_script.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm, inspect, sys, traceback, numpy, pytest, types, os
from tvm import te
from tvm.contrib import utils
from tvm.te.hybrid import script
from tvm.te.hybrid.runtime import HYBRID_GLOBALS
import tvm.testing
@pytest.mark.skip
def run_and_check(func, args, var_dict=None, target="llvm", sch=None, outs=None):
    """Run a hybrid-script function both emulated and compiled, and compare.

    The function is executed twice: once in pure-python emulation mode
    (calling ``func`` on numpy arrays) and once compiled through
    ``tvm.build``; results must agree within 1e-5.

    Parameters
    ----------
    func : callable
        Hybrid-script function (also directly callable on numpy data).
    args : list
        ``te.Tensor`` / ``tir.Var`` / python-list arguments for ``func``.
    var_dict : dict, optional
        Bindings for symbolic shape variables, used to materialize data.
    target : str
        Build target for the compiled run.
    sch : te.Schedule, optional
        Explicit schedule; inferred from ``func``'s outputs when omitted.
    outs : list of te.Tensor, optional
        Output tensors; required when ``sch`` is given.

    Returns
    -------
    (HybridModule, module_args, module_outs) for save/load round-trips.
    """
    # Bug fix: var_dict previously used a mutable default argument ({});
    # use the None sentinel so call sites never share state.
    if var_dict is None:
        var_dict = {}

    def tvm_val_2_py_val(val):
        # Resolve symbolic shape vars to concrete python ints.
        val = tvm.tir.stmt_functor.substitute(val, var_dict)
        val = tvm.arith.Analyzer().simplify(val)
        assert isinstance(val, (tvm.tir.IntImm,))
        return val.value

    dev = tvm.device(target, 0)
    op = None
    if sch is None:
        # No schedule supplied: trace the op graph and build a default one.
        outs = func(*tuple(tvm.runtime.convert(i) if isinstance(i, list) else i for i in args))
        op = outs[0].op if isinstance(outs, list) else outs.op
        sch = te.create_schedule(op)
    else:
        assert outs is not None
        assert isinstance(outs, list)
        op = outs[0].op
    emu_args = []  # numpy inputs for the emulated run
    nd_args = []  # tvm.nd inputs for the compiled run
    for i in args:
        if isinstance(i, te.tensor.Tensor):
            shape = [tvm_val_2_py_val(j) for j in i.shape]
            emu_args.append(numpy.random.randn(*shape).astype(i.dtype))
            nd_args.append(tvm.nd.array(emu_args[-1], dev))
        elif isinstance(i, tvm.tir.Var):
            emu_args.append(tvm_val_2_py_val(i))
            nd_args.append(emu_args[-1])
        else:
            # Constant python lists are captured by the script, not passed
            # to the compiled module.
            assert isinstance(i, list)
            emu_args.append(numpy.array(i))
    compile_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))] + (
        outs if isinstance(outs, list) else [outs]
    )
    module = tvm.build(sch, compile_args, target=target)
    assert module
    out_tensors = []
    for i in range(op.num_outputs):
        output = op.output(i)
        shape = [tvm_val_2_py_val(j) for j in output.shape]
        nd_args.append(tvm.nd.array(numpy.zeros(shape).astype(output.dtype), dev))
        out_tensors.append(nd_args[-1])
    ref_data = func(*emu_args)
    if isinstance(ref_data, numpy.ndarray):
        ref_data = [ref_data]
    module(*nd_args)
    for nd, np in zip(out_tensors, ref_data):
        tvm.testing.assert_allclose(nd.numpy(), np, rtol=1e-5, atol=1e-5)
    module_args = [i for i in args if isinstance(i, (te.tensor.Tensor, tvm.tir.Var))]
    module_outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    h_module = te.hybrid.build(sch, module_args, module_outs)
    return h_module, module_args, module_outs
@script
def outer_product(n, m, a, b):
    """This is a simple outer product.
    Actually this function is not required to be documented.
    I write this docstring to test skipping docstring functionality.
    """
    c = output_tensor((n, m), a.dtype)
    for i in range(n):
        for j in range(m):
            # The assert is intentional: test_outer_product checks that it
            # lowers to a tir.AssertStmt with this exact message.
            assert i < n and j < m, "index out of range!"
            c[i, j] = a[i] * b[j]
    return c
@tvm.testing.skip_if_wheel_test
# Test global function
# Test bridge between frontend and backend
def test_outer_product():
    """Lower outer_product and verify the generated TIR structure node by node."""
    n = te.size_var("n")
    m = te.size_var("m")
    a = te.placeholder((n,), name="a")
    b = te.placeholder((m,), name="b")
    try:
        c = outer_product(n, m, a, b)
        ir = c.op.body
    except IOError as err:
        # Python 2 cannot retrieve the source of the script function.
        assert sys.version_info[0] == 2 and str(err) == "could not get source code"
        return
    # Check for i in (0, n)
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "i"
    assert ir.min.value == 0
    assert ir.extent.name == "n"
    ibody = ir.body
    assert isinstance(ibody, tvm.tir.For)
    # Check for j in (0, m)
    assert ibody.loop_var.name == "j"
    assert ibody.min.value == 0
    assert ibody.extent.name == "m"
    # Check loop body
    jblock = ibody.body
    assert isinstance(jblock, tvm.tir.SeqStmt)
    jbody = jblock[0]
    assert isinstance(jbody, tvm.tir.AssertStmt)
    assert isinstance(jbody.message, tvm.tir.StringImm)
    assert jbody.message.value == "index out of range!"
    jbody = jblock[1]
    assert isinstance(jbody, tvm.tir.ProducerStore)
    assert jbody.producer.op.name == "c"
    assert len(jbody.indices) == 2
    assert jbody.indices[0].name == "i"
    assert jbody.indices[1].name == "j"
    assert isinstance(jbody.value, tvm.tir.Mul)
    mul = jbody.value
    assert isinstance(mul.a, tvm.tir.ProducerLoad)
    assert mul.a.producer.name == "a"
    assert mul.b.producer.name == "b"
    func, ins, outs = run_and_check(outer_product, [n, m, a, b], {n: 99, m: 101})
    # Round-trip the built HybridModule through save/load and re-check.
    temp = utils.tempdir()
    path = temp.relpath("%s.py" % func.name)
    func.save(path)
    func_ = te.hybrid.HybridModule()
    func_.load(path)
    run_and_check(func_, ins, {n: 99, m: 101}, outs=outs)
    # The hybrid runtime intrinsics must not leak into module globals.
    for key, _ in HYBRID_GLOBALS.items():
        assert key not in globals().keys()
        assert key not in outer_product.__globals__.keys()
@tvm.testing.skip_if_wheel_test
# Test local function
# Test allocation of local variable
def test_fanout():
    """A scalar accumulator in a hybrid script lowers to a 1-element realize."""
    @script
    def fanout(n, a):
        three = 3.0
        b = output_tensor((a.shape[0] - 3,), a.dtype)
        for i in range(a.shape[0] - 3):
            sigma = 0.0
            for j in range(3):
                sigma += a[i + j]
            sigma = sigma / three
            b[i] = sigma
        return b
    n = te.size_var("n")
    a = te.placeholder((n,), "float32", name="a")
    try:
        b = fanout(n, a)
        ir = b.op.body
    except IOError as err:
        # Python 2 cannot retrieve the source of the script function.
        assert sys.version_info[0] == 2 and str(err) == "could not get source code"
        return
    # Check for i in (0, n-3)
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "i"
    assert ir.min.value == 0
    assert tvm.ir.structural_equal(ir.extent, n - 3)
    # Check loopbody
    abody = ir.body
    assert isinstance(abody, tvm.tir.ProducerRealize)
    assert abody.bounds[0].min.value == 0
    assert abody.bounds[0].extent.value == 1
    assert abody.producer.op.name == "sigma"
    # Check i loop body
    rbody = abody.body
    assert isinstance(rbody[0], tvm.tir.ProducerStore)
    assert rbody[0].producer.op.name == "sigma"
    assert len(rbody[0].indices) == 1
    assert rbody[0].indices[0].value == 0
    # Check fanout loop
    jloop = rbody[1]
    assert jloop.loop_var.name == "j"
    assert jloop.min.value == 0
    assert jloop.extent.value == 3
    jbody = jloop.body
    assert isinstance(jbody, tvm.tir.ProducerStore)
    assert len(jbody.indices) == 1
    assert jbody.indices[0].value == 0
    assert jbody.producer.op.name == "sigma"
    assert isinstance(jbody.value, tvm.tir.Add)
    value = jbody.value
    assert isinstance(value.a, tvm.tir.ProducerLoad)
    assert value.a.producer.name == "sigma"
    assert len(value.a.indices) == 1
    assert value.a.indices[0].value == 0
    assert value.b.producer.name == "a"
    assert len(value.b.indices) == 1
    assert tvm.ir.structural_equal(value.b.indices[0], ir.loop_var + jloop.loop_var)
    # The division by `three` is canonicalized to a multiply by 1/3.
    divide = rbody[2]
    assert isinstance(divide, tvm.tir.ProducerStore)
    assert len(divide.indices) == 1
    assert divide.indices[0].value == 0
    value = divide.value
    assert isinstance(value, tvm.tir.Mul)
    assert value.a.producer.name == "sigma"
    assert len(value.a.indices) == 1
    assert value.a.indices[0].value == 0
    assert abs(value.b.value - (1 / 3.0)) < 1e-5
    write = rbody[3]
    assert isinstance(write, tvm.tir.ProducerStore)
    assert write.producer.op.name == "b"
    assert write.value.producer.name == "sigma"
    assert len(write.value.indices) == 1
    assert write.value.indices[0].value == 0
    func, ins, outs = run_and_check(fanout, [n, a], {n: 10})
    run_and_check(func, ins, {n: 10}, outs=outs)
def test_looptype():
    """parallel/vectorize/unroll loop keywords must map to the matching ForKind."""
    @script
    def looptype(a, b, c):
        d = output_tensor((16,), "int32")
        e = output_tensor((16,), "int32")
        f = output_tensor((16,), "int32")
        for i in parallel(16):
            d[i] = a[i]
        for j in vectorize(16):
            e[j] = b[j]
        for k in unroll(16):
            f[k] = c[k]
        return d, e, f
    a = te.placeholder((16,), name="a", dtype="int32")
    b = te.placeholder((16,), name="b", dtype="int32")
    c = te.placeholder((16,), name="c", dtype="int32")
    try:
        d, e, f = looptype(a, b, c)
        ir = d.op.body
    except:
        # Source retrieval can fail (see test_outer_product); skip silently.
        return
    iloop = ir[0]
    jloop = ir[1]
    kloop = ir[2]
    assert iloop.kind == tvm.tir.ForKind.PARALLEL
    assert jloop.kind == tvm.tir.ForKind.VECTORIZED
    assert kloop.kind == tvm.tir.ForKind.UNROLLED
    func, ins, outs = run_and_check(looptype, [a, b, c])
    run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_if():
    """Exercise if/else, conditional expressions, chained and and-ed comparisons."""
    @script
    def if_then_else(a):
        b = output_tensor((10,), "int32")
        c = output_tensor((10,), "int32")
        for i in range(10):
            if i % 2 == 0:
                c[i] = a[i]
            else:
                c[i] = b[i]
        for i in unroll(10):
            b[i] = -1 if i % 2 == 0 else 1
        return b, c
    a = te.placeholder((10,), dtype="int32", name="a")
    func, ins, outs = run_and_check(if_then_else, [a])
    run_and_check(func, ins, outs=outs)
    @script
    def if_triple_condition(a):
        b = output_tensor((10,), "int32")
        for i in range(10):
            # Chained comparison (0 <= i < 5) in the hybrid frontend.
            if 0 <= i < 5:
                b[i] = a[i]
            else:
                b[i] = a[i] + 1
        return b
    func, ins, outs = run_and_check(if_triple_condition, [a])
    run_and_check(func, ins, outs=outs)
    @script
    def if_and(a):
        b = output_tensor((10,), "int32")
        for i in range(10):
            # Same predicate expressed with an explicit `and`.
            if i >= 0 and i < 5:
                b[i] = a[i]
            else:
                b[i] = a[i] + 1
        return b
    func, ins, outs = run_and_check(if_and, [a])
    run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_bind():
    """Thread binding: in-script bind(), external schedule bind, and max_num_threads."""
    @script
    def vec_add(a, b):
        c = output_tensor((1000,), "float32")
        for tx in bind("threadIdx.x", 1000):
            c[tx] = a[tx] + b[tx]
        return c
    a = te.placeholder((1000,), dtype="float32", name="a")
    b = te.placeholder((1000,), dtype="float32", name="b")
    func, ins, outs = run_and_check(vec_add, [a, b], target="cuda")
    run_and_check(func, ins, outs=outs, target="cuda")
    @script
    def raw(a, b):
        c = output_tensor((1000,), "float32")
        for i in range(1000):
            c[i] = a[i] + b[i]
        return c
    c = raw(a, b)
    # Bind from the outside via an explicit schedule instead of in-script.
    sch = te.create_schedule(c.op)
    x = te.thread_axis("threadIdx.x")
    sch[c].bind(c.op.axis[0], x)
    func, ins, outs = run_and_check(raw, [a, b], sch=sch, outs=[c], target="cuda")
    run_and_check(func, ins, outs=outs, target="cuda")
    @te.hybrid.script
    def foo(a):
        c = output_tensor((a.shape[0],), a.dtype)
        total = allocate((1,), a.dtype, "local")
        len_i = a.shape[0]
        len_j = a.shape[1]
        for i in bind("threadIdx.x", len_i):
            total[0] = 0.0
            for k in const_range(len_j):
                total[0] += a[i, k]
            c[i] = total[0]
        return c
    a = te.placeholder((8, 4), "float32")
    c = foo(a)
    s = te.create_schedule(c.op)
    ir = tvm.lower(s, [a, c])
    func, ins, outs = run_and_check(foo, [a], target="cuda")
    run_and_check(func, ins, outs=outs, target="cuda")
    @te.hybrid.script
    def max_threads(a):
        b = output_tensor(a.shape, a.dtype)
        n = a.shape[0]
        # Query the target's thread limit and tile the iteration over it.
        m = max_num_threads(True)
        for i in bind("threadIdx.x", m):
            for j in bind("blockIdx.x", ceil_div(n, m)):
                if i * m + j < n:
                    b[i * m + j] = a[i * m + j] + a[i * m + j]
        return b
    a = te.placeholder((10000,), "float32")
    with tvm.target.Target("cuda"):
        func, ins, outs = run_and_check(max_threads, [a], target="cuda")
        run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_math_intrin():
    """Math intrinsics (sqrt/log/exp/... and popcount) in hybrid script."""
    @script
    def intrin_real(a):
        b = output_tensor((8,), "float32")
        b[0] = sqrt(a[0])
        b[1] = log(a[1])
        b[2] = exp(a[2])
        b[3] = sigmoid(a[3])
        b[4] = power(a[4], a[5])
        b[5] = tanh(a[5])
        b[6] = min(a[4], a[5])
        b[7] = max(a[5], a[6])
        return b
    a8 = te.placeholder((8,), dtype="float32", name="a")
    b8 = intrin_real(a8)
    sch = te.create_schedule(b8.op)
    func = tvm.build(sch, [a8, b8])
    assert func
    a = numpy.arange(2, 10).astype("float32")
    tvm_a = tvm.nd.array(a)
    tvm_b = tvm.nd.array(numpy.zeros((8,), dtype="float32"))
    # `b` is the pure-python emulation result used as the reference.
    b = intrin_real(a)
    func(tvm_a, tvm_b)
    tvm.testing.assert_allclose(b, tvm_b.numpy(), rtol=1e-5)
    @script
    def intrin_int(a):
        b = output_tensor((1,), "int32")
        b[0] = popcount(a[0])
        return b
    a1 = te.placeholder((1,), dtype="int32")
    b1 = intrin_int(a1)
    sch = te.create_schedule(b1.op)
    func = tvm.build(sch, [a1, b1])
    assert func
    a = numpy.array([114514]).astype("int32")
    tvm_a = tvm.nd.array(a)
    tvm_b = tvm.nd.array(numpy.array([0]).astype("int32"))
    b = intrin_int(a)
    func(tvm_a, tvm_b)
    assert tvm_b.numpy()[0] == b[0]
@tvm.testing.skip_if_wheel_test
# test non-canonical loops
def test_non_zero():
    """Loops that do not start at zero or whose start depends on an outer var."""
    @te.hybrid.script
    def blur(a):
        b = output_tensor((30, 30), "float32")
        # Loops start at 2, not 0.
        for i in range(2, 32):
            for j in range(2, 32):
                s = 0.0
                for di in range(3):
                    for dj in range(3):
                        s += a[i - di, j - dj]
                b[i - 2, j - 2] = s / 9.0
        return b
    a = te.placeholder((32, 32), "float32", "a")
    func, ins, outs = run_and_check(blur, [a])
    run_and_check(func, ins, outs=outs)
    @te.hybrid.script
    def triangle(a, b):
        c = output_tensor((10, 10), dtype="float32")
        for i in range(10):
            # Inner loop start depends on the outer loop variable.
            for j in range(i, 10):
                c[i, j] = a[i] * b[j]
        return c
    a = te.placeholder((10,), dtype="float32", name="a")
    b = te.placeholder((10,), dtype="float32", name="b")
    func, ins, outs = run_and_check(triangle, [a, b])
    run_and_check(func, ins, outs=outs)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_allocate():
    """allocate() buffers: default scope, plus CUDA "shared" and "local" scopes."""
    @te.hybrid.script
    def blur2d(a):
        b = output_tensor((30, 30), "float32")
        for i in range(30):
            # Per-iteration scratch buffer of horizontal partial sums.
            ha = allocate((3, 30), "float32")
            for j in range(3):
                for k in range(30):
                    ha[j, k] = a[i + j, k] + a[i + j, k + 1] + a[i + j, k + 2]
            for j in range(30):
                b[i, j] = (ha[0, j] + ha[1, j] + ha[2, j]) / 9.0
        return b
    a = te.placeholder((32, 32), "float32", "a")
    b = blur2d(a)
    sch = te.create_schedule(b.op)
    func, ins, outs = run_and_check(blur2d, [a])
    run_and_check(func, ins, outs=outs)
    @te.hybrid.script
    def share_vec_add(a, b):
        c = output_tensor((256,), "float32")
        shared = allocate((256,), "float32", "shared")
        for i in bind("threadIdx.x", 256):
            shared[i] = a[i]
        local = allocate((256,), "float32", "local")
        for i in bind("threadIdx.x", 256):
            local[i] = b[i]
        for i in bind("threadIdx.x", 256):
            c[i] = shared[i] + local[i]
        return c
    a = te.placeholder((256,), dtype="float32", name="a")
    b = te.placeholder((256,), dtype="float32", name="b")
    c = share_vec_add(a, b)
    func, ins, outs = run_and_check(share_vec_add, [a, b], target="cuda")
    run_and_check(func, ins, outs=outs, target="cuda")
@tvm.testing.skip_if_wheel_test
def test_upstream():
    """A hybrid op consuming the output of an upstream te.compute op."""
    @te.hybrid.script
    def upstream(a):
        b = output_tensor((20,), "float32")
        for i in range(20):
            b[i] = a[i] * i
        return b
    a = te.placeholder((20,), "float32")
    b = te.placeholder((20,), "float32")
    c = te.compute((20,), lambda x: a[x] + b[x])
    d = upstream(c)
    sch = te.create_schedule([c.op, d.op])
    ir = tvm.lower(sch, [a, b, d])
    func = tvm.build(sch, [a, b, d])
    assert func
    a = numpy.random.randn(20).astype("float32")
    b = numpy.random.randn(20).astype("float32")
    # Reference computed directly in numpy.
    ref = numpy.zeros((20,), "float32")
    for i in range(20):
        ref[i] = (a[i] + b[i]) * i
    tvm_a = tvm.nd.array(a)
    tvm_b = tvm.nd.array(b)
    tvm_d = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
    func(tvm_a, tvm_b, tvm_d)
    tvm.testing.assert_allclose(tvm_d.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_downstream():
    """A te.compute op consuming the output of a hybrid op."""
    @te.hybrid.script
    def downstream(a):
        b = output_tensor((20,), "float32")
        for i in range(20):
            b[i] = a[i] * i
        return b
    a = te.placeholder((20,), "float32")
    b = downstream(a)
    c = te.compute((20,), lambda x: b[x] + 1.0)
    sch = te.create_schedule(c.op)
    module = tvm.build(sch, [a, c])
    assert module
    a = numpy.random.randn(20).astype("float32")
    # Reference computed directly in numpy.
    ref = numpy.zeros((20,)).astype("float32")
    for i in range(20):
        ref[i] = (a[i] * i) + 1.0
    tvm_a = tvm.nd.array(a)
    tvm_c = tvm.nd.array(numpy.zeros((20,)).astype("float32"))
    module(tvm_a, tvm_c)
    tvm.testing.assert_allclose(tvm_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_const_param():
    """Pass a tir constant as a scalar argument to a hybrid script."""
    @te.hybrid.script
    def add_something(a, b):
        c = output_tensor((11,), "int32")
        for i in range(11):
            c[i] = a[i] + b
        return c
    a = te.placeholder((11,), dtype="int32", name="a")
    # `b` is a compile-time constant, so it is folded into the module and
    # not passed at call time below.
    b = tvm.tir.const(11, "int32")
    c = add_something(a, b)
    sch = te.create_schedule(c.op)
    module = tvm.build(sch, [a, c], "llvm")
    assert module
    np_a = numpy.arange(11).astype("int32")
    np_b = 11
    np_c = numpy.zeros((11,)).astype("int32")
    nd_a = tvm.nd.array(np_a)
    nd_c = tvm.nd.array(numpy.zeros((11,)).astype("int32"))
    module(nd_a, nd_c)
    ref = add_something(np_a, 11)
    tvm.testing.assert_allclose(nd_c.numpy(), ref, 1e-5, 1e-5)
@tvm.testing.skip_if_wheel_test
def test_value_index():
    """Chain two hybrid ops, selecting individual outputs of a multi-output op."""
    @te.hybrid.script
    def kernel_a(a):
        b = output_tensor((16,), "int32")
        c = output_tensor((4, 4), "int32")
        for i in range(16):
            b[i] = a[i] + 2
            c[i // 4, i % 4] = a[i] + 1
        return b, c
    @te.hybrid.script
    def kernel_b(b, a):
        c = output_tensor((4, 4), "int32")
        for i in range(4):
            for j in range(4):
                c[i, j] = a[i * 4 + j] * b[i, j]
        return c
    a = te.placeholder((16,), "int32")
    b, c = kernel_a(a)
    # Feed both outputs of kernel_a (in swapped roles) into kernel_b.
    d = kernel_b(c, b)
    sch = te.create_schedule(d.op)
    module = tvm.build(sch, [a, d])
    assert module
    np_a = numpy.arange(16).astype("int32")
    np_b, np_c = kernel_a(np_a)
    ref = kernel_b(np_c, np_b)
    res = tvm.nd.array(numpy.zeros((4, 4)).astype("int32"))
    module(tvm.nd.array(np_a), res)
    tvm.testing.assert_allclose(res.numpy(), ref)
@tvm.testing.skip_if_wheel_test
def test_func_call():
    """A hybrid script may call another module-level hybrid script (outer_product)."""
    @te.hybrid.script
    def foo(a, b):
        for i in range(len(a)):
            a[i] = i + 1.0
        for i in range(len(a)):
            b[i] = i + 1.0
        c = outer_product(10, 10, a, b)
        d = output_tensor(c.shape, c.dtype)
        for i in range(10):
            for j in range(10):
                d[i, j] = c[i, j] + i * j
        return d
    a = te.placeholder((10,), name="a")
    b = te.placeholder((10,), name="b")
    func, ins, outs = run_and_check(foo, [a, b])
    run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_bool():
    """Compound boolean conditions (multiple or-ed comparisons) in hybrid script."""
    @te.hybrid.script
    def foo(a):
        b = output_tensor(a.shape, a.dtype)
        b[0] = 1.2
        for i in range(1, a.shape[0] - 1):
            if a[i] * a[i - 1] < a[i] or a[i] * a[i - 1] < a[i - 1] or i * a[i] == a[i]:
                b[i] = a[i]
            else:
                b[i] = 0.0
        return b
    a = te.placeholder((10,), name="a")
    func, ins, outs = run_and_check(foo, [a])
    run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_const_range():
    """const_range loops: constant extents, python-list captures, unrolled bodies."""
    @te.hybrid.script
    def foo(a, b):
        c = output_tensor(a.shape, a.dtype)
        d = output_tensor(a.shape, "int32")
        for i in const_range(2):
            for j in const_range(5):
                c[i, j] = float32(int32(a[i, j]) + b[i, j])
        for i in const_range(len(b)):
            for j in const_range(len(b[0])):
                d[i, j] = int32(a[i, j] + b[i, j])
        return c, d
    a = te.placeholder((2, 5), name="a", dtype="float32")
    # `b` is a captured python list, usable with len() inside the script.
    b = [[1, 2, 3, 4, 5], [5, 4, 3, 2, 1]]
    func, ins, outs = run_and_check(foo, [a, b])
    run_and_check(func, ins, outs=outs)
    @te.hybrid.script
    def goo(a, b):
        c = output_tensor(a.shape, a.dtype)
        len_b = len(b)
        for i in const_range(len_b * 2):
            if i < len_b:
                c[i] = a[i] + b[i]
            else:
                c[i - len_b] = a[i - len_b] + b[i - len_b]
        return c
    a = te.placeholder((5,), name="a", dtype="int32")
    b = [1, 2, 3, 4, 5]
    c = goo(a, tvm.runtime.convert(b))
    sch = te.create_schedule(c.op)
    func, ins, outs = run_and_check(goo, [a, b])
    run_and_check(func, ins, outs=outs)
    @te.hybrid.script
    def hoo(a, b):
        c = output_tensor(a.shape, a.dtype)
        len_b = len(b)
        for i in range(a.shape[0]):
            for j in const_range(len(b)):
                d = a[i] * b[j]
                d += a[i] + b[j]
                c[i] = d
        return c
    a = te.placeholder((5,), name="a", dtype="int32")
    b = [1, 2, 3, 4, 5]
    func, ins, outs = run_and_check(hoo, [a, b])
    run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_schedule():
    """Apply split/reorder/vectorize/parallel/fuse schedules to a hybrid op."""
    @script
    def outer_product(a, b):
        c = output_tensor((64, 64), a.dtype)
        for i in range(64):
            for j in range(64):
                c[i, j] = a[i] * b[j]
        return c
    a = te.placeholder((64,), name="a", dtype="float32")
    b = te.placeholder((64,), name="b", dtype="float32")
    c = outer_product(a, b)
    # Test perfect loop split
    # Test loop reorder
    # Test loop annotation
    sch = te.create_schedule(c.op)
    i, j = c.op.axis
    io, ii = sch[c].split(i, 4)
    sch[c].parallel(ii)
    jo, ji = sch[c].split(j, 4)
    joo, joi = sch[c].split(jo, 4)
    sch[c].vectorize(ji)
    sch[c].reorder(ii, io, joo, joi, ji)
    # Walk the lowered loop nest and check the reordered loop var names.
    ir = tvm.lower(sch, [a, b, c])["main"].body
    assert isinstance(ir, tvm.tir.AttrStmt)
    ir = ir.body
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "i.inner"
    ir = ir.body
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "i.outer"
    ir = ir.body
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "j.outer.outer"
    ir = ir.body
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "j.outer.inner"
    ir = ir.body
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)
    # Test fuse
    sch = te.create_schedule(c.op)
    sch[c].fuse(c.op.axis[0], c.op.axis[1])
    ir = tvm.lower(sch, [a, b, c])["main"].body
    assert isinstance(ir, tvm.tir.AttrStmt)
    ir = ir.body
    assert isinstance(ir, tvm.tir.For)
    assert ir.loop_var.name == "i.j.fused"
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)
    # Test imperfect loop split
    sch = te.create_schedule(c.op)
    sch[c].split(c.op.axis[0], 3)
    ir = tvm.lower(sch, [a, b, c], simple_mode=True)
    func, ins, outs = run_and_check(outer_product, [a, b], sch=sch, outs=[c])
    run_and_check(func, ins, outs=outs)
# Test loop binds
@tvm.testing.skip_if_wheel_test
def test_capture():
    """Python constants (tuples, nested lists, ints) captured from the closure."""
    n = 8
    constant_tuple = (10, n)
    constant_list = [[1, 2], [3, n]]
    const_value = 1
    @te.hybrid.script
    def add_something(a):
        c = output_tensor((constant_tuple[1],), "int32")
        for i in range(constant_tuple[1]):
            c[i] = a[i] + constant_list[1][const_value]
        return c
    a = te.placeholder((n,), dtype="int32", name="a")
    func, ins, outs = run_and_check(add_something, [a])
    run_and_check(func, ins, outs=outs)
@tvm.testing.skip_if_wheel_test
def test_array_inputs():
    """A hybrid script taking an array of tensors as a single argument."""
    @script
    def sum_array(inputs):
        out = output_tensor((10,), inputs[0].dtype)
        n = len(inputs)
        for i in range(10):
            for j in const_range(n):
                out[i] += inputs[j][i]
        return out
    n = 5
    inputs = []
    for i in range(n):
        inputs.append(te.placeholder((10,), name="t%s" % i, dtype="float32"))
    out = sum_array(tvm.runtime.convert(inputs))
    # Each placeholder becomes a separate op input.
    assert len(out.op.inputs) == n
    sch = te.create_schedule(out.op)
    mod = tvm.build(sch, inputs + [out], target="llvm")
    assert mod
    input_nd = []
    out_ref = numpy.zeros((10,))
    for _ in range(n):
        arr = numpy.random.uniform(size=(10,)).astype("float32")
        input_nd.append(tvm.nd.array(arr))
        out_ref += arr
    out_nd = tvm.nd.array(numpy.zeros((10,), "float32"))
    mod(*input_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), out_ref)
if __name__ == "__main__":
    # Run all tests when this file is executed directly as a script.
    test_outer_product()
    test_fanout()
    test_looptype()
    test_if()
    test_bind()
    test_math_intrin()
    test_non_zero()
    test_allocate()
    test_upstream()
    test_downstream()
    test_const_param()
    test_value_index()
    test_func_call()
    test_bool()
    test_const_range()
    test_schedule()
    test_capture()
    test_array_inputs()
    # TODO:
    # test_inplace()
| 26,470 | 29.321879 | 95 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_simplify.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
def test_stmt_simplify():
    """With n bound to 10 by a LetStmt, the condition `i < 12`... i ranges over
    [0, 10), so the `if` is provably true and Simplify removes it, leaving a
    bare BufferStore."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    with ib.for_range(0, n, name="i") as i:
        with ib.if_scope(i < 12):
            A[i] = C[i]
    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    assert isinstance(body.body, tvm.tir.BufferStore)
def test_thread_extent_simplify():
    """Thread-extent bounds feed Simplify: with n=10 and ty extent 1, the
    condition `tx + ty < 12` is provable, so the `if` is removed."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    # NOTE(review): tx's extent is deliberately attached twice — presumably
    # exercising duplicate thread_extent attrs; verify against pass history.
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(tx, "thread_extent", n)
    ib.scope_attr(ty, "thread_extent", 1)
    with ib.if_scope(tx + ty < 12):
        A[tx] = C[tx + ty]
    body = tvm.tir.LetStmt(n, 10, ib.get())
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    assert isinstance(body.body.body.body, tvm.tir.BufferStore)
def test_if_likely():
    """Nested identical likely() guards collapse: the outer IfThenElse stays
    (n is symbolic), but the redundant inner one must be eliminated."""
    ib = tvm.tir.ir_builder.create()
    A = ib.pointer("float32", name="A")
    C = ib.pointer("float32", name="C")
    n = te.size_var("n")
    tx = te.thread_axis("threadIdx.x")
    ty = te.thread_axis("threadIdx.y")
    ib.scope_attr(tx, "thread_extent", 32)
    ib.scope_attr(ty, "thread_extent", 32)
    with ib.if_scope(ib.likely(tx * 32 + ty < n)):
        with ib.if_scope(ib.likely(tx * 32 + ty < n)):
            A[tx] = C[tx * 32 + ty]
    body = ib.get()
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([A, C, n], body))
    body = tvm.tir.transform.Simplify()(mod)["main"].body
    assert isinstance(body.body.body, tvm.tir.IfThenElse)
    assert not isinstance(body.body.body.then_case, tvm.tir.IfThenElse)
def test_basic_likely_elimination():
    """A data-dependent reduction extent (W[i+1] - W[i]) must lower with no
    residual `if` guards in the generated code."""
    n = te.size_var("n")
    X = te.placeholder(shape=(n,), name="x")
    W = te.placeholder(shape=(n + 1,), dtype="int32", name="w")
    def f(i):
        # Sum X over the window [W[i], W[i+1]).
        start = W[i]
        extent = W[i + 1] - W[i]
        rv = te.reduce_axis((0, extent))
        return te.sum(X[rv + start], axis=rv)
    Y = te.compute(X.shape, f, name="y")
    s = te.create_schedule([Y.op])
    stmt = tvm.lower(s, [X, W, Y], simple_mode=True)
    assert "if" not in str(stmt)
def test_complex_likely_elimination():
    """Sparse-lengths-sum with a scan-based cumsum and a vectorized split must
    lower without residual `if` guards."""
    def cumsum(X):
        """
        Y[i] = sum(X[:i])
        """
        (m,) = X.shape
        s_state = te.placeholder((m + 1,), dtype="int32", name="state")
        s_init = te.compute((1,), lambda _: tvm.tir.const(0, "int32"))
        s_update = te.compute((m + 1,), lambda l: s_state[l - 1] + X[l - 1])
        return tvm.te.scan(s_init, s_update, s_state, inputs=[X], name="cumsum")
    def sparse_lengths_sum(data, indices, lengths):
        oshape = list(data.shape)
        oshape[0] = lengths.shape[0]
        # Row n of the output sums `lengths[n]` gathered rows of `data`.
        length_offsets = cumsum(lengths)
        def sls(n, d):
            gg = te.reduce_axis((0, lengths[n]))
            indices_idx = length_offsets[n] + gg
            data_idx = indices[indices_idx]
            data_val = data[data_idx, d]
            return te.sum(data_val, axis=gg)
        return te.compute(oshape, sls)
    m, n, d, i, l = (
        te.size_var("m"),
        te.size_var("n"),
        te.size_var("d"),
        te.size_var("i"),
        te.size_var("l"),
    )
    data_ph = te.placeholder((m, d * 32), name="data")
    indices_ph = te.placeholder((i,), name="indices", dtype="int32")
    lengths_ph = te.placeholder((n,), name="lengths", dtype="int32")
    Y = sparse_lengths_sum(data_ph, indices_ph, lengths_ph)
    s = te.create_schedule([Y.op])
    (n, d) = s[Y].op.axis
    (do, di) = s[Y].split(d, factor=32)
    (gg,) = s[Y].op.reduce_axis
    s[Y].reorder(n, do, gg, di)
    s[Y].vectorize(di)
    stmt = tvm.lower(s, [data_ph, indices_ph, lengths_ph, Y], simple_mode=True)
    assert "if" not in str(stmt)
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared harness: run tir.Simplify under per-testcase config flags.

    Subclasses define ``before``/``expected`` PrimFuncs and may flip the
    class-level flags below to enable optional simplification features.
    """
    transitively_prove_inequalities = False
    convert_boolean_to_and_of_ors = False
    apply_constraints_to_boolean_branches = False
    propagate_knowns_to_prove_conditional = False
    propagate_knowns_to_simplify_expressions = False
    def transform(self):
        # Return a closure that applies Simplify with this class's flags.
        def inner(mod):
            config = {
                "tir.Simplify": {
                    "transitively_prove_inequalities": self.transitively_prove_inequalities,
                    "convert_boolean_to_and_of_ors": self.convert_boolean_to_and_of_ors,
                    "apply_constraints_to_boolean_branches": self.apply_constraints_to_boolean_branches,
                    "propagate_knowns_to_prove_conditional": self.propagate_knowns_to_prove_conditional,
                    "propagate_knowns_to_simplify_expressions": self.propagate_knowns_to_simplify_expressions,
                }
            }
            with tvm.transform.PassContext(config=config):
                mod = tvm.tir.transform.Simplify()(mod)
            return mod
        return inner
class TestLoadStoreNoop(BaseBeforeAfter):
    """Store of a value that was just read from the same location is a no-op."""
    def before(A: T.Buffer((1,), "float32")):
        A[0] = A[0]
    def expected(A: T.Buffer((1,), "float32")):
        # The self-assignment is replaced by an empty evaluate.
        T.evaluate(0)
class TestLoadStoreNoopAfterSimplify(BaseBeforeAfter):
    """As test_load_store_noop, but requiring simplification to identify.

    Previously, a bug caused the self-assignment of a buffer to be
    checked based on the pre-simplification assignment, not the
    post-simplification.  This test is to identify any similar
    regression.
    """

    def before(A: T.Buffer((1,), "float32")):
        # (5.0 - 5.0) must first fold to 0.0 for the store to be a no-op.
        A[0] = A[0] + (5.0 - 5.0)

    def expected(A: T.Buffer((1,), "float32")):
        T.evaluate(0)
class TestNestedCondition(BaseBeforeAfter):
    """Nested IfThenElse with the same condition can be simplified.

    Requires const_int_bound to narrow scope of i within the
    conditional, or for rewrite_simplify to recognize the literal
    constraint.
    """

    def before(A: T.Buffer((16,), "float32")):
        for i in T.serial(16):
            if i == 5:
                # Redundant inner check, removable given the outer constraint.
                if i == 5:
                    A[i] = 0.0

    def expected(A: T.Buffer((16,), "float32")):
        for i in T.serial(16):
            if i == 5:
                A[i] = 0.0
class TestNestedProvableCondition(BaseBeforeAfter):
    """Simplify inner conditional using constraint from outer.

    Requires const_int_bound to narrow scope of i within the
    conditional.
    """

    def before(A: T.Buffer((16,), "float32")):
        for i in T.serial(16):
            if i == 5:
                # i == 5 implies i < 7, so the inner check is provably true.
                if i < 7:
                    A[i] = 0.0

    def expected(A: T.Buffer((16,), "float32")):
        for i in T.serial(16):
            if i == 5:
                A[i] = 0.0
class TestNestedVarCondition(BaseBeforeAfter):
    """Simplify inner conditional using constraint from outer.

    Requires for rewrite_simplify to recognize the repeated
    constraint.
    """

    def before(A: T.Buffer((16,), "float32"), n: T.int32):
        for i in T.serial(16):
            # Condition depends on the dynamic n, so const-int-bound cannot
            # help; the repeated literal constraint must be recognized.
            if i == n:
                if i == n:
                    A[i] = 0.0

    def expected(A: T.Buffer((16,), "float32"), n: T.int32):
        for i in T.serial(16):
            if i == n:
                A[i] = 0.0
class TestAlteredBufferContents(BaseBeforeAfter):
    """No simplification of data-dependent conditionals.

    A literal constraint must not be propagated if the values
    referenced may change.  TIR requires single assignment of
    variables, so Var objects may be assumed constant, but BufferLoad
    may not.
    """

    def before(A: T.Buffer((1,), "int32"), n: T.int32):
        if A[0] == n:
            # The write invalidates the A[0] == n constraint, so the
            # second conditional must be left untouched.
            A[0] = A[0] + 1
            if A[0] == n:
                A[0] = 0

    expected = before
class TestNegationOfCondition(BaseBeforeAfter):
    """Use negation of outer condition to simplify inner.

    Within the body of an if statement, the negation of the
    condition is known to be false.
    """

    def before(A: T.Buffer((16,), "int32")):
        for i in T.serial(16):
            if i == 5:
                # i != 5 is provably false here, so only the else-branch survives.
                if i != 5:
                    A[i] = 0
                else:
                    A[i] = 1

    def expected(A: T.Buffer((16,), "int32")):
        for i in T.serial(16):
            if i == 5:
                A[i] = 1
class TestNegationOfNotEqual(BaseBeforeAfter):
    """As TestNegationOfCondition, but with a != outer condition.

    Because ConstIntBoundAnalyzer only tracks the min and max allowed
    values, the outer i!=5 condition does not provide a constraint on
    the bounds.  This test relies on RewriteSimplifier to recognize
    ``i==5`` as the negation of a literal constraint.
    """

    def before(A: T.Buffer((16,), "int32")):
        for i in T.serial(16):
            if i != 5:
                if i == 5:
                    A[i] = 0
                else:
                    A[i] = 1

    def expected(A: T.Buffer((16,), "int32")):
        for i in T.serial(16):
            if i != 5:
                A[i] = 1
class TestNegationOfVarCondition(BaseBeforeAfter):
    """As TestNegationOfCondition, but with a dynamic condition.

    This simplification cannot be done with ConstIntBoundAnalyzer, and
    must rely on RewriteSimplifier recognizing the repeated literal.
    """

    def before(A: T.Buffer((16,), "int32"), n: T.int32):
        for i in T.serial(16):
            if i == n:
                if i != n:
                    A[i] = 0
                else:
                    A[i] = 1

    def expected(A: T.Buffer((16,), "int32"), n: T.int32):
        for i in T.serial(16):
            if i == n:
                A[i] = 1
class TestLiteralConstraintSplitBooleanAnd(BaseBeforeAfter):
    """Split a boolean AND into independent constraints

    A single if condition may impose multiple literal constraints.
    Each constraint that is ANDed together to form the condition
    should be treated as an independent constraint.  The use of n in
    the condition is to ensure we exercise RewriteSimplifier.
    """

    def before(A: T.Buffer((16, 16), "int32"), n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n and j == n:
                # i == n follows from the first half of the AND constraint.
                if i == n:
                    A[i, j] = 0

    def expected(A: T.Buffer((16, 16), "int32"), n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n and j == n:
                A[i, j] = 0
class TestLiteralConstraintSplitBooleanOr(BaseBeforeAfter):
    """Split a boolean OR into independent constraints

    Similar to TestLiteralConstraintSplitBooleanAnd, but splitting a
    boolean OR into independent conditions.  This uses the
    simplification that ``!(x || y) == !x && !y``.

    The use of ``n`` in the condition is to ensure we exercise
    RewriteSimplifier.
    """

    def before(A: T.Buffer((16, 16), "int32"), n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n or j == n:
                A[i, j] = 0
            else:
                # In the else-branch both i != n and j != n hold, so the
                # inner then-case is unreachable.
                if i == n:
                    A[i, j] = 1
                else:
                    A[i, j] = 2

    def expected(A: T.Buffer((16, 16), "int32"), n: T.int32):
        for i, j in T.grid(16, 16):
            if i == n or j == n:
                A[i, j] = 0
            else:
                A[i, j] = 2
class TestProveConditionUsingLet(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings

    Not all let bindings are inlined when they occur in later
    expressions.  However, even if they are not inlined, they may be
    used to prove the value of a condition.
    """

    @T.prim_func
    def before(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            # `condition or i >= 3` covers both cases, so it is always true.
            if condition or i >= 3:
                A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            A[i] = condition
class TestProveLetCondition(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings

    Not all let bindings are inlined when they occur in later
    expressions.  However, even if they are not inlined, they may be
    used to prove the value of a condition.
    """

    @T.prim_func
    def before(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            if i < 3:
                # `condition` is bound to i < 3, which the outer scope proves.
                if condition:
                    A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            if i < 3:
                A[i] = condition
class TestProveRepeatedLetCondition(BaseBeforeAfter):
    """Simplify conditions using non-inlined let bindings

    A variable may be used as a literal constraint, and be recognized
    as being True within the context of the constraint.
    """

    @T.prim_func
    def before(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                if condition:
                    A[i] = condition

    @T.prim_func
    def expected(A: T.Buffer(4, "bool")):
        for i in T.serial(4):
            condition = i < 3
            if condition:
                # Inside `if condition:` the stored value is provably True.
                A[i] = True
class TestIfThenElseExpr(BaseBeforeAfter):
    """A T.if_then_else whose condition is provable in scope folds to one branch."""

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            if i < 12:
                # The expression's condition repeats the scoped constraint.
                A[i] = T.if_then_else(i < 12, 1.0, 2.0, dtype="float32")

    @T.prim_func
    def expected(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            if i < 12:
                A[i] = 1.0
class TestCeilLog2Int(BaseBeforeAfter):
    """Simplify expressions resulting from topi.math.ceil_log2"""

    @T.prim_func
    def before(A: T.Buffer(1, "int32")):
        # ceil(log2(14)) == 4; the whole cast chain folds to a constant.
        A[0] = T.cast(
            T.ceil(T.log2(T.cast(14, "float64"), dtype="float64"), dtype="float64"), dtype="int32"
        )

    @T.prim_func
    def expected(A: T.Buffer(1, "int32")):
        A[0] = 4
class TestLeftCeilLog2LowerBound(BaseBeforeAfter):
    """Integer bounds are propagated through topi.math.ceil_log2"""

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            # For i in [0, 15], ceil(log2(i + 1025)) is always 11.
            x = T.cast(
                T.ceil(T.log2(T.cast(i + 1024 + 1, "float64"), dtype="float64"), dtype="float64"),
                dtype="int32",
            )
            if x == 11:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftLowerBound(BaseBeforeAfter):
    """Integer bounds are propagated through left shift

    min(1 << i) = 1 << min(i)
                = 1 << 0
                = 1
    """

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            # Provably true for all i in [0, 15], so the guard is removed.
            if T.shift_left(1, i, dtype="int32") >= 1:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftUpperBound(BaseBeforeAfter):
    """Integer bounds are propagated through left shift

    max(31 << i) = 31 << max(i)
                 = 31 << 15
                 = 1015808
    """

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            # Provably true for all i in [0, 15], so the guard is removed.
            if T.shift_left(31, i, dtype="int32") <= 1015808:
                A[i] = 0.0

    @T.prim_func
    def expected(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            A[i] = 0.0
class TestLeftShiftOfNegativeValue(BaseBeforeAfter):
    """No const int bounds of left shift of negative value.

    This is target dependent, and does not currently have a specified
    behavior in TIR.  For example, in CodeGenC, this generates C code
    with undefined behavior.
    """

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            # Must not be simplified away: shifting a negative value is
            # not given bounds by the analyzer.
            if -64 <= T.shift_left(-i, 4, dtype="int32"):
                A[i] = 0.0

    expected = before
class TestLeftShiftByNegativeValue(BaseBeforeAfter):
    """No const int bounds of left shift by negative bit count.

    This is target dependent, and does not currently have a specified
    behavior in TIR.  For example, in CodeGenC, this generates C code
    with undefined behavior.
    """

    @T.prim_func
    def before(A: T.Buffer(16, "float32")):
        for i in T.serial(16):
            # Must not be simplified away: a negative shift amount is
            # not given bounds by the analyzer.
            if T.shift_left(16, -i, dtype="int32") <= 16:
                A[i] = 0.0

    expected = before
class TestRemoveTransitivelyProvableCondition(BaseBeforeAfter):
    """Remove comparisons that may be proven using multiple others

    For example, the `0 < i` and `i <= j` conditions can be used to prove
    that `0 < j`.
    """

    transitively_prove_inequalities = True

    i, j, k = [tvm.tir.Var(name, "int32") for name in "ijk"]
    zero = tvm.tir.IntImm("int32", 0)

    # Each case is (priors, postulate, provable): when `provable` is
    # True, the postulate must simplify to True under the priors;
    # otherwise it must be left in (canonically simplified) form.
    test_case = tvm.testing.parameter(
        (tvm.tir.all(zero < i, i <= j), zero < j, True),
        # Transitive comparisons from LT
        (tvm.tir.all(i < j, j < k), i < k, True),
        (tvm.tir.all(i < j, j == k), i < k, True),
        (tvm.tir.all(i < j, j <= k), i < k, True),
        (tvm.tir.all(i < j, j > k), i < k, False),
        (tvm.tir.all(i < j, j >= k), i < k, False),
        (tvm.tir.all(i < j, j != k), i < k, False),
        # Transitive comparisons from LE
        (tvm.tir.all(i <= j, j < k), i < k, True),
        (tvm.tir.all(i <= j, j == k), i == k, False),
        (tvm.tir.all(i <= j, j == k), i <= k, True),
        (tvm.tir.all(i <= j, j <= k), i <= k, True),
        (tvm.tir.all(i <= j, j <= k), i < k, False),
        (tvm.tir.all(i <= j, j > k), i < k, False),
        (tvm.tir.all(i <= j, j >= k), i < k, False),
        (tvm.tir.all(i <= j, j != k), i < k, False),
        # Transitive comparisons from GT
        (tvm.tir.all(i > j, j > k), i > k, True),
        (tvm.tir.all(i > j, j == k), i > k, True),
        (tvm.tir.all(i > j, j >= k), i > k, True),
        (tvm.tir.all(i > j, j < k), i > k, False),
        (tvm.tir.all(i > j, j <= k), i > k, False),
        (tvm.tir.all(i > j, j != k), i > k, False),
        # Transitive comparisons from GE
        (tvm.tir.all(i >= j, j > k), i > k, True),
        (tvm.tir.all(i >= j, j == k), i == k, False),
        (tvm.tir.all(i >= j, j == k), i >= k, True),
        (tvm.tir.all(i >= j, j >= k), i >= k, True),
        (tvm.tir.all(i >= j, j >= k), i > k, False),
        (tvm.tir.all(i >= j, j < k), i > k, False),
        (tvm.tir.all(i >= j, j <= k), i > k, False),
        (tvm.tir.all(i >= j, j != k), i > k, False),
        # GT or LT may be used to prove NE
        (tvm.tir.all(i == j, j != k), i != k, True),
        (tvm.tir.all(i == j, j < k), i != k, True),
        (tvm.tir.all(i == j, j > k), i != k, True),
        (tvm.tir.all(i == j, j != k), i < k, False),
        (tvm.tir.all(i == j, j != k), i > k, False),
        # Because these are integers, x<y is equivalent to x <= y-1,
        # and may be used in equivalent simplifications.
        (tvm.tir.all(i <= j - 1, j < k), i < k, True),
        (tvm.tir.all(i <= j - 1, j == k), i < k, True),
        (tvm.tir.all(i <= j - 1, j <= k), i < k, True),
        (tvm.tir.all(i <= j - 1, j > k), i < k, False),
        (tvm.tir.all(i <= j - 1, j >= k), i < k, False),
        (tvm.tir.all(i <= j - 1, j != k), i < k, False),
        # Either or both inequalities may have an additive offset.
        (tvm.tir.all(i <= j + 5, j <= k + 7), i <= k + 12, True),
        (tvm.tir.all(i <= j + 5, j <= k + 7), i <= k + 11, False),
        # For floats, x < y + c1 and y < z + c2 implies that x < z + (c1 + c2).
        # Because this simplification applies to integers, transitive
        # application of LT or GT can give a tighter constraint.
        #
        # i < j + c1, j < k + c2
        # i <= j + c1 - 1, j <= k + c2 - 1
        # i + 1 - c1 <= j, j <= k + c2 - 1
        # i + 1 - c1 <= k + c2 - 1
        # i <= k + c1 + c2 - 2
        # i < k + (c1 + c2 - 1)
        #
        (tvm.tir.all(i < j + 5, j < k + 7), i < k + 11, True),
        (tvm.tir.all(i < j + 5, j < k + 7), i < k + 10, False),
    )

    @tvm.testing.fixture
    def before(self, test_case):
        priors, postulate, _ = test_case

        @T.prim_func
        def func(A: T.Buffer(1, "bool")):
            if priors:
                A[0] = postulate

        return func

    @tvm.testing.fixture
    def expected(self, test_case):
        priors, postulate, provable = test_case

        # Canonicalize so the expected function matches the simplifier's
        # output form, not the hand-written input form.
        analyzer = tvm.arith.Analyzer()
        priors = analyzer.canonical_simplify(priors)

        if provable:

            @T.prim_func
            def func(A: T.Buffer(1, "bool")):
                if priors:
                    A[0] = True

            return func

        else:
            postulate = analyzer.canonical_simplify(postulate)

            @T.prim_func
            def func(A: T.Buffer(1, "bool")):
                if priors:
                    A[0] = postulate

            return func
class TestSuppressTransitivelyProvableCondition(BaseBeforeAfter):
    """Transitive proofs are not applied when the flag is disabled."""

    transitively_prove_inequalities = False

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        if i < j and j < k:
            A[0] = i < k

    expected = before
class TestRewriteAsAndOfOrs(BaseBeforeAfter):
    """If enabled, rewrite boolean expressions into AND of OR"""

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(3, "bool")):
        T.evaluate(A[0] or (A[1] and A[2]))

    def expected(A: T.Buffer(3, "bool")):
        # OR distributed over AND: x or (y and z) == (x or y) and (x or z).
        T.evaluate((A[0] or A[1]) and (A[0] or A[2]))
class TestSuppressRewriteAsAndOfOrs(BaseBeforeAfter):
    """Only rewrite into AND of OR when allowed"""

    convert_boolean_to_and_of_ors = False

    def before(A: T.Buffer(3, "bool")):
        T.evaluate(A[0] or (A[1] and A[2]))

    expected = before
class TestRewriteAsAndOfOrsWithTopLevelAnd(BaseBeforeAfter):
    """The expression being rewritten may start with an AND

    Like TestRewriteAsAndOfOrs, but with an AndNode as the outermost
    boolean operator.  Even though it is primarily OR nodes that are
    being rewritten, the call to SimplifyAsAndOfOrs should apply to
    the outermost AndNode or OrNode in order to enable better
    simplification.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(4, "bool")):
        T.evaluate((A[0] or A[1]) and (A[1] or (A[0] and A[2] and A[3])))

    def expected(A: T.Buffer(4, "bool")):
        # If the simplification is applied to the OrNode, then a
        # redundant `(A[1] or A[0])` wouldn't be canceled out.  When
        # applying SimplifyAsAndOfOrs to the top-level AndNode, the
        # internal representation is `[[0,1], [1,0], [1,2], [1,3]]`, and
        # the redundant `[1,0]` can be removed.
        #
        # If the simplification were only applied when encountering an
        # OrNode, the internal representation would be `[[0,1]]` during
        # the first call and `[[1,0], [1,2], [1,3]]` during the second
        # call.  As a result, the `[0,1]` and `[1,0]` representations
        # wouldn't occur within the same call, and the redundant `[1,0]`
        # wouldn't be removed.
        T.evaluate((A[0] or A[1]) and (A[1] or A[2]) and (A[1] or A[3]))
class TestRewriteAsAndOfOrsWithSimplificationBetweenGroups(BaseBeforeAfter):
    """Apply rewrite rules between OR groups that differ by a single element

    The expression `(k==20 and k!=30)` could be rewritten into `(k==20)`.
    However, by default these two terms must appear as part of an explicit
    part of the simplified expression.  The AndOfOr simplification checks for
    rewrite patterns of the form `(A or B) and (A or C)`, where `(B and C)` can
    simplify to a single expression `D`.  These can be rewritten to `(A or D)`.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i == 0 or j == 10 or k == 20) and (i == 0 or j == 10 or k != 30)

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        # (k == 20 and k != 30) collapses to (k == 20).
        A[0] = i == 0 or j == 10 or k == 20
class TestRewriteAsAndOfOrsWithSimplificationBetweenReorderedGroups(BaseBeforeAfter):
    """Rewrite rules between OR groups do not depend on order

    Like TestRewriteAsAndOfOrsWithSimplificationBetweenGroups, but the groups
    are ordered differently.  If this removes a group entirely, the result is
    ordered according to the first group in the expression.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i == 0 or j == 10 or k == 20) and (j == 10 or k != 30 or i == 0)

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = j == 10 or k == 20 or i == 0
class TestRewriteAsAndOfOrUsingSimplificationAcrossAnd(BaseBeforeAfter):
    """Apply AndNode rewrites to non-adjacent expressions

    The RewriteSimplifier rules only check for simplifications between
    left/right branches of an And/Or node.  Simplifications that would require
    rearranging components in a chain of And/Or nodes are not performed.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = (k == 20) and ((i == 0 or j == 10) and (k != 30))

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        # (k == 20 and k != 30) collapses to (k == 20) despite the
        # intervening (i == 0 or j == 10) clause.
        A[0] = (i == 0 or j == 10) and (k == 20)
class TestRewriteAsAndOfOrUsingSimplificationWithinOr(BaseBeforeAfter):
    """Apply OrNode rewrites to non-adjacent expressions

    The RewriteSimplifier rules only check for simplifications between
    left/right branches of an And/Or node.  Simplifications that would require
    rearranging components in a chain of And/Or nodes are not performed.

    This test validates that `(i == 20) or (i != 30)` can be rewritten to
    `(i != 30)`, even when there's an intervening clause between the
    clauses being simplified.
    """

    convert_boolean_to_and_of_ors = True

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = (i == 20) or (j == 0) or (i != 30)

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32, k: T.int32):
        A[0] = (j == 0) or (i != 30)
class TestConditionalFloorMod(BaseBeforeAfter):
    """A regression test for negative floormod denominator

    Previously, simplifying this function could throw an error.  First, the
    `canonical_simplify` would rewrite `floormod(0-i,2)` to the equivalent
    `floormod(i,-2)`.  Then, the rewrite_simplifier would enter a
    constrained context in which `floormod(i,-2)==1`.  Passing this
    expression to `ModularSet::EnterConstraint`, which previously did not
    support a negative value for the second argument, threw an error.

    The analogous failure mode never occurred for `truncmod`, because
    `truncmod(0-i,2)` would be canonicalized to `truncmod(i, -2) * -1`, and
    the pattern matching in `ModularSet` didn't recognize the constant
    factor.

    This failure mode was resolved by supporting negative arguments in
    `ModularSet`, using the same sign convention as is used by
    `canonical_simplify`.
    """

    def before(A: T.Buffer(1, "bool"), i: T.int32):
        if T.floormod(0 - i, 2) == 0:
            A[0] = T.floormod(i, 2) == 0

    def expected(A: T.Buffer(1, "bool"), i: T.int32):
        # Canonicalized form of the condition, with the body proven True.
        if T.floormod(i, -2) == 0:
            A[0] = True
class TestSimplifyRHSOfBooleanAndUsingLHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    In `A and B`, the result of `B` only matters when `A` is
    true, and can be simplified under that context.  This test
    simplifies `n < 10` under the assumption that `n < 5`.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 5 and n < 10

    def expected(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 5
class TestSimplifyLHSOfBooleanAndUsingRHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyRHSOfBooleanAndUsingLHS, but using the RHS to
    simplify the LHS.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 10 and n < 5

    def expected(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 5
class TestSimplifyRHSOfBooleanOrUsingLHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    In `A or B`, the result of `B` only matters when `A` is false, so
    `B` can be simplified under the assumption that `A` is false.
    This test simplifies `n < 5` under the assumption that `!(n < 10)`
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 10 or n < 5

    def expected(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 10
class TestSimplifyLHSOfBooleanOrUsingRHS(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyRHSOfBooleanOrUsingLHS, but using the RHS to
    simplify the LHS.
    """

    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 5 or n < 10

    def expected(A: T.Buffer(1, "bool"), n: T.int32):
        A[0] = n < 10
class TestSimplifyRHSOfBooleanAndUsingLHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    Like TestSimplifyRHSOfBooleanAndUsingLHS, but with variables in
    the conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 5 and n < m + 10

    def expected(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 5
class TestSimplifyLHSOfBooleanAndUsingRHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyLHSOfBooleanAndUsingRHS, but with variables in
    the conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 10 and n < m + 5

    def expected(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 5
class TestSimplifyRHSOfBooleanOrUsingLHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts.

    Like TestSimplifyRHSOfBooleanOrUsingLHS, but with variables in the
    conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 10 or n < m + 5

    def expected(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 10
class TestSimplifyLHSOfBooleanOrUsingRHSWithoutConst(BaseBeforeAfter):
    """Boolean expressions can introduce contexts for their arguments.

    Like TestSimplifyLHSOfBooleanOrUsingRHS, but with variables in the
    conditions, preventing ConstIntBoundAnalyzer from handling it.
    This proof requires the extension to transitively prove
    inequalities.
    """

    apply_constraints_to_boolean_branches = True
    transitively_prove_inequalities = True

    def before(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 5 or n < m + 10

    def expected(A: T.Buffer(1, "bool"), n: T.int32, m: T.int32):
        A[0] = n < m + 10
class TestProvableConditionWithOffset(BaseBeforeAfter):
    """Use scoped-constraint to prove inequalities"""

    transitively_prove_inequalities = False

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32):
        if i < j:
            # i < j directly implies i < j + 1, without transitive proofs.
            A[0] = i < j + 1

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32):
        if i < j:
            A[0] = True
class TestMostRestrictiveConditional(BaseBeforeAfter):
    """Preferentially prove part of a compound conditional.

    Even if we cannot prove a conditional as true or false on its own,
    proving that a conditional must satisfy a stronger condition may
    allow for later rewrites.  For example, if it is known that `a <= b`,
    then `a >= b` cannot be proven, but can be reduced to `a == b`.
    """

    i, j, k = [tvm.tir.Var(name, "int32") for name in "ijk"]
    tir_int = tvm.tir.IntImm("int32", 0)

    # Each case is (prior constraint, expression before, expression after).
    test_case = tvm.testing.parameter(
        (i <= tir_int, tir_int <= i, i == tir_int),
        (i <= tir_int, i != tir_int, i < tir_int),
        (i != tir_int, i <= tir_int, i < tir_int),
        (i != tir_int, tir_int <= i, tir_int < i),
        (i <= j, j <= i, j == i),
        (i <= j, i != j, i < j),
        (i != j, i <= j, i < j),
        (i != j, j <= i, j < i),
    )

    @tvm.testing.fixture
    def before(self, test_case):
        priors, expr_before, _ = test_case

        @T.prim_func
        def func(A: T.Buffer(1, "bool")):
            if priors:
                A[0] = expr_before

        return func

    @tvm.testing.fixture
    def expected(self, test_case):
        priors, _, expr_after = test_case

        @T.prim_func
        def func(A: T.Buffer(1, "bool")):
            if priors:
                A[0] = expr_after

        return func
# NOTE(review): This is a verbatim duplicate of the
# TestProvableConditionWithOffset class defined earlier in this file.
# Because Python rebinds the class name, this redefinition shadows the
# earlier one and pytest collects only this copy; one of the two should
# be deleted.
class TestProvableConditionWithOffset(BaseBeforeAfter):
    """Use scoped-constraint to prove inequalities"""

    transitively_prove_inequalities = False

    def before(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32):
        if i < j:
            A[0] = i < j + 1

    def expected(A: T.Buffer(1, "bool"), i: T.int32, j: T.int32):
        if i < j:
            A[0] = True
class TestAlteredBufferContentsWithPropagation(BaseBeforeAfter):
    """Propagation of data-dependent conditionals.

    A literal constraint must not be propagated if the values
    referenced may change.  TIR requires single assignment of
    variables, so Var objects may be assumed constant, but BufferLoad
    may not.

    Renamed from ``TestAlteredBufferContents``: a class of that name is
    already defined earlier in this file with different semantics, and
    redefining it here rebinds the module attribute, so pytest would
    silently skip the earlier test.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer((1,), "int32"), n: T.int32):
        if A[0] == n:
            A[0] = A[0] + 1
            # If the simplifier incorrectly uses the invalidated
            # A[0]==n condition required to reach this point, then it
            # will incorrectly simplify to the then-case.  If the
            # simplifier correctly determines that A[0] now contains
            # n+1, then it will correctly simplify to the else-case.
            if A[0] == n:
                A[0] = 5
            else:
                A[0] = 10

    def expected(A: T.Buffer((1,), "int32"), n: T.int32):
        if A[0] == n:
            A[0] = A[0] + 1
            A[0] = 10
class TestPossiblyAlteredBufferContents(BaseBeforeAfter):
    """No simplification of data-dependent conditionals.

    Like TestAlteredBufferContents, but the `m==0` conditional
    prevents the value of `A[0]` from being known at the point of the
    inner conditional, either as `A[0] == n` from the outer
    conditional or as `A[0] == n+1` from the write statement.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer((1,), "int32"), n: T.int32, m: T.int32):
        if A[0] == n:
            if m == 0:
                A[0] = A[0] + 1

            # A[0] may or may not have been incremented, so neither
            # branch can be removed.
            if A[0] == n:
                A[0] = 5
            else:
                A[0] = 10

    expected = before
class TestSimplifyInputAssumption(BaseBeforeAfter):
    """A T.assume annotation may be used to simplify"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32"), n: T.int32):
        T.evaluate(T.assume(n == 0))
        if n == 0:
            A[0] = 42

    def expected(A: T.Buffer(1, "int32"), n: T.int32):
        T.evaluate(T.assume(n == 0))
        A[0] = 42
# NOTE(review): This is a verbatim duplicate of the
# TestSimplifyInputAssumption class defined immediately above.  The
# redefinition rebinds the name, so pytest collects only this copy; one
# of the two should be deleted.
class TestSimplifyInputAssumption(BaseBeforeAfter):
    """A T.assume annotation may be used to simplify"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32"), n: T.int32):
        T.evaluate(T.assume(n == 0))
        if n == 0:
            A[0] = 42

    def expected(A: T.Buffer(1, "int32"), n: T.int32):
        T.evaluate(T.assume(n == 0))
        A[0] = 42
class TestNoSimplifyFromScopedInputAssumption(BaseBeforeAfter):
    """A T.assume inside a scope may not apply outside that scope"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32"), n: T.int32, m: T.int32):
        if m == 0:
            # Assumption only holds when m == 0; it must not leak out.
            T.evaluate(T.assume(n == 0))

        if n == 0:
            A[0] = 42

    expected = before
class TestSimplifyConditionalUsingBufferValue(BaseBeforeAfter):
    """Simplify a conditional using the known value in the buffer"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32")):
        A[0] = 0

        if A[0] == 0:
            A[0] = 42

    def expected(A: T.Buffer(1, "int32")):
        A[0] = 0
        A[0] = 42
class TestKeepExpressionSimplifyUsingBufferValue(BaseBeforeAfter):
    """Do not simplify expressions in general using known values in the buffer

    For now, because this is equivalent to inlining, preventing this
    usage from occurring.  Known buffer values may be used to prove
    conditionals, but should not be used for other simplifications.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32"), B: T.Buffer(1, "int32")):
        A[0] = 0
        # Must NOT be rewritten to B[0] = 0, even though A[0] is known.
        B[0] = A[0]

    expected = before
class TestSimplifyConditionalInLoopUsingBufferValue(BaseBeforeAfter):
    """Simplify a conditional using the known value in the buffer

    Like TestSimplifyConditionalUsingBufferValue, but the value used
    to simplify is set in a previous loop.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
        for i in T.serial(16):
            A[i] = i

        for j in T.serial(16):
            # A[j] == j is known from the first loop.
            if A[j] == j:
                B[j] = 42
            else:
                B[j] = 100

    def expected(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
        for i in T.serial(16):
            A[i] = i

        for j in T.serial(16):
            B[j] = 42
class TestSimplifyUsingBufferAssumption(BaseBeforeAfter):
    """A T.assume may apply to a buffer's contents"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(1, "int32")):
        T.evaluate(T.assume(A[0] == 0))

        if A[0] == 0:
            A[0] = 42

    def expected(A: T.Buffer(1, "int32")):
        T.evaluate(T.assume(A[0] == 0))
        A[0] = 42
class TestSimplifyUsingBufferAssumptionInLoop(BaseBeforeAfter):
    """An assumption about buffer contents may apply to a range"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            T.evaluate(T.assume(A[i] == i))

        for i in T.serial(16):
            # A[i] == i with i in [0, 15] implies A[i] < 100.
            if A[i] < 100:
                A[i] = 0

    def expected(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            T.evaluate(T.assume(A[i] == i))

        for i in T.serial(16):
            A[i] = 0
class TestSimplifyUsingPartiallyKnownBufferConditional(BaseBeforeAfter):
    """An assumption about buffer contents may apply to only part of a buffer"""

    propagate_knowns_to_prove_conditional = True
    apply_constraints_to_boolean_branches = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if 14 <= i:
                T.evaluate(T.assume(A[i] == 0))

        for i in T.serial(16):
            if 14 <= i:
                # Assumption covers this range, so the check is provable.
                if A[i] == 0:
                    A[i] = 42

            else:
                # Assumption does not cover i < 14; the check must remain.
                if A[i] == 0:
                    A[i] = 100

    def expected(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if 14 <= i:
                T.evaluate(T.assume(A[i] == 0))

        for i in T.serial(16):
            if 14 <= i:
                A[i] = 42

            else:
                if A[i] == 0:
                    A[i] = 100
class TestSimplifyUsingPartiallyKnownBufferExpression(BaseBeforeAfter):
    """An assumption about buffer contents may apply to only part of a buffer

    Like TestSimplifyUsingPartiallyKnownBufferConditional, but the
    conditional is expressed as part of T.assume, instead of in the
    control flow.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            T.evaluate(T.assume(i < 14 or A[i] == 0))

        for i in T.serial(16):
            if 14 <= i:
                if A[i] == 0:
                    A[i] = 42

    def expected(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            T.evaluate(T.assume(i < 14 or A[i] == 0))

        for i in T.serial(16):
            if 14 <= i:
                A[i] = 42
class TestNoSimplificationIfPredicateNotMet(BaseBeforeAfter):
    """Assumptions about buffer contents must apply to all cases to be used

    Like TestSimplifyUsingPartiallyKnownBufferConditional, but the
    predicate in the second loop does not match the predicate in the
    first loop.  Therefore, the `T.assume` refers to a different set
    of indices.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            if 14 <= i:
                T.evaluate(T.assume(A[i] == 0))

        for i in T.serial(16):
            # i < 14 is disjoint from the assumed range 14 <= i.
            if i < 14:
                if A[i] == 0:
                    A[i] = 42

    expected = before
class TestNoSimplifyUsingInvalidatedScopedConstraint(BaseBeforeAfter):
    """A write may not be used for proofs outside its conditional"""

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            # The A[i] = 0 write only happens when i == 0, so it cannot
            # prove A[i] == 0 for the check outside that conditional.
            if i == 0:
                A[i] = 0

            if A[i] == 0:
                A[i] = 42

    expected = before
class TestNoSimplifyUsingOverwrittenValue(BaseBeforeAfter):
    """A write that may have been overwritten may not be treated as known

    The appearance of "A[i] = 5" must prevent the earlier constraint
    from being used for simplification.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32")):
        for i in T.serial(16):
            T.evaluate(T.assume(A[i] == 0))

        for i in T.serial(16):
            if i == 0:
                A[i] = 5

            if A[i] == 0:
                A[i] = 42

    expected = before
class TestNoSimplifyUsingLoopDependentBufferValue(BaseBeforeAfter):
    """Do not simplify assuming reads are invariant

    If a buffer's value changes across loop iterations, the buffer's
    value before the loop should not be used to simplify conditionals
    within the loop.
    """

    propagate_knowns_to_prove_conditional = True

    def before(A: T.Buffer(16, "int32"), B: T.Buffer(1, "int32")):
        B[0] = 0
        for i in T.serial(16):
            # B[0] accumulates across iterations, so B[0] < 10 cannot
            # be proven from the pre-loop value.
            if B[0] < 10:
                B[0] = A[i] * 2 + B[0]
            else:
                B[0] = A[i] + B[0]

    expected = before
class TestSimplifyPriorToOverwrittenValue(BaseBeforeAfter):
"""A known value may be used until it is overwritten
Like TestNoSimplifyUsingOverwrittenValue, but the use of the
known `A[i]` value occurs before it is overwritten.
Like TestNoSimplifyUsingLoopDependentBufferValue, but the loop
iterations are all independent.
"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
T.evaluate(T.assume(A[i] == 0))
for i in T.serial(16):
if A[i] == 0:
A[i] = 17
if i == 0:
A[i] = 5
if A[i] == 0:
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
T.evaluate(T.assume(A[i] == 0))
for i in T.serial(16):
A[i] = 17
if i == 0:
A[i] = 5
if A[i] == 0:
A[i] = 42
class TestSimplifyElementWiseUsingPreLoopBufferValue(BaseBeforeAfter):
"""Allow data-Do not simplify assuming reads are invariant
If an element-wise loop reads and overwrites a buffer value, the
pre-loop buffer value may be used to simplify conditions that
occur prior to the write.
"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
for i in T.serial(16):
B[i] = 0
for i in T.serial(16):
if B[i] < 10:
B[i] = A[i] * 2 + B[i]
else:
B[i] = A[i] + B[i]
def expected(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
for i in T.serial(16):
B[i] = 0
for i in T.serial(16):
B[i] = A[i] * 2 + B[i]
class TestSimplifyNonConditional(BaseBeforeAfter):
"""Propagate a known value to later expressions."""
propagate_knowns_to_simplify_expressions = True
def before(A: T.Buffer(1, "int32")):
A[0] = 0
A[0] = A[0] + 1
def expected(A: T.Buffer(1, "int32")):
A[0] = 0
A[0] = 1
class TestSuppressSimplifyNonConditional(BaseBeforeAfter):
"""Propagate a known value to later expressions.
Like TestSimplifyNonConditional, but with data-propagation turned off.
"""
propagate_knowns_to_simplify_expressions = False
def before(A: T.Buffer(1, "int32")):
A[0] = 0
A[0] = A[0] + 1
expected = before
class TestSimplifyUsingTransitiveKnownBufferValue(BaseBeforeAfter):
"""Propagate known buffer values
If a known value of a buffer depends on another known value, it
can be tracked backwards through both.
"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(1, "int32")):
T.evaluate(T.assume(A[0] == 0))
A[0] = A[0] + 1
A[0] = A[0] + 1
A[0] = A[0] + 1
if A[0] == 3:
A[0] = 42
def expected(A: T.Buffer(1, "int32")):
T.evaluate(T.assume(A[0] == 0))
A[0] = A[0] + 1
A[0] = A[0] + 1
A[0] = A[0] + 1
A[0] = 42
class TestSimplifyRampIndexBroadcastValue(BaseBeforeAfter):
"""Simplifications involving buffer loads with ramp indices"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(4, "int32")):
A[T.ramp(0, 1, 4)] = T.broadcast(0, 4)
if A[0] == 0:
A[0] = 42
if A[1] == 0:
A[1] = 60
def expected(A: T.Buffer(4, "int32")):
A[T.ramp(0, 1, 4)] = T.broadcast(0, 4)
A[0] = 42
A[1] = 60
class TestSimplifyRampIndexRampValue(BaseBeforeAfter):
"""Simplifications involving buffer loads with ramp indices"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(4, "int32")):
A[T.ramp(0, 1, 4)] = T.ramp(11, 1, 4)
if A[0] == 11:
A[0] = 42
if A[1] == 12:
A[1] = 60
def expected(A: T.Buffer(4, "int32")):
A[T.ramp(0, 1, 4)] = T.ramp(11, 1, 4)
A[0] = 42
A[1] = 60
class TestSimplifyUsingPartiallyProvenBufferValueGather(BaseBeforeAfter):
"""Propagate known buffer values in part of buffer.
Even if a constraint can't be solved for all values in an
assignment, it may be provable in part of a buffer. Here, the
known 0 values in the padding of A produces known 0 values in the
padding of B.
"""
transitively_prove_inequalities = True
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(24, "int32"), B: T.Buffer(24, "int32"), F: T.Buffer(3, "int32")):
# A has non-zero values only in the range 3 <= i < 17
for i in T.serial(24):
T.evaluate(T.assume(((3 <= i) and (i < 17)) or A[i] == 0))
# After convoluting with F, B has non-zero values only in the
# range 3 <= i < 19.
for i in T.serial(24):
B[i] = 0
for f in T.serial(3):
if 0 <= i - f:
B[i] = B[i] + A[i - f] * F[f]
# Which means that this loop is unnecessary. It would be
# removed entirely in tir.transform.RemoveNoOp, but here we
# want to test that the simplification works as intended.
for i in T.serial(24):
if i < 3 or 19 <= i:
if B[i] != 0:
B[i] = 0
def expected(A: T.Buffer(24, "int32"), B: T.Buffer(24, "int32"), F: T.Buffer(3, "int32")):
for i in T.serial(24):
T.evaluate(T.assume(((3 <= i) and (i < 17)) or A[i] == 0))
for i in T.serial(24):
B[i] = 0
for f in T.serial(3):
if 0 <= i - f:
B[i] = B[i] + A[i - f] * F[f]
for i in T.serial(24):
if i < 3 or 19 <= i:
T.evaluate(0)
class TestSimplifyUsingPartiallyProvenBufferValueScatter(BaseBeforeAfter):
"""Propagate known buffer values in part of buffer.
Like TestSimplifyUsingPartiallyProvenBufferValueGather, but the
compute loop is over the input buffer A, rather than the output
buffer B.
"""
propagate_knowns_to_prove_conditional = True
def before(A: T.Buffer(24, "int32"), B: T.Buffer(24, "int32"), F: T.Buffer(3, "int32")):
# A has non-zero values only in the range 3 <= i < 17
for i in T.serial(24):
T.evaluate(T.assume(((3 <= i) and (i < 17)) or A[i] == 0))
for i in T.serial(24):
B[i] = 0
# After convoluting with F, B has non-zero values only in the
# range 3 <= i < 19.
for i in T.serial(24):
for f in T.serial(3):
if i + f >= 0 and i + f < 24:
B[i + f] = B[i + f] + A[i] * F[f]
# Which means that this loop is unnecessary. It actually gets
# removed in tir.transform.RemoveNoOp, but here we want to
# test that the simplification works as intended.
for i in T.serial(24):
if i < 3 or 19 <= i:
if B[i] != 0:
B[i] = 0
def expected(A: T.Buffer(24, "int32"), B: T.Buffer(24, "int32"), F: T.Buffer(3, "int32")):
for i in T.serial(24):
T.evaluate(T.assume(((3 <= i) and (i < 17)) or A[i] == 0))
for i in T.serial(24):
B[i] = 0
for i in T.serial(24):
for f in T.serial(3):
if i + f < 24:
B[i + f] = B[i + f] + A[i] * F[f]
for i in T.serial(24):
if i < 3 or 19 <= i:
T.evaluate(0)
class TestSimplifyBufferStore(BaseBeforeAfter):
"""Simplification using prior known"""
propagate_knowns_to_simplify_expressions = True
def before(A: T.Buffer(1, "int32")):
A[0] = 5
A[0] = A[0] + 7
def expected(A: T.Buffer(1, "int32")):
A[0] = 5
A[0] = 12
class TestSimplifyTrivialLetBufferVar(BaseBeforeAfter):
"""A LetStmt used in a buffer definition should be retained"""
def before(A_ptr: T.handle("float32")):
A_ptr_redef: T.handle("float32") = A_ptr
A = T.decl_buffer(1, "float32", data=A_ptr_redef)
A[0] = 42.0
expected = before
class TestSimplifyTrivialLetElemOffset(BaseBeforeAfter):
"""A LetStmt used in a buffer definition should be retained"""
def before(A_ptr: T.handle("float32"), A_offset: T.int32):
A_offset_redef = A_offset
A = T.decl_buffer(1, "float32", elem_offset=A_offset_redef, data=A_ptr)
A[0] = 42.0
expected = before
class TestSimplifyTrivialLetShape(BaseBeforeAfter):
"""A LetStmt used in a buffer definition should be retained"""
def before(A_ptr: T.handle("float32"), A_size: T.int32):
A_size_redef = A_size
A = T.decl_buffer([A_size_redef], "float32", data=A_ptr)
A[0] = 42.0
expected = before
class TestSimplifyTrivialLetStride(BaseBeforeAfter):
"""A LetStmt used in a buffer definition should be retained"""
def before(A_ptr: T.handle("float32"), A_stride: T.int32):
A_stride_redef = A_stride
A = T.decl_buffer(1, "float32", strides=[A_stride_redef], data=A_ptr)
A[0] = 42.0
expected = before
class TestBufferShapeConstraint(BaseBeforeAfter):
def before(a: T.handle):
n = T.int64()
A = T.match_buffer(a, (n * 32,), "float32")
A[T.min(T.int64(0), n)] = T.float32(0)
def expected(a: T.handle):
n = T.int64()
A = T.match_buffer(a, (n * 32,), "float32")
A[T.int64(0)] = T.float32(0)
class TestBufferShapeConstraintWithOffset(BaseBeforeAfter):
def before(a: T.handle):
n = T.int64()
A = T.match_buffer(a, (n * 32 + 1 - 2,), "float32")
A[T.min(T.int64(1), n)] = T.float32(0)
def expected(a: T.handle):
n = T.int64()
A = T.match_buffer(a, (n * 32 + 1 - 2,), "float32")
A[T.int64(1)] = T.float32(0)
if __name__ == "__main__":
tvm.testing.main()
| 54,798 | 30.100454 | 110 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_add_rfactor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
from tvm.te import create_prim_func
def test_cpu_matmul():
@T.prim_func
def cpu_matmul_0(
A: T.Buffer((4, 512), "float32"),
B: T.Buffer((512, 4), "float32"),
C: T.Buffer((4, 4), "float32"),
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
for i0, i1, i2 in T.grid(4, 4, 512):
with T.block("C"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(A[i, k], B[k, j])
T.writes(C[i, j])
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + A[i, k] * B[k, j]
@T.prim_func
def cpu_matmul_1(
A: T.Buffer((4, 512), "float32"),
B: T.Buffer((512, 4), "float32"),
C: T.Buffer((4, 4), "float32"),
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_rf = T.alloc_buffer([4, 4, 128], dtype="float32")
for i0, i1, i2_0, i2_1 in T.grid(4, 4, 4, 128):
with T.block("C_rf"):
vi2_1, i, j, vi2_0 = T.axis.remap("SSSR", [i2_1, i0, i1, i2_0])
T.reads(A[i, vi2_0 * 128 + vi2_1], B[vi2_0 * 128 + vi2_1, j])
T.writes(C_rf[i, j, vi2_1])
with T.init():
C_rf[i, j, vi2_1] = T.float32(0)
C_rf[i, j, vi2_1] = (
C_rf[i, j, vi2_1] + A[i, vi2_0 * 128 + vi2_1] * B[vi2_0 * 128 + vi2_1, j]
)
for i0, i1, i2_1 in T.grid(4, 4, 128):
with T.block("C"):
vi2_1, i, j = T.axis.remap("RSS", [i2_1, i0, i1])
T.reads(C_rf[i, j, vi2_1])
T.writes(C[i, j])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + C_rf[i, j, vi2_1]
@T.prim_func
def cpu_matmul_2(
A: T.Buffer((4, 512), "float32"),
B: T.Buffer((512, 4), "float32"),
C: T.Buffer((4, 4), "float32"),
) -> None:
T.func_attr({"global_symbol": "main", "tir.noalias": True})
C_rf = T.alloc_buffer([4, 4, 4], dtype="float32")
for i0, i1, i2_0, i2_1 in T.grid(4, 4, 4, 128):
with T.block("C_rf"):
vi2_0, i, j, vi2_1 = T.axis.remap("SSSR", [i2_0, i0, i1, i2_1])
T.reads(A[i, vi2_0 * 128 + vi2_1], B[vi2_0 * 128 + vi2_1, j])
T.writes(C_rf[i, j, vi2_0])
with T.init():
C_rf[i, j, vi2_0] = T.float32(0)
C_rf[i, j, vi2_0] = (
C_rf[i, j, vi2_0] + A[i, vi2_0 * 128 + vi2_1] * B[vi2_0 * 128 + vi2_1, j]
)
for i0, i1, i2_0 in T.grid(4, 4, 4):
with T.block("C"):
vi2_0, i, j = T.axis.remap("RSS", [i2_0, i0, i1])
T.reads(C_rf[i, j, vi2_0])
T.writes(C[i, j])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
C[i, j] = T.float32(0)
C[i, j] = C[i, j] + C_rf[i, j, vi2_0]
decision_0 = [] # type: ignore
decision_1 = [
("SamplePerfectTile", [4, 128]),
]
decision_2 = [
("SamplePerfectTile", [4, 128]),
]
mod = create_prim_func(te_workload.matmul(n=4, m=4, k=512))
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm --num-cores=32"),
types=ms.schedule_rule.AddRFactor,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[cpu_matmul_0, cpu_matmul_1, cpu_matmul_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
def test_cpu_argmax():
@T.prim_func
def argmax(
idx: T.Buffer((128, 128), "int32"),
val: T.Buffer((128, 128), "float32"),
argmax_v0: T.Buffer((128,), "int32"),
argmax_v1: T.Buffer((128,), "float32"),
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i = T.axis.spatial(128, i0)
k = T.axis.reduce(128, i1)
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.min_value("float32")
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_0(
idx: T.Buffer((128, 128), "int32"),
val: T.Buffer((128, 128), "float32"),
argmax_v0: T.Buffer(128, "int32"),
argmax_v1: T.Buffer(128, "float32"),
) -> None:
for i0, i1 in T.grid(128, 128):
with T.block("argmax"):
i, k = T.axis.remap("SR", [i0, i1])
T.reads(idx[i, k], val[i, k])
T.writes(argmax_v0[i], argmax_v1[i])
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_1(
idx: T.Buffer((128, 128), "int32"),
val: T.Buffer((128, 128), "float32"),
argmax_v0: T.Buffer(128, "int32"),
argmax_v1: T.Buffer(128, "float32"),
) -> None:
argmax_v0_rf = T.alloc_buffer([128, 16], dtype="int32")
argmax_v1_rf = T.alloc_buffer([128, 16], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 8, 16):
with T.block("argmax_rf"):
vi1_1, i, vi1_0 = T.axis.remap("SSR", [i1_1, i0, i1_0])
T.reads(idx[i, vi1_0 * 16 + vi1_1], val[i, vi1_0 * 16 + vi1_1])
T.writes(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
with T.init():
argmax_v0_rf[i, vi1_1] = -1
argmax_v1_rf[i, vi1_1] = T.float32(-3.4028234663852886e38)
v_argmax_v0_rf: T.int32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v0_rf[i, vi1_1],
idx[i, vi1_0 * 16 + vi1_1],
)
v_argmax_v1_rf: T.float32 = T.Select(
argmax_v1_rf[i, vi1_1] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v1_rf[i, vi1_1],
val[i, vi1_0 * 16 + vi1_1],
)
argmax_v0_rf[i, vi1_1] = v_argmax_v0_rf
argmax_v1_rf[i, vi1_1] = v_argmax_v1_rf
for i0, i1_1 in T.grid(128, 16):
with T.block("argmax"):
vi1_1, i = T.axis.remap("RS", [i1_1, i0])
T.reads(argmax_v0_rf[i, vi1_1], argmax_v1_rf[i, vi1_1])
T.writes(argmax_v0[i], argmax_v1[i])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v0[i], argmax_v0_rf[i, vi1_1]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_1], argmax_v1[i], argmax_v1_rf[i, vi1_1]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
@T.prim_func
def argmax_2(
idx: T.Buffer((128, 128), "int32"),
val: T.Buffer((128, 128), "float32"),
argmax_v0: T.Buffer(128, "int32"),
argmax_v1: T.Buffer(128, "float32"),
) -> None:
# body
# with T.block("root")
argmax_v0_rf = T.alloc_buffer([128, 8], dtype="int32")
argmax_v1_rf = T.alloc_buffer([128, 8], dtype="float32")
for i0, i1_0, i1_1 in T.grid(128, 8, 16):
with T.block("argmax_rf"):
vi1_0, i, vi1_1 = T.axis.remap("SSR", [i1_0, i0, i1_1])
T.reads(idx[i, vi1_0 * 16 + vi1_1], val[i, vi1_0 * 16 + vi1_1])
T.writes(argmax_v0_rf[i, vi1_0], argmax_v1_rf[i, vi1_0])
with T.init():
argmax_v0_rf[i, vi1_0] = -1
argmax_v1_rf[i, vi1_0] = T.float32(-3.4028234663852886e38)
v_argmax_v0_rf: T.int32 = T.Select(
argmax_v1_rf[i, vi1_0] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v0_rf[i, vi1_0],
idx[i, vi1_0 * 16 + vi1_1],
)
v_argmax_v1_rf: T.float32 = T.Select(
argmax_v1_rf[i, vi1_0] >= val[i, vi1_0 * 16 + vi1_1],
argmax_v1_rf[i, vi1_0],
val[i, vi1_0 * 16 + vi1_1],
)
argmax_v0_rf[i, vi1_0] = v_argmax_v0_rf
argmax_v1_rf[i, vi1_0] = v_argmax_v1_rf
for i0, i1_0 in T.grid(128, 8):
with T.block("argmax"):
vi1_0, i = T.axis.remap("RS", [i1_0, i0])
T.reads(argmax_v0_rf[i, vi1_0], argmax_v1_rf[i, vi1_0])
T.writes(argmax_v0[i], argmax_v1[i])
T.block_attr({"meta_schedule.random_compute_producer": 1})
with T.init():
argmax_v0[i] = -1
argmax_v1[i] = T.float32(-3.4028234663852886e38)
v_argmax_v0: T.int32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_0], argmax_v0[i], argmax_v0_rf[i, vi1_0]
)
v_argmax_v1: T.float32 = T.Select(
argmax_v1[i] >= argmax_v1_rf[i, vi1_0], argmax_v1[i], argmax_v1_rf[i, vi1_0]
)
argmax_v0[i] = v_argmax_v0
argmax_v1[i] = v_argmax_v1
decision_0 = [] # type: ignore
decision_1 = [
("SamplePerfectTile", [8, 16]),
]
decision_2 = [
("SamplePerfectTile", [8, 16]),
]
mod = argmax
actual = generate_design_space(
kind="llvm",
mod=mod,
target=Target("llvm --num-cores=32"),
types=ms.schedule_rule.AddRFactor,
)
check_sketches(
mod,
sketches=actual,
expected_mods=[argmax_0, argmax_1, argmax_2],
expected_decisions=[decision_0, decision_1, decision_2],
)
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_argmax()
| 12,043 | 40.388316 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_te_schedule_lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_lstm_cell_inline():
num_step = 128
num_input = 256
num_hidden = 1152
batch_size = 4
# Global transition matrix
X = te.placeholder((num_step - 1, batch_size, num_input), name="X")
Wi2h = te.placeholder((4, num_hidden, num_input), name="Wi2h")
Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
# h: output hidden state, c: cell state.
s_state_h = te.placeholder((num_step, batch_size, num_hidden))
s_state_c = te.placeholder((num_step, batch_size, num_hidden))
s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
# LSTM transition
k = te.reduce_axis((0, num_input), name="ki2h")
s_i2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(X[t - 1, i, k] * Wi2h[x, j, k], axis=k),
name="s_i2h",
)
k = te.reduce_axis((0, num_hidden), name="ki2h")
s_h2h = te.compute(
(num_step, 4, batch_size, num_hidden),
lambda t, x, i, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
name="s_h2h",
)
# Gate rules
gates = te.compute(s_i2h.shape, lambda *i: s_i2h(*i) + s_h2h(*i), name="gates")
gshape = (num_step, batch_size, num_hidden)
in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 0, i, j]), name="in_gate")
in_transform = te.compute(
gshape, lambda t, i, j: te.tanh(gates[t, 1, i, j]), name="in_transform"
)
forget_gate = te.compute(
gshape, lambda t, i, j: te.sigmoid(gates[t, 2, i, j]), name="forget_gate"
)
out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, 3, i, j]), name="out_gate")
next_c = te.compute(
gshape,
lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
+ in_gate[t, i, j] * in_transform[t, i, j],
name="next_c",
)
next_h = te.compute(
gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
)
update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
# schedule
scan_h, scan_c = tvm.te.scan(
[s_init_h, s_init_c],
[update_h, update_c],
[s_state_h, s_state_c],
inputs=[X],
name="lstm_scan",
)
# schedule
s = te.create_schedule(scan_h.op)
# Inline gate computations
s[gates].compute_inline()
s[in_gate].compute_inline()
s[in_transform].compute_inline()
s[forget_gate].compute_inline()
s[out_gate].compute_inline()
# verify we can lower correctly
tvm.lower(s, [X, Wi2h, Wh2h, scan_h, scan_c])
if __name__ == "__main__":
test_lstm_cell_inline()
| 3,627 | 38.434783 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_trace_apply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.meta_schedule as ms
import tvm.testing
from tvm.script import tir as T
from tvm.target import Target
from tvm.target.codegen import llvm_lookup_intrinsic_id
from tvm.tir import Schedule, floordiv, floormod
from tvm.tir.tensor_intrin.cuda import *
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN
# fmt: off
@tvm.script.ir_module
class Dense:
@T.prim_func
def main(
p0: T.Buffer((128, 128), "float32"),
p1: T.Buffer((128, 128), "float32"),
T_matmul_NT: T.Buffer((128, 128), "float32"),
) -> None:
# function attr dict
T.func_attr({"layout_free_buffers": [1], "tir.noalias": True, "global_symbol": "main"})
# body
# with T.block("root")
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("T_matmul_NT"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(p0[i, k], p1[j, k])
T.writes(T_matmul_NT[i, j])
T.block_attr({"layout_free_placeholders": []})
with T.init():
T_matmul_NT[i, j] = T.float32(0)
T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1[j, k]
@tvm.script.ir_module
class DenseAdd:
@T.prim_func
def main(
p0: T.Buffer((128, 128), "float32"),
p1: T.Buffer((128, 128), "float32"),
T_add: T.Buffer((128, 128), "float32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
# body
# with T.block("root")
T_matmul_NT = T.alloc_buffer([128, 128], dtype="float32")
compile_engine_const = T.alloc_buffer([], dtype="float32")
for i0, i1, i2 in T.grid(128, 128, 128):
with T.block("T_matmul_NT"):
i, j, k = T.axis.remap("SSR", [i0, i1, i2])
T.reads(p0[i, k], p1[j, k])
T.writes(T_matmul_NT[i, j])
T.block_attr({"layout_free_placeholders": []})
with T.init():
T_matmul_NT[i, j] = T.float32(0)
T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1[j, k]
with T.block("compile_engine_const"):
vi = T.axis.spatial(1, 0)
T.reads()
T.writes(compile_engine_const[()])
compile_engine_const[()] = T.float32(1)
for i0, i1 in T.grid(128, 128):
with T.block("T_add"):
ax0, ax1 = T.axis.remap("SS", [i0, i1])
T.reads(T_matmul_NT[ax0, ax1], compile_engine_const[()])
T.writes(T_add[ax0, ax1])
T_add[ax0, ax1] = T_matmul_NT[ax0, ax1] + compile_engine_const[()]
@tvm.script.ir_module
class DenseAdd_scheduled_cpu:
@T.prim_func
def main(
p0: T.Buffer((128, 128), "float32"),
p1: T.Buffer((128, 128), "float32"),
T_add: T.Buffer((128, 128), "float32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
# body
# with T.block("root")
T_matmul_NT_global = T.alloc_buffer([128, 128], dtype="float32")
p1_global = T.alloc_buffer([2, 128, 64], dtype="float32")
for ax0, ax1 in T.grid(128, 128):
with T.block("p1_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(p1[v0, v1])
T.writes(p1_global[v0 // 64, v1, v0 % 64])
T.block_attr({"meta_schedule.layout_rewrite_preproc": 1})
p1_global[v0 // 64, v1, v0 % 64] = p1[v0, v1]
for i0_0_i1_0_fused_fused in T.parallel(4):
for i0_1, i1_1 in T.grid(8, 1):
for i0_2_init, i1_2_init, i0_3_init in T.grid(4, 1, 2):
for i1_3_fused_init in T.vectorized(64):
with T.block("T_matmul_NT_init"):
i = T.axis.spatial(
128,
i0_0_i1_0_fused_fused // 2 * 64
+ i0_1 * 8
+ i0_2_init * 2
+ i0_3_init,
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused_fused % 2 * 64
+ i1_1 * 64
+ i1_2_init * 64
+ i1_3_fused_init,
)
T.reads()
T.writes(T_matmul_NT_global[i, j])
T.block_attr(
{
"layout_free_placeholders": [],
"meta_schedule.tiling_structure": "SSRSRS",
}
)
T_matmul_NT_global[i, j] = T.float32(0)
for i2_0, i0_2, i1_2, i2_1, i0_3 in T.grid(128, 4, 1, 1, 2):
for i1_3_fused in T.vectorized(64):
with T.block("T_matmul_NT_update"):
i = T.axis.spatial(
128, i0_0_i1_0_fused_fused // 2 * 64 + i0_1 * 8 + i0_2 * 2 + i0_3
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused_fused % 2 * 64 + i1_1 * 64 + i1_2 * 64 + i1_3_fused,
)
k = T.axis.reduce(128, i2_0 + i2_1)
T.reads(
T_matmul_NT_global[i, j], p0[i, k], p1_global[j // 64, k, j % 64]
)
T.writes(T_matmul_NT_global[i, j])
T.block_attr(
{
"layout_free_placeholders": [],
"meta_schedule.tiling_structure": "SSRSRS",
}
)
T_matmul_NT_global[i, j] = (
T_matmul_NT_global[i, j] + p0[i, k] * p1_global[j // 64, k, j % 64]
)
for ax0 in T.serial(64):
for ax1_fused in T.vectorized(64):
with T.block("T_matmul_NT_global"):
v0 = T.axis.spatial(128, i0_0_i1_0_fused_fused // 2 * 64 + ax0)
v1 = T.axis.spatial(128, i0_0_i1_0_fused_fused % 2 * 64 + ax1_fused)
T.reads(T_matmul_NT_global[v0, v1])
T.writes(T_add[v0, v1])
T_add[v0, v1] = T_matmul_NT_global[v0, v1] + T.float32(1)
@tvm.script.ir_module
class DenseAdd_cpu_no_write_cache:
@T.prim_func
def main(p0: T.Buffer((128, 128), "float32"), p1: T.Buffer((128, 128), "float32"), T_add: T.Buffer((128, 128), "float32")) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
# body
# with T.block("root")
T_matmul_NT = T.alloc_buffer([128, 128], dtype="float32")
p1_global = T.alloc_buffer([8, 4, 16, 32], dtype="float32")
for ax0, ax1 in T.grid(128, 128):
with T.block("p1_global"):
v0, v1 = T.axis.remap("SS", [ax0, ax1])
T.reads(p1[v0, v1])
T.writes(p1_global[v1 // 16, v0 // 32, v1 % 16, v0 % 32])
T.block_attr({"meta_schedule.layout_rewrite_preproc":1})
p1_global[v1 // 16, v0 // 32, v1 % 16, v0 % 32] = p1[v0, v1]
for i0_0_i1_0_i0_1_i1_1_fused in T.parallel(16, annotations={"pragma_auto_unroll_max_step":16, "pragma_unroll_explicit":1}):
for i0_2_init, i1_2_init, i0_3_init in T.grid(4, 4, 2):
for i1_3_fused_init in T.vectorized(32):
with T.block("T_matmul_NT_init"):
i = T.axis.spatial(128, i0_0_i1_0_i0_1_i1_1_fused * 8 + i0_2_init * 2 + i0_3_init)
j = T.axis.spatial(128, i1_2_init * 32 + i1_3_fused_init)
T.reads()
T.writes(T_matmul_NT[i, j])
T.block_attr({"layout_free_placeholders":[], "meta_schedule.tiling_structure":"SSRSRS"})
T_matmul_NT[i, j] = T.float32(0)
for i2_0, i0_2, i1_2, i2_1, i0_3 in T.grid(8, 4, 4, 16, 2):
for i1_3_fused in T.vectorized(32):
with T.block("T_matmul_NT_update"):
i = T.axis.spatial(128, i0_0_i1_0_i0_1_i1_1_fused * 8 + i0_2 * 2 + i0_3)
j = T.axis.spatial(128, i1_2 * 32 + i1_3_fused)
k = T.axis.reduce(128, i2_0 * 16 + i2_1)
T.reads(T_matmul_NT[i, j], p0[i, k], p1_global[k // 16, j // 32, k % 16, j % 32])
T.writes(T_matmul_NT[i, j])
T.block_attr({"layout_free_placeholders":[], "meta_schedule.tiling_structure":"SSRSRS"})
T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1_global[k // 16, j // 32, k % 16, j % 32]
for i0_i1_fused in T.parallel(16384):
with T.block("T_add"):
ax0 = T.axis.spatial(128, i0_i1_fused // 128)
ax1 = T.axis.spatial(128, i0_i1_fused % 128)
T.reads(T_matmul_NT[ax0, ax1])
T.writes(T_add[ax0, ax1])
T_add[ax0, ax1] = T_matmul_NT[ax0, ax1] + T.float32(1)
@tvm.script.ir_module
class DenseAdd_scheduled_gpu:
@T.prim_func
def main(
p0: T.Buffer((128, 128), "float32"),
p1: T.Buffer((128, 128), "float32"),
T_add: T.Buffer((128, 128), "float32"),
) -> None:
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
# body
# with T.block("root")
T_matmul_NT_local = T.alloc_buffer([128, 128], dtype="float32", scope="local")
p0_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
p1_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
for i0_0_i1_0_fused in T.thread_binding(
32,
thread="blockIdx.x",
annotations={"pragma_auto_unroll_max_step": 64, "pragma_unroll_explicit": 1},
):
for i0_1_i1_1_fused in T.thread_binding(1, thread="vthread.x"):
for i0_2_i1_2_fused in T.thread_binding(128, thread="threadIdx.x"):
for i0_3_init, i1_3_init, i0_4_init, i1_4_init in T.grid(1, 4, 1, 1):
with T.block("T_matmul_NT_init"):
i = T.axis.spatial(
128,
i0_0_i1_0_fused // 4 * 16
+ i0_2_i1_2_fused // 8
+ i0_3_init
+ i0_4_init,
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 4 * 32
+ i0_2_i1_2_fused % 8 * 4
+ i1_3_init
+ i1_4_init,
)
T.reads()
T.writes(T_matmul_NT_local[i, j])
T.block_attr(
{
"layout_free_placeholders": [],
"meta_schedule.thread_extent_high_inclusive": 256,
"meta_schedule.thread_extent_low_inclusive": 16,
"meta_schedule.tiling_structure": "SSSRRSRS",
}
)
T_matmul_NT_local[i, j] = T.float32(0)
for i2_0 in T.serial(32):
for ax0_ax1_fused_0 in T.serial(1):
for ax0_ax1_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for ax0_ax1_fused_2 in T.vectorized(2):
with T.block("p0_shared"):
T.where(
(ax0_ax1_fused_0 * 128 + ax0_ax1_fused_1) * 2
+ ax0_ax1_fused_2
< 64
)
v0 = T.axis.spatial(
128,
i0_0_i1_0_fused // 4 * 16
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 2
+ ax0_ax1_fused_2
)
// 4,
)
v1 = T.axis.spatial(
128,
i2_0 * 4
+ (
ax0_ax1_fused_0 * 256
+ ax0_ax1_fused_1 * 2
+ ax0_ax1_fused_2
)
% 4,
)
T.reads(p0[v0, v1])
T.writes(p0_shared[v0, v1])
p0_shared[v0, v1] = p0[v0, v1]
for ax0_ax1_fused_0 in T.serial(1):
for ax0_ax1_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
for ax0_ax1_fused_2 in T.vectorized(4):
with T.block("p1_shared"):
T.where(
(ax0_ax1_fused_0 * 128 + ax0_ax1_fused_1) * 4
+ ax0_ax1_fused_2
< 128
)
v0 = T.axis.spatial(
128,
i0_0_i1_0_fused % 4 * 32
+ (
ax0_ax1_fused_0 * 512
+ ax0_ax1_fused_1 * 4
+ ax0_ax1_fused_2
)
// 4,
)
v1 = T.axis.spatial(
128,
i2_0 * 4
+ (
ax0_ax1_fused_0 * 512
+ ax0_ax1_fused_1 * 4
+ ax0_ax1_fused_2
)
% 4,
)
T.reads(p1[v0, v1])
T.writes(p1_shared[v0, v1])
p1_shared[v0, v1] = p1[v0, v1]
for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(1, 1, 4, 4, 1, 1):
with T.block("T_matmul_NT_update"):
i = T.axis.spatial(
128,
i0_0_i1_0_fused // 4 * 16 + i0_2_i1_2_fused // 8 + i0_3 + i0_4,
)
j = T.axis.spatial(
128,
i0_0_i1_0_fused % 4 * 32
+ i0_2_i1_2_fused % 8 * 4
+ i1_3
+ i1_4,
)
k = T.axis.reduce(128, i2_0 * 4 + i2_1 * 4 + i2_2)
T.reads(T_matmul_NT_local[i, j], p0_shared[i, k], p1_shared[j, k])
T.writes(T_matmul_NT_local[i, j])
T.block_attr(
{
"layout_free_placeholders": [],
"meta_schedule.thread_extent_high_inclusive": 256,
"meta_schedule.thread_extent_low_inclusive": 16,
"meta_schedule.tiling_structure": "SSSRRSRS",
}
)
T_matmul_NT_local[i, j] = (
T_matmul_NT_local[i, j] + p0_shared[i, k] * p1_shared[j, k]
)
for ax0, ax1 in T.grid(1, 4):
with T.block("T_matmul_NT_local"):
v0 = T.axis.spatial(
128, i0_0_i1_0_fused // 4 * 16 + i0_2_i1_2_fused // 8 + ax0
)
v1 = T.axis.spatial(
128, i0_0_i1_0_fused % 4 * 32 + i0_2_i1_2_fused % 8 * 4 + ax1
)
T.reads(T_matmul_NT_local[v0, v1])
T.writes(T_add[v0, v1])
T_add[v0, v1] = T_matmul_NT_local[v0, v1] + T.float32(1)
# Test fixture: unscheduled TIR for a quantized int8 conv2d (NHWC layout, 1x1
# kernel) followed by an integer requantization chain.  The chain, as written
# below, is: subtract zero-point (p2) -> add bias (p3) -> cast int64 ->
# multiply (p4) -> add (p5) -> arithmetic right shift (p6) -> cast int32 ->
# add scalar (p7) -> clip to [0, 255] -> cast uint8 -> cast int32 ->
# subtract scalar (p8) -> fixed-point multiply (q_multiply_shift).
# NOTE(review): this class is pass/meta-schedule test data — the exact
# block/loop structure is significant to the tests, so the body is untouched.
@tvm.script.ir_module
class Conv2dInt8:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer((1, 1, 1, 256), "int64"), p5: T.Buffer((1, 1, 1, 256), "int64"), p6: T.Buffer((1, 1, 1, 256), "int64"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")) -> None:
        # function attr dict
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        # body
        # with T.block("root")
        # Intermediate buffers — one per elementwise stage of the requantize chain.
        pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
        conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_cast = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_multiply = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_right_shift = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_cast_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_cast_2 = T.alloc_buffer([16, 56, 56, 256], dtype="uint8")
        T_cast_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        # "Padding" stage is a straight copy: the kernel is 1x1 (ry/rx extents
        # are 1 in the reduction grid below), so no halo is needed.
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
            with T.block("pad_temp"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1, i2_1, i3_1])
                T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
                pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
        # int8 x int8 -> int32 conv accumulation; reduction over ry, rx, rc.
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
            with T.block("conv2d_nhwc"):
                nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
                T.writes(conv2d_nhwc[nn, yy, xx, ff])
                with T.init():
                    conv2d_nhwc[nn, yy, xx, ff] = 0
                conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
        # Elementwise requantization chain — each stage materializes a full buffer.
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
                T.writes(T_subtract[ax0, ax1, ax2, ax3])
                T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[ax0, ax1, ax2, ax3])
                T.writes(T_cast[ax0, ax1, ax2, ax3])
                T_cast[ax0, ax1, ax2, ax3] = T.cast(T_add[ax0, ax1, ax2, ax3], "int64")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_multiply"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_cast[ax0, ax1, ax2, ax3], p4[0, 0, 0, ax3])
                T.writes(T_multiply[ax0, ax1, ax2, ax3])
                T_multiply[ax0, ax1, ax2, ax3] = T_cast[ax0, ax1, ax2, ax3] * p4[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_multiply[ax0, ax1, ax2, ax3], p5[0, 0, 0, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = T_multiply[ax0, ax1, ax2, ax3] + p5[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_right_shift"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add_1[ax0, ax1, ax2, ax3], p6[0, 0, 0, ax3])
                T.writes(T_right_shift[ax0, ax1, ax2, ax3])
                T_right_shift[ax0, ax1, ax2, ax3] = T.shift_right(T_add_1[ax0, ax1, ax2, ax3], p6[0, 0, 0, ax3], dtype="int64")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_right_shift[ax0, ax1, ax2, ax3])
                T.writes(T_cast_1[ax0, ax1, ax2, ax3])
                T_cast_1[ax0, ax1, ax2, ax3] = T.cast(T_right_shift[ax0, ax1, ax2, ax3], "int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p7[()], T_cast_1[ax0, ax1, ax2, ax3])
                T.writes(T_add_2[ax0, ax1, ax2, ax3])
                T_add_2[ax0, ax1, ax2, ax3] = p7[()] + T_cast_1[ax0, ax1, ax2, ax3]
        # Clip to the uint8 range while still in int32.
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("compute"):
                i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add_2[i0_2, i1_2, i2_2, i3_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
                compute_1[i0_2, i1_2, i2_2, i3_2] = T.max(T.min(T_add_2[i0_2, i1_2, i2_2, i3_2], 255), 0)
        for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
                T.reads(compute_1[ax0, ax1, ax2, ax3])
                T.writes(T_cast_2[ax0, ax1, ax2, ax3])
                T_cast_2[ax0, ax1, ax2, ax3] = T.cast(compute_1[ax0, ax1, ax2, ax3], "uint8")
        for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_3"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
                T.reads(T_cast_2[ax0, ax1, ax2, ax3])
                T.writes(T_cast_3[ax0, ax1, ax2, ax3])
                T_cast_3[ax0, ax1, ax2, ax3] = T.cast(T_cast_2[ax0, ax1, ax2, ax3], "int32")
        for i0_5, i1_5, i2_5, i3_5 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_5, i1_5, i2_5, i3_5])
                T.reads(T_cast_3[ax0, ax1, ax2, ax3], p8[0])
                T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
                T_subtract_1[ax0, ax1, ax2, ax3] = T_cast_3[ax0, ax1, ax2, ax3] - p8[0]
        # Final fixed-point rescale into the int32 output buffer.
        for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
            with T.block("compute_1"):
                i0_7, i1_7, i2_7, i3_7 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
                T.reads(T_subtract_1[i0_7, i1_7, i2_7, i3_7])
                T.writes(compute[i0_7, i1_7, i2_7, i3_7])
                compute[i0_7, i1_7, i2_7, i3_7] = T.q_multiply_shift(T_subtract_1[i0_7, i1_7, i2_7, i3_7], 1963325822, 31, 1, dtype="int32")
# Test fixture: the "target" companion of Conv2dInt8 above — same int8 NHWC
# 1x1 conv2d plus requantize chain, extended with a residual add of p9 after
# the q_multiply_shift, a second [0, 255] clip, and a final cast so the
# external output `compute` is uint8 instead of int32.
# NOTE(review): expected-output test data — body intentionally left untouched.
@tvm.script.ir_module
class Conv2dInt8_target:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer((1, 1, 1, 256), "int64"), p5: T.Buffer((1, 1, 1, 256), "int64"), p6: T.Buffer((1, 1, 1, 256), "int64"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "uint8")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
        conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_cast = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_multiply = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_right_shift = T.alloc_buffer([16, 56, 56, 256], dtype="int64")
        T_cast_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_cast_2 = T.alloc_buffer([16, 56, 56, 256], dtype="uint8")
        T_cast_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        # Extra buffers for the residual-add / second-clip tail of this variant.
        compute_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_cast_4 = T.alloc_buffer([16, 56, 56, 256], dtype="uint8")
        # Identity copy: 1x1 kernel (ry/rx extents are 1 below), so no halo.
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
            with T.block("pad_temp"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1, i2_1, i3_1])
                T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
                pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
        # int8 x int8 -> int32 conv accumulation.
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
            with T.block("conv2d_nhwc"):
                nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
                T.writes(conv2d_nhwc[nn, yy, xx, ff])
                with T.init():
                    conv2d_nhwc[nn, yy, xx, ff] = 0
                conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
        # Elementwise requantization chain (identical to Conv2dInt8 up to T_subtract_1).
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
                T.writes(T_subtract[ax0, ax1, ax2, ax3])
                T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[ax0, ax1, ax2, ax3])
                T.writes(T_cast[ax0, ax1, ax2, ax3])
                T_cast[ax0, ax1, ax2, ax3] = T.cast(T_add[ax0, ax1, ax2, ax3], "int64")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_multiply"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_cast[ax0, ax1, ax2, ax3], p4[0, 0, 0, ax3])
                T.writes(T_multiply[ax0, ax1, ax2, ax3])
                T_multiply[ax0, ax1, ax2, ax3] = T_cast[ax0, ax1, ax2, ax3] * p4[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_multiply[ax0, ax1, ax2, ax3], p5[0, 0, 0, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = T_multiply[ax0, ax1, ax2, ax3] + p5[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_right_shift"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add_1[ax0, ax1, ax2, ax3], p6[0, 0, 0, ax3])
                T.writes(T_right_shift[ax0, ax1, ax2, ax3])
                T_right_shift[ax0, ax1, ax2, ax3] = T.shift_right(T_add_1[ax0, ax1, ax2, ax3], p6[0, 0, 0, ax3], dtype="int64")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_right_shift[ax0, ax1, ax2, ax3])
                T.writes(T_cast_1[ax0, ax1, ax2, ax3])
                T_cast_1[ax0, ax1, ax2, ax3] = T.cast(T_right_shift[ax0, ax1, ax2, ax3], "int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p7[()], T_cast_1[ax0, ax1, ax2, ax3])
                T.writes(T_add_2[ax0, ax1, ax2, ax3])
                T_add_2[ax0, ax1, ax2, ax3] = p7[()] + T_cast_1[ax0, ax1, ax2, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("compute"):
                i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add_2[i0_2, i1_2, i2_2, i3_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
                compute_1[i0_2, i1_2, i2_2, i3_2] = T.max(T.min(T_add_2[i0_2, i1_2, i2_2, i3_2], 255), 0)
        for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
                T.reads(compute_1[ax0, ax1, ax2, ax3])
                T.writes(T_cast_2[ax0, ax1, ax2, ax3])
                T_cast_2[ax0, ax1, ax2, ax3] = T.cast(compute_1[ax0, ax1, ax2, ax3], "uint8")
        for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_3"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
                T.reads(T_cast_2[ax0, ax1, ax2, ax3])
                T.writes(T_cast_3[ax0, ax1, ax2, ax3])
                T_cast_3[ax0, ax1, ax2, ax3] = T.cast(T_cast_2[ax0, ax1, ax2, ax3], "int32")
        for i0_5, i1_5, i2_5, i3_5 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_5, i1_5, i2_5, i3_5])
                T.reads(T_cast_3[ax0, ax1, ax2, ax3], p8[0])
                T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
                T_subtract_1[ax0, ax1, ax2, ax3] = T_cast_3[ax0, ax1, ax2, ax3] - p8[0]
        # Rescale, then the tail that distinguishes this variant: residual add
        # of p9, second clip to [0, 255], and cast to the uint8 output.
        for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
            with T.block("compute_1"):
                i0_7, i1_7, i2_7, i3_7 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
                T.reads(T_subtract_1[i0_7, i1_7, i2_7, i3_7])
                T.writes(compute_2[i0_7, i1_7, i2_7, i3_7])
                compute_2[i0_7, i1_7, i2_7, i3_7] = T.q_multiply_shift(T_subtract_1[i0_7, i1_7, i2_7, i3_7], 1098990753, 31, 1, dtype="int32")
        for i0_8, i1_8, i2_8, i3_8 in T.grid(16, 56, 56, 256):
            with T.block("T_add_3"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_8, i1_8, i2_8, i3_8])
                T.reads(compute_2[ax0, ax1, ax2, ax3], p9[ax0, ax1, ax2, ax3])
                T.writes(T_add_3[ax0, ax1, ax2, ax3])
                T_add_3[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] + p9[ax0, ax1, ax2, ax3]
        for i0_9, i1_9, i2_9, i3_9 in T.grid(16, 56, 56, 256):
            with T.block("compute_2"):
                i0_10, i1_10, i2_10, i3_10 = T.axis.remap("SSSS", [i0_9, i1_9, i2_9, i3_9])
                T.reads(T_add_3[i0_10, i1_10, i2_10, i3_10])
                T.writes(compute_3[i0_10, i1_10, i2_10, i3_10])
                compute_3[i0_10, i1_10, i2_10, i3_10] = T.max(T.min(T_add_3[i0_10, i1_10, i2_10, i3_10], 255), 0)
        for i0_11, i1_11, i2_11, i3_11 in T.grid(16, 56, 56, 256):
            with T.block("T_cast_4"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_11, i1_11, i2_11, i3_11])
                T.reads(compute_3[ax0, ax1, ax2, ax3])
                T.writes(T_cast_4[ax0, ax1, ax2, ax3])
                T_cast_4[ax0, ax1, ax2, ax3] = T.cast(compute_3[ax0, ax1, ax2, ax3], "uint8")
        for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256):
            with T.block("compute_3"):
                i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12])
                T.reads(T_cast_4[i0_13, i1_13, i2_13, i3_13])
                T.writes(compute[i0_13, i1_13, i2_13, i3_13])
                compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(T_cast_4[i0_13, i1_13, i2_13, i3_13], T.uint8(255)), T.uint8(0))
# Test fixture: a GPU tensor-core-scheduled version of the int8 conv2d.
# The conv is computed via wmma intrinsics (tvm_fill_fragment,
# tvm_load_matrix_sync, tvm_mma_sync, tvm_store_matrix_sync) on 16x16x16
# fragments, with inputs staged through shared memory, and the entire
# requantize chain fused into the final write-back block.
# NOTE(review): scheduled expected-output test data — the exact loop nest,
# thread bindings, and intrinsic calls are what the test checks; untouched.
@tvm.script.ir_module
class Conv2dInt8_tensorcore_scheduled:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer((1, 1, 1, 256), "int64"), p5: T.Buffer((1, 1, 1, 256), "int64"), p6: T.Buffer((1, 1, 1, 256), "int64"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "uint8")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # Symbolic stride variables for the T.match_buffer views consumed by
        # the wmma intrinsics below (A_* = matrix_a/shared inputs,
        # B_* = matrix_b input, C_* = accumulator/output views).
        A_s0 = T.int32()
        A_s0_1 = T.int32()
        A_s0_2 = T.int32()
        A_s0_3 = T.int32()
        A_s1 = T.int32()
        A_s1_1 = T.int32()
        A_s1_2 = T.int32()
        A_s1_3 = T.int32()
        B_s0 = T.int32()
        B_s1 = T.int32()
        C_s0 = T.int32()
        C_s0_1 = T.int32()
        C_s0_2 = T.int32()
        C_s0_3 = T.int32()
        C_s0_4 = T.int32()
        C_s1 = T.int32()
        C_s1_1 = T.int32()
        C_s1_2 = T.int32()
        C_s1_3 = T.int32()
        C_s1_4 = T.int32()
        # body
        # with T.block("root")
        # Reindexed views: NHWC is flattened to [N*H*W, C] = [50176, 256] so the
        # conv becomes a matmul amenable to 16x16 wmma tiles.
        conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared")
        conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator")
        pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared")
        p1_reindex_shared = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="shared")
        pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a")
        p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b")
        for ax2_0_0_ax3_0_0_fused in T.thread_binding(3136, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step":512, "pragma_unroll_explicit":1}):
            for ax2_0_1_ax3_0_1_fused in T.thread_binding(1, thread="vthread.x"):
                for ax2_0_2_ax3_0_2_fused in T.thread_binding(16, thread="threadIdx.x"):
                    # Zero the accumulator fragment for this tile.
                    for ax2_0_3_init, ax3_0_3_init, ax2_0_4_init, ax3_0_4_init in T.grid(1, 1, 1, 1):
                        with T.block("conv2d_nhwc_o_init"):
                            v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 8 * 8 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3_init + ax2_0_4_init)
                            v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 8 * 2 + ax2_0_2_ax3_0_2_fused % 2 + ax3_0_3_init + ax3_0_4_init)
                            T.reads()
                            T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                            T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
                            C = T.match_buffer(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16], [16, 16], dtype="int32", strides=[C_s0, C_s1], scope="wmma.accumulator", offset_factor=16)
                            T.tvm_fill_fragment(C.data, 16, 16, 16, C.elem_offset // C_s0 // 16 * (C_s0 // 16) + C.elem_offset % C_s0 // 16, T.float32(0), dtype="handle")
                    for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2):
                        # Cooperative (vectorized, thread-bound) copy of the input tile
                        # into shared memory.
                        for ax0_ax1_fused_0 in T.serial(16):
                            for ax0_ax1_fused_1 in T.thread_binding(16, thread="threadIdx.x"):
                                for ax0_ax1_fused_2 in T.vectorized(16):
                                    with T.block("pad_temp_reindex_shared"):
                                        v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 8 * 128 + (ax0_ax1_fused_0 * 256 + ax0_ax1_fused_1 * 16 + ax0_ax1_fused_2) // 32)
                                        v1 = T.axis.spatial(64, ax4_0_0 * 32 + (ax0_ax1_fused_0 * 256 + ax0_ax1_fused_1 * 16 + ax0_ax1_fused_2) % 32)
                                        T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                                        T.writes(pad_temp_reindex_shared[v0, v1])
                                        T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]]})
                                        pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]
                        # Cooperative copy of the weight tile into shared memory.
                        for ax0_ax1_ax2_ax3_fused_0 in T.serial(8):
                            for ax0_ax1_ax2_ax3_fused_1 in T.thread_binding(16, thread="threadIdx.x"):
                                for ax0_ax1_ax2_ax3_fused_2 in T.vectorized(8):
                                    with T.block("p1_reindex_shared"):
                                        v0 = T.axis.spatial(1, 0)
                                        v1 = T.axis.spatial(1, 0)
                                        v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 8 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 128 + ax0_ax1_ax2_ax3_fused_1 * 8 + ax0_ax1_ax2_ax3_fused_2) // 32)
                                        v3 = T.axis.spatial(64, ax4_0_0 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 128 + ax0_ax1_ax2_ax3_fused_1 * 8 + ax0_ax1_ax2_ax3_fused_2) % 32)
                                        T.reads(p1[v2, v0, v1, v3])
                                        T.writes(p1_reindex_shared[v0, v1, v2, v3])
                                        T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]]})
                                        p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3]
                        for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 1):
                            # Load 16x16 fragments from shared memory into wmma registers.
                            for ax0_0_1, ax1_0_1 in T.grid(1, 2):
                                with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"):
                                    v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 8 * 8 + ax2_0_2_ax3_0_2_fused // 2 + ax0_0_1)
                                    v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax1_0_1)
                                    T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                    T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                    A = T.match_buffer(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16], [16, 16], dtype="int8", strides=[A_s0, A_s1], scope="shared", offset_factor=16)
                                    C_1 = T.match_buffer(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16], [16, 16], dtype="int8", strides=[C_s0_1, C_s1_1], scope="wmma.matrix_a", offset_factor=16)
                                    T.tvm_load_matrix_sync(C_1.data, 16, 16, 16, C_1.elem_offset // C_s0_1 // 16 * (C_s0_1 // 16) + C_1.elem_offset % C_s0_1 // 16, T.tvm_access_ptr(T.type_annotation(dtype="int8"), A.data, A.elem_offset, A_s0 * 16, 1, dtype="handle"), A_s0, "row_major", dtype="handle")
                            for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 1, 2):
                                with T.block("p1_reindex_shared_wmma.matrix_b_o"):
                                    v0, v1 = T.axis.remap("SS", [ax0, ax1])
                                    v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 8 * 2 + ax2_0_2_ax3_0_2_fused % 2 + ax2_0)
                                    v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax3_0)
                                    T.reads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                    T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                    A_1 = T.match_buffer(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16], [16, 16], dtype="int8", strides=[A_s0_1, A_s1_1], scope="shared", offset_factor=16)
                                    C_2 = T.match_buffer(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16], [16, 16], dtype="int8", strides=[C_s0_2, C_s1_2], scope="wmma.matrix_b", offset_factor=16)
                                    T.tvm_load_matrix_sync(C_2.data, 16, 16, 16, C_2.elem_offset // C_s0_2 // 16 * (C_s0_2 // 16) + C_2.elem_offset % C_s0_2 // 16, T.tvm_access_ptr(T.type_annotation(dtype="int8"), A_1.data, A_1.elem_offset, A_s0_1 * 16, 1, dtype="handle"), A_s0_1, "col_major", dtype="handle")
                            # Tensor-core MMA over the 16x16x16 fragments.
                            for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 2, 1, 1):
                                with T.block("conv2d_nhwc_o_update"):
                                    v0 = T.axis.reduce(1, ax0_0 + ax0_1 + ax0_2)
                                    v1 = T.axis.reduce(1, ax1_0 + ax1_1 + ax1_2)
                                    v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 8 * 8 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4)
                                    v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 8 * 2 + ax2_0_2_ax3_0_2_fused % 2 + ax3_0_3 + ax3_0_4)
                                    v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 * 2 + ax4_0_2)
                                    T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16])
                                    T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                    T.block_attr({"meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
                                    A_2 = T.match_buffer(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], [16, 16], dtype="int8", strides=[A_s0_2, A_s1_2], scope="wmma.matrix_a", offset_factor=16)
                                    B = T.match_buffer(p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], [16, 16], dtype="int8", strides=[B_s0, B_s1], scope="wmma.matrix_b", offset_factor=16)
                                    C_3 = T.match_buffer(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16], [16, 16], dtype="int32", strides=[C_s0_3, C_s1_3], scope="wmma.accumulator", offset_factor=16)
                                    T.tvm_mma_sync(C_3.data, C_3.elem_offset // C_s0_3 // 16 * (C_s0_3 // 16) + C_3.elem_offset % C_s0_3 // 16, A_2.data, A_2.elem_offset // A_s0_2 // 16 * (A_s0_2 // 16) + A_2.elem_offset % A_s0_2 // 16, B.data, B.elem_offset // B_s0 // 16 * (B_s0 // 16) + B.elem_offset % B_s0 // 16, C_3.data, C_3.elem_offset // C_s0_3 // 16 * (C_s0_3 // 16) + C_3.elem_offset % C_s0_3 // 16, dtype="handle")
                    # Store the accumulator fragment back to shared memory.
                    for ax0_0, ax1_0 in T.grid(1, 1):
                        with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
                            v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 8 * 8 + ax2_0_2_ax3_0_2_fused // 2 + ax0_0)
                            v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 8 * 2 + ax2_0_2_ax3_0_2_fused % 2 + ax1_0)
                            T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                            T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                            A_3 = T.match_buffer(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16], [16, 16], dtype="int32", strides=[A_s0_3, A_s1_3], scope="wmma.accumulator", offset_factor=16)
                            C_4 = T.match_buffer(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16], [16, 16], dtype="int32", strides=[C_s0_4, C_s1_4], scope="shared", offset_factor=16)
                            T.tvm_store_matrix_sync(A_3.data, 16, 16, 16, A_3.elem_offset // A_s0_3 // 16 * (A_s0_3 // 16) + A_3.elem_offset % A_s0_3 // 16, T.tvm_access_ptr(T.type_annotation(dtype="int32"), C_4.data, C_4.elem_offset, C_s0_4 * 16, 2, dtype="handle"), C_s0_4, "row_major", dtype="handle")
                # Write-back: the whole requantize chain (compare Conv2dInt8_target)
                # fused into a single expression per output element.
                for ax0, ax1_0 in T.grid(128, 2):
                    for ax1_1 in T.thread_binding(16, thread="threadIdx.x"):
                        with T.block("conv2d_nhwc_reindex_shared"):
                            v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 8 * 128 + ax0)
                            v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 8 * 32 + ax1_0 * 16 + ax1_1)
                            T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[0, 0, 0, v1], p5[0, 0, 0, v1], p6[0, 0, 0, v1], p8[0], p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                            T.writes(compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                            compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] = T.max(T.min(T.cast(T.max(T.min(T.q_multiply_shift(T.cast(T.cast(T.max(T.min(p7[()] + T.cast(T.shift_right(T.cast(conv2d_nhwc_reindex_shared[v0, v1] - p2[0, 0, 0, v1] + p3[0, 0, 0, v1], "int64") * p4[0, 0, 0, v1] + p5[0, 0, 0, v1], p6[0, 0, 0, v1], dtype="int64"), "int32"), 255), 0), "uint8"), "int32") - p8[0], 1098990753, 31, 1, dtype="int32") + p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1], 255), 0), "uint8"), T.uint8(255)), T.uint8(0))
# Test fixture: unscheduled TIR for a quantized conv2d in NCHW16c layout
# (uint8 activations, int8 weights packed as OIHW4i16o4i) with a
# floating-point requantization chain.  Unlike the NHWC fixtures above, the
# rescaling here is done in float32 with explicit T.floor rounding:
#   add bias (p2) -> cast f32 -> multiply scale (p3) -> add 54.5 -> floor ->
#   cast i32 -> clip [0, 255] -> cast u8 -> cast f32 -> subtract p4 ->
#   multiply const 0.9453... -> add 0.5 -> floor -> cast i32 -> add p5 ->
#   clip [0, 255] -> cast u8 -> final clip to the u8 output.
# NOTE(review): meta-schedule test data (see the "schedule_rule"/"workload"
# block attrs) — structure is load-bearing, body left untouched.
@tvm.script.ir_module
class Conv2dInt8_NCHWc:
    @T.prim_func
    def main(p0: T.Buffer((1, 32, 7, 7, 16), "uint8"), p1: T.Buffer((128, 32, 1, 1, 4, 16, 4), "int8"), p2: T.Buffer((1, 128, 1, 1, 16), "int32"), p3: T.Buffer((1, 128, 1, 1, 16), "float32"), p4: T.Buffer(1, "float32"), p5: T.Buffer((1, 128, 7, 7, 16), "int32"), compute: T.Buffer((1, 128, 7, 7, 16), "uint8")) -> None:
        # function attr dict
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        # body
        # with T.block("root")
        compile_engine_const = T.alloc_buffer([], dtype="float32")
        conv2d_NCHWc_int8 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_add = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_multiply = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        compile_engine_const_1 = T.alloc_buffer([], dtype="float32")
        T_add_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_floor = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_cast_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        compute_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="uint8")
        T_cast_3 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_subtract = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_multiply_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        compile_engine_const_2 = T.alloc_buffer([], dtype="float32")
        T_add_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_floor_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_cast_4 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_add_3 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        compute_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast_5 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="uint8")
        # Scalar constants are materialized as 0-d buffers ("compile engine consts").
        with T.block("compile_engine_const"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const[()])
            compile_engine_const[()] = T.float32(0.94537687301635742)
        # uint8 x int8 -> int32 NCHWc conv; reduction over kh, kw, ic_outer,
        # ic_f_inner, ic_s_inner.  The block attrs carry the meta-schedule
        # rule name and the AutoTVM-style workload description.
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 128, 7, 7, 16, 1, 1, 32, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                n, oc_chunk, oh, ow, oc_block, kh, kw, ic_outer, ic_f_inner, ic_s_inner = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner])
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                T.block_attr({"schedule_rule":"meta_schedule.conv2d_NCHWc_int8", "workload":["conv2d_NCHWc_int8.x86", ["TENSOR", [1, 32, 7, 7, 16], "uint8"], ["TENSOR", [128, 32, 1, 1, 4, 16, 4], "int8"], [1, 1], [0, 0, 0, 0], [1, 1], "NCHW16c", "NCHW16c", "int32"]})
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] + T.cast(p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32") * T.cast(p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner], "int32")
        # Float-based requantization chain, stage by stage.
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(conv2d_NCHWc_int8[ax0, ax1, ax2, ax3, ax4], p2[ax0, ax1, 0, 0, ax4])
                T.writes(T_add[ax0, ax1, ax2, ax3, ax4])
                T_add[ax0, ax1, ax2, ax3, ax4] = conv2d_NCHWc_int8[ax0, ax1, ax2, ax3, ax4] + p2[ax0, ax1, 0, 0, ax4]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast[ax0, ax1, ax2, ax3, ax4])
                T_cast[ax0, ax1, ax2, ax3, ax4] = T.cast(T_add[ax0, ax1, ax2, ax3, ax4], "float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_multiply"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast[ax0, ax1, ax2, ax3, ax4], p3[ax0, ax1, 0, 0, ax4])
                T.writes(T_multiply[ax0, ax1, ax2, ax3, ax4])
                T_multiply[ax0, ax1, ax2, ax3, ax4] = T_cast[ax0, ax1, ax2, ax3, ax4] * p3[ax0, ax1, 0, 0, ax4]
        with T.block("compile_engine_const_1"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_1[()])
            compile_engine_const_1[()] = T.float32(54.5)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_multiply[ax0, ax1, ax2, ax3, ax4], compile_engine_const_1[()])
                T.writes(T_add_1[ax0, ax1, ax2, ax3, ax4])
                T_add_1[ax0, ax1, ax2, ax3, ax4] = T_multiply[ax0, ax1, ax2, ax3, ax4] + compile_engine_const_1[()]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_floor"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_floor[ax0, ax1, ax2, ax3, ax4])
                T_floor[ax0, ax1, ax2, ax3, ax4] = T.floor(T_add_1[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_floor[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_1[ax0, ax1, ax2, ax3, ax4])
                T_cast_1[ax0, ax1, ax2, ax3, ax4] = T.cast(T_floor[ax0, ax1, ax2, ax3, ax4], "int32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute"):
                i0_1, i1_1, i2_1, i3_1, i4_1 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_1[i0_1, i1_1, i2_1, i3_1, i4_1])
                T.writes(compute_1[i0_1, i1_1, i2_1, i3_1, i4_1])
                compute_1[i0_1, i1_1, i2_1, i3_1, i4_1] = T.max(T.min(T_cast_1[i0_1, i1_1, i2_1, i3_1, i4_1], 255), 0)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(compute_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_2[ax0, ax1, ax2, ax3, ax4])
                T_cast_2[ax0, ax1, ax2, ax3, ax4] = T.cast(compute_1[ax0, ax1, ax2, ax3, ax4], "uint8")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_3[ax0, ax1, ax2, ax3, ax4])
                T_cast_3[ax0, ax1, ax2, ax3, ax4] = T.cast(T_cast_2[ax0, ax1, ax2, ax3, ax4], "float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_3[ax0, ax1, ax2, ax3, ax4], p4[0])
                T.writes(T_subtract[ax0, ax1, ax2, ax3, ax4])
                T_subtract[ax0, ax1, ax2, ax3, ax4] = T_cast_3[ax0, ax1, ax2, ax3, ax4] - p4[0]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_multiply_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(compile_engine_const[()], T_subtract[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply_1[ax0, ax1, ax2, ax3, ax4])
                T_multiply_1[ax0, ax1, ax2, ax3, ax4] = compile_engine_const[()] * T_subtract[ax0, ax1, ax2, ax3, ax4]
        with T.block("compile_engine_const_2"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_2[()])
            compile_engine_const_2[()] = T.float32(0.5)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_multiply_1[ax0, ax1, ax2, ax3, ax4], compile_engine_const_2[()])
                T.writes(T_add_2[ax0, ax1, ax2, ax3, ax4])
                T_add_2[ax0, ax1, ax2, ax3, ax4] = T_multiply_1[ax0, ax1, ax2, ax3, ax4] + compile_engine_const_2[()]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_floor_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_floor_1[ax0, ax1, ax2, ax3, ax4])
                T_floor_1[ax0, ax1, ax2, ax3, ax4] = T.floor(T_add_2[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_4"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_floor_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_4[ax0, ax1, ax2, ax3, ax4])
                T_cast_4[ax0, ax1, ax2, ax3, ax4] = T.cast(T_floor_1[ax0, ax1, ax2, ax3, ax4], "int32")
        # Residual add of p5, second clip, and final cast to the uint8 output.
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_4[ax0, ax1, ax2, ax3, ax4], p5[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_add_3[ax0, ax1, ax2, ax3, ax4])
                T_add_3[ax0, ax1, ax2, ax3, ax4] = T_cast_4[ax0, ax1, ax2, ax3, ax4] + p5[ax0, ax1, ax2, ax3, ax4]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute_1"):
                i0_2, i1_2, i2_2, i3_2, i4_2 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_3[i0_2, i1_2, i2_2, i3_2, i4_2])
                T.writes(compute_2[i0_2, i1_2, i2_2, i3_2, i4_2])
                compute_2[i0_2, i1_2, i2_2, i3_2, i4_2] = T.max(T.min(T_add_3[i0_2, i1_2, i2_2, i3_2, i4_2], 255), 0)
        for i0_3, i1_3, i2_3, i3_3, i4_3 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_5"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0_3, i1_3, i2_3, i3_3, i4_3])
                T.reads(compute_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_5[ax0, ax1, ax2, ax3, ax4])
                T_cast_5[ax0, ax1, ax2, ax3, ax4] = T.cast(compute_2[ax0, ax1, ax2, ax3, ax4], "uint8")
        for i0_4, i1_4, i2_4, i3_4, i4_4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute_2"):
                i0_5, i1_5, i2_5, i3_5, i4_5 = T.axis.remap("SSSSS", [i0_4, i1_4, i2_4, i3_4, i4_4])
                T.reads(T_cast_5[i0_5, i1_5, i2_5, i3_5, i4_5])
                T.writes(compute[i0_5, i1_5, i2_5, i3_5, i4_5])
                compute[i0_5, i1_5, i2_5, i3_5, i4_5] = T.max(T.min(T_cast_5[i0_5, i1_5, i2_5, i3_5, i4_5], T.uint8(255)), T.uint8(0))
# Fixture: unscheduled TIR for an int8 conv2d in NCHW16c layout (vector-lane
# packed channels), followed by the bias-add / requantize / clip / cast chain
# and a residual add from p5. The TIR below is an exact expected IRModule for
# a structural-equality comparison, so every statement is kept byte-identical;
# only comments are added.
@tvm.script.ir_module
class Conv2dInt8_NCHWc_target:
    @T.prim_func
    def main(p0: T.Buffer((1, 32, 7, 7, 16), "uint8"), p1: T.Buffer((128, 32, 1, 1, 4, 16, 4), "int8"), p2: T.Buffer((1, 128, 1, 1, 16), "int32"), p3: T.Buffer((1, 128, 1, 1, 16), "float32"), p4: T.Buffer(1, "float32"), p5: T.Buffer((1, 128, 7, 7, 16), "uint8"), T_cast: T.Buffer((1, 128, 7, 7, 16), "int32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        # Intermediate stages of the fused conv2d + requantize pipeline; each
        # buffer below is written by exactly one block further down.
        compile_engine_const = T.alloc_buffer([], dtype="float32")
        conv2d_NCHWc_int8 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_add = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_multiply = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        compile_engine_const_1 = T.alloc_buffer([], dtype="float32")
        T_add_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_floor = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_cast_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        compute = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast_3 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="uint8")
        T_cast_4 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_subtract = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_multiply_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        compile_engine_const_2 = T.alloc_buffer([], dtype="float32")
        T_add_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_floor_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_cast_5 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        compile_engine_const_3 = T.alloc_buffer([], dtype="float32")
        T_cast_6 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_multiply_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        compile_engine_const_4 = T.alloc_buffer([], dtype="float32")
        T_add_3 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_floor_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="float32")
        T_cast_7 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_add_4 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        compute_1 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
        T_cast_8 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="uint8")
        compute_2 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="uint8")
        # Scalar requantization constant (presumably an output scale — value
        # comes from the relay graph this fixture was traced from).
        with T.block("compile_engine_const"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const[()])
            compile_engine_const[()] = T.float32(0.95489668846130371)
        # int8 conv2d reduction over (kh, kw, ic_outer, ic_f_inner, ic_s_inner);
        # the block_attr carries the schedule rule + autotvm workload signature.
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 128, 7, 7, 16, 1, 1, 32, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                n, oc_chunk, oh, ow, oc_block, kh, kw, ic_outer, ic_f_inner, ic_s_inner = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner])
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                T.block_attr({"schedule_rule":"meta_schedule.conv2d_NCHWc_int8", "workload":["conv2d_NCHWc_int8.x86", ["TENSOR", [1, 32, 7, 7, 16], "uint8"], ["TENSOR", [128, 32, 1, 1, 4, 16, 4], "int8"], [1, 1], [0, 0, 0, 0], [1, 1], "NCHW16c", "NCHW16c", "int32"]})
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] + T.cast(p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner], "int32") * T.cast(p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner], "int32")
        # Bias add (p2 broadcast over the spatial dims), then cast to float and
        # scale by p3 for requantization.
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(conv2d_NCHWc_int8[ax0, ax1, ax2, ax3, ax4], p2[ax0, ax1, 0, 0, ax4])
                T.writes(T_add[ax0, ax1, ax2, ax3, ax4])
                T_add[ax0, ax1, ax2, ax3, ax4] = conv2d_NCHWc_int8[ax0, ax1, ax2, ax3, ax4] + p2[ax0, ax1, 0, 0, ax4]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_1[ax0, ax1, ax2, ax3, ax4])
                T_cast_1[ax0, ax1, ax2, ax3, ax4] = T.cast(T_add[ax0, ax1, ax2, ax3, ax4], "float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_multiply"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_1[ax0, ax1, ax2, ax3, ax4], p3[ax0, ax1, 0, 0, ax4])
                T.writes(T_multiply[ax0, ax1, ax2, ax3, ax4])
                T_multiply[ax0, ax1, ax2, ax3, ax4] = T_cast_1[ax0, ax1, ax2, ax3, ax4] * p3[ax0, ax1, 0, 0, ax4]
        with T.block("compile_engine_const_1"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_1[()])
            compile_engine_const_1[()] = T.float32(65.5)
        # add zero-point, round via floor, then cast + clamp to [0, 255] uint8.
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_multiply[ax0, ax1, ax2, ax3, ax4], compile_engine_const_1[()])
                T.writes(T_add_1[ax0, ax1, ax2, ax3, ax4])
                T_add_1[ax0, ax1, ax2, ax3, ax4] = T_multiply[ax0, ax1, ax2, ax3, ax4] + compile_engine_const_1[()]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_floor"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_floor[ax0, ax1, ax2, ax3, ax4])
                T_floor[ax0, ax1, ax2, ax3, ax4] = T.floor(T_add_1[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_floor[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_2[ax0, ax1, ax2, ax3, ax4])
                T_cast_2[ax0, ax1, ax2, ax3, ax4] = T.cast(T_floor[ax0, ax1, ax2, ax3, ax4], "int32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute"):
                i0_1, i1_1, i2_1, i3_1, i4_1 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_2[i0_1, i1_1, i2_1, i3_1, i4_1])
                T.writes(compute[i0_1, i1_1, i2_1, i3_1, i4_1])
                compute[i0_1, i1_1, i2_1, i3_1, i4_1] = T.max(T.min(T_cast_2[i0_1, i1_1, i2_1, i3_1, i4_1], 255), 0)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(compute[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_3[ax0, ax1, ax2, ax3, ax4])
                T_cast_3[ax0, ax1, ax2, ax3, ax4] = T.cast(compute[ax0, ax1, ax2, ax3, ax4], "uint8")
        # Dequantize the conv result (subtract p4 zero point, scale by the
        # compile_engine_const factor), re-quantize with floor(+0.5) rounding.
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_3[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_4[ax0, ax1, ax2, ax3, ax4])
                T_cast_4[ax0, ax1, ax2, ax3, ax4] = T.cast(T_cast_3[ax0, ax1, ax2, ax3, ax4], "float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_4[ax0, ax1, ax2, ax3, ax4], p4[0])
                T.writes(T_subtract[ax0, ax1, ax2, ax3, ax4])
                T_subtract[ax0, ax1, ax2, ax3, ax4] = T_cast_4[ax0, ax1, ax2, ax3, ax4] - p4[0]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_multiply_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(compile_engine_const[()], T_subtract[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply_1[ax0, ax1, ax2, ax3, ax4])
                T_multiply_1[ax0, ax1, ax2, ax3, ax4] = compile_engine_const[()] * T_subtract[ax0, ax1, ax2, ax3, ax4]
        with T.block("compile_engine_const_2"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_2[()])
            compile_engine_const_2[()] = T.float32(0.5)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_multiply_1[ax0, ax1, ax2, ax3, ax4], compile_engine_const_2[()])
                T.writes(T_add_2[ax0, ax1, ax2, ax3, ax4])
                T_add_2[ax0, ax1, ax2, ax3, ax4] = T_multiply_1[ax0, ax1, ax2, ax3, ax4] + compile_engine_const_2[()]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_floor_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_floor_1[ax0, ax1, ax2, ax3, ax4])
                T_floor_1[ax0, ax1, ax2, ax3, ax4] = T.floor(T_add_2[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_4"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_floor_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_5[ax0, ax1, ax2, ax3, ax4])
                T_cast_5[ax0, ax1, ax2, ax3, ax4] = T.cast(T_floor_1[ax0, ax1, ax2, ax3, ax4], "int32")
        # Residual branch: requantize p5 with its own scale, then add to the
        # conv branch and clip / cast for the final int32 output.
        with T.block("compile_engine_const_3"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_3[()])
            compile_engine_const_3[()] = T.float32(0.71245479583740234)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_5"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(p5[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_6[ax0, ax1, ax2, ax3, ax4])
                T_cast_6[ax0, ax1, ax2, ax3, ax4] = T.cast(p5[ax0, ax1, ax2, ax3, ax4], "float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_multiply_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(compile_engine_const_3[()], T_cast_6[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply_2[ax0, ax1, ax2, ax3, ax4])
                T_multiply_2[ax0, ax1, ax2, ax3, ax4] = compile_engine_const_3[()] * T_cast_6[ax0, ax1, ax2, ax3, ax4]
        with T.block("compile_engine_const_4"):
            vi = T.axis.spatial(1, 0)
            T.reads()
            T.writes(compile_engine_const_4[()])
            compile_engine_const_4[()] = T.float32(0.5)
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_multiply_2[ax0, ax1, ax2, ax3, ax4], compile_engine_const_4[()])
                T.writes(T_add_3[ax0, ax1, ax2, ax3, ax4])
                T_add_3[ax0, ax1, ax2, ax3, ax4] = T_multiply_2[ax0, ax1, ax2, ax3, ax4] + compile_engine_const_4[()]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_floor_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_3[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_floor_2[ax0, ax1, ax2, ax3, ax4])
                T_floor_2[ax0, ax1, ax2, ax3, ax4] = T.floor(T_add_3[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_6"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_floor_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_7[ax0, ax1, ax2, ax3, ax4])
                T_cast_7[ax0, ax1, ax2, ax3, ax4] = T.cast(T_floor_2[ax0, ax1, ax2, ax3, ax4], "int32")
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_add_4"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_cast_5[ax0, ax1, ax2, ax3, ax4], T_cast_7[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_add_4[ax0, ax1, ax2, ax3, ax4])
                T_add_4[ax0, ax1, ax2, ax3, ax4] = T_cast_5[ax0, ax1, ax2, ax3, ax4] + T_cast_7[ax0, ax1, ax2, ax3, ax4]
        for i0, i1, i2, i3, i4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute_1"):
                i0_2, i1_2, i2_2, i3_2, i4_2 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_add_4[i0_2, i1_2, i2_2, i3_2, i4_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2, i4_2])
                compute_1[i0_2, i1_2, i2_2, i3_2, i4_2] = T.max(T.min(T_add_4[i0_2, i1_2, i2_2, i3_2, i4_2], 255), 0)
        for i0_3, i1_3, i2_3, i3_3, i4_3 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_7"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0_3, i1_3, i2_3, i3_3, i4_3])
                T.reads(compute_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast_8[ax0, ax1, ax2, ax3, ax4])
                T_cast_8[ax0, ax1, ax2, ax3, ax4] = T.cast(compute_1[ax0, ax1, ax2, ax3, ax4], "uint8")
        for i0_4, i1_4, i2_4, i3_4, i4_4 in T.grid(1, 128, 7, 7, 16):
            with T.block("compute_2"):
                i0_5, i1_5, i2_5, i3_5, i4_5 = T.axis.remap("SSSSS", [i0_4, i1_4, i2_4, i3_4, i4_4])
                T.reads(T_cast_8[i0_5, i1_5, i2_5, i3_5, i4_5])
                T.writes(compute_2[i0_5, i1_5, i2_5, i3_5, i4_5])
                compute_2[i0_5, i1_5, i2_5, i3_5, i4_5] = T.max(T.min(T_cast_8[i0_5, i1_5, i2_5, i3_5, i4_5], T.uint8(255)), T.uint8(0))
        for i0_6, i1_6, i2_6, i3_6, i4_6 in T.grid(1, 128, 7, 7, 16):
            with T.block("T_cast_8"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0_6, i1_6, i2_6, i3_6, i4_6])
                T.reads(compute_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_cast[ax0, ax1, ax2, ax3, ax4])
                T_cast[ax0, ax1, ax2, ax3, ax4] = T.cast(compute_2[ax0, ax1, ax2, ax3, ax4], "int32")
def get_conv2d_vnni_mod(intrin_id):
    """Build the expected *scheduled* IRModule for the NCHW16c int8 conv2d.

    The TIR below is the fixture after scheduling/tensorization: the whole
    requantize chain is fused into one vectorized epilogue block and the
    innermost 16x4 int8 dot product is replaced by a single LLVM intrinsic
    call.

    Parameters
    ----------
    intrin_id : int
        LLVM intrinsic id spliced into ``T.call_llvm_pure_intrin`` for the
        16-lane int32 dot-product update (presumably the VNNI/dot-product
        intrinsic id, which varies across LLVM builds — hence the parameter).

    Returns
    -------
    tvm.IRModule
        The scheduled module, used for exact structural comparison.
    """
    @tvm.script.ir_module
    class Conv2dInt8_NCHWc_scheduled:
        @T.prim_func
        def main(p0: T.Buffer((1, 32, 7, 7, 16), "uint8"), p1: T.Buffer((128, 32, 1, 1, 4, 16, 4), "int8"), p2: T.Buffer((1, 128, 1, 1, 16), "int32"), p3: T.Buffer((1, 128, 1, 1, 16), "float32"), p4: T.Buffer(1, "float32"), p5: T.Buffer((1, 128, 7, 7, 16), "uint8"), T_cast: T.Buffer((1, 128, 7, 7, 16), "int32")) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            # body
            # with T.block("root")
            conv2d_NCHWc_int8 = T.alloc_buffer([1, 128, 7, 7, 16], dtype="int32")
            for i0_0_i1_0_i2_0_i3_0_i4_0_0_i0_1_i1_1_fused in T.parallel(128, annotations={"pragma_auto_unroll_max_step":64, "pragma_unroll_explicit":1}):
                for i2_1, i3_1, i4_0_1 in T.grid(7, 1, 1):
                    # Init: zero the 16-lane output vector of each tile.
                    for i0_2_init, i1_2_init, i2_2_init, i3_2_init, i4_0_2_init, i0_3_init, i1_3_init, i2_3_init, i3_3_init, i4_0_3_init in T.grid(1, 1, 1, 1, 1, 1, 1, 1, 7, 1):
                        with T.block("conv2d_NCHWc_int8_o_init"):
                            n = T.axis.spatial(1, i0_2_init + i0_3_init)
                            oc_chunk = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_i4_0_0_i0_1_i1_1_fused + i1_2_init + i1_3_init)
                            oh = T.axis.spatial(7, i2_1 + i2_2_init + i2_3_init)
                            ow = T.axis.spatial(7, i3_1 * 7 + i3_2_init * 7 + i3_3_init)
                            oc_block_o = T.axis.spatial(1, i4_0_1 + i4_0_2_init + i4_0_3_init)
                            T.reads()
                            T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0 : 16])
                            for i4_1 in T.vectorized(16):
                                with T.block("conv2d_NCHWc_int8_init"):
                                    oc_block_i_init = T.axis.spatial(16, i4_1)
                                    T.reads()
                                    T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init])
                                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block_i_init] = 0
                    # Update: the tensorized body — match 4 uint8 activations,
                    # a 16x4 int8 weight tile and the 16-lane int32 accumulator,
                    # then perform the dot product via the LLVM intrinsic.
                    for i5_0, i6_0, i7_0, i8_0, i9_0_0, i0_2, i1_2, i2_2, i3_2, i4_0_2, i5_1, i6_1, i7_1, i8_1, i9_0_1, i0_3, i1_3, i2_3, i3_3, i4_0_3 in T.grid(1, 1, 4, 4, 1, 1, 1, 1, 1, 1, 1, 1, 8, 1, 1, 1, 1, 1, 7, 1):
                        with T.block("conv2d_NCHWc_int8_o_update"):
                            n = T.axis.spatial(1, i0_2 + i0_3)
                            oc_chunk = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_i4_0_0_i0_1_i1_1_fused + i1_2 + i1_3)
                            oh = T.axis.spatial(7, i2_1 + i2_2 + i2_3)
                            ow = T.axis.spatial(7, i3_1 * 7 + i3_2 * 7 + i3_3)
                            oc_block_o = T.axis.spatial(1, i4_0_1 + i4_0_2 + i4_0_3)
                            kh = T.axis.reduce(1, i5_0 + i5_1)
                            kw = T.axis.reduce(1, i6_0 + i6_1)
                            ic_outer = T.axis.reduce(32, i7_0 * 8 + i7_1)
                            ic_f_inner = T.axis.reduce(4, i8_0 + i8_1)
                            ic_s_inner_o = T.axis.reduce(1, i9_0_0 + i9_0_1)
                            T.reads(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0 : 16], p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4], p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0 : 16, 0 : 4])
                            T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0 : 16])
                            A = T.match_buffer(p0[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 : ic_f_inner * 4 + 4], [4], dtype="uint8", offset_factor=1)
                            B = T.match_buffer(p1[oc_chunk, ic_outer, kh, kw, ic_f_inner, 0 : 16, 0 : 4], [16, 4], dtype="int8", offset_factor=1)
                            C = T.match_buffer(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, 0 : 16], [16], dtype="int32", offset_factor=1)
                            A_u8x4: T.uint8x4 = A[0:4]
                            A_i32: T.int32 = T.reinterpret(A_u8x4, dtype="int32")
                            B_i8x64: T.int8x64 = B[0, 0:64]
                            B_i32x16: T.int32x16 = T.reinterpret(B_i8x64, dtype="int32x16")
                            C_i32x16: T.int32x16 = C[0:16]
                            C[0:16] = T.call_llvm_pure_intrin(T.uint32(intrin_id), T.uint32(3), C_i32x16, T.broadcast(A_i32, 16), B_i32x16, dtype="int32x16")
                    # Fused, vectorized epilogue: the entire requantize / clip /
                    # residual-add / cast chain computed in a single expression.
                    for ax0, ax1, ax2, ax3 in T.grid(1, 1, 1, 7):
                        for ax4_fused in T.vectorized(16):
                            with T.block("T_cast_8"):
                                ax0_1 = T.axis.spatial(1, ax0)
                                ax1_1 = T.axis.spatial(128, i0_0_i1_0_i2_0_i3_0_i4_0_0_i0_1_i1_1_fused + ax1)
                                ax2_1 = T.axis.spatial(7, i2_1 + ax2)
                                ax3_1, ax4 = T.axis.remap("SS", [ax3, ax4_fused])
                                T.reads(conv2d_NCHWc_int8[ax0_1, ax1_1, ax2_1, ax3_1, ax4], p2[ax0_1, ax1_1, 0, 0, ax4], p3[ax0_1, ax1_1, 0, 0, ax4], p4[0], p5[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
                                T.writes(T_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4])
                                T_cast[ax0_1, ax1_1, ax2_1, ax3_1, ax4] = T.cast(T.max(T.min(T.cast(T.max(T.min(T.cast(T.floor(T.float32(0.95489668846130371) * (T.cast(T.cast(T.max(T.min(T.cast(T.floor(T.cast(conv2d_NCHWc_int8[ax0_1, ax1_1, ax2_1, ax3_1, ax4] + p2[ax0_1, ax1_1, 0, 0, ax4], "float32") * p3[ax0_1, ax1_1, 0, 0, ax4] + T.float32(65.5), dtype="float32"), "int32"), 255), 0), "uint8"), "float32") - p4[0]) + T.float32(0.5), dtype="float32"), "int32") + T.cast(T.floor(T.float32(0.71245479583740234) * T.cast(p5[ax0_1, ax1_1, ax2_1, ax3_1, ax4], "float32") + T.float32(0.5), dtype="float32"), "int32"), 255), 0), "uint8"), T.uint8(255)), T.uint8(0)), "int32")
    return Conv2dInt8_NCHWc_scheduled
# Fixture: unscheduled TIR for a 4x4-tile / 6x6-input Winograd conv2d (NHWC)
# followed by a broadcast bias add and ReLU. Exact expected IRModule — the
# TIR statements are kept byte-identical; only comments are added.
@tvm.script.ir_module
class Conv2dWinogradAddRelu:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((6, 6, 64, 64), "float32"), p2: T.Buffer((1, 1, 1, 64), "float32"), T_relu: T.Buffer((1, 56, 56, 64), "float32")) -> None:
        # function attr dict
        T.func_attr({"layout_free_buffers": [1], "tir.noalias": True, "global_symbol": "main"})
        # body
        # with T.block("root")
        data_pad = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        input_tile = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        B = T.alloc_buffer([6, 6], dtype="float32")
        data_pack = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        A = T.alloc_buffer([6, 4], dtype="float32")
        inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
        conv2d_winograd = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        T_add = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        # Pad the 56x56 input by 1 on each side, then slice it into 6x6 tiles
        # (196 tiles of 4x4 outputs with 2-pixel overlap).
        for i0, i1, i2, i3 in T.grid(1, 58, 58, 64):
            with T.block("data_pad"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
                T.writes(data_pad[i0_1, i1_1, i2_1, i3_1])
                T.block_attr({"schedule_rule":"None"})
                data_pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 57 and 1 <= i2_1 and i2_1 < 57, p0[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
        for i0, i1, i2, i3 in T.grid(6, 6, 196, 64):
            with T.block("input_tile"):
                eps, nu, p, ci = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(data_pad[p // 196, p % 196 // 14 * 4 + eps, p % 14 * 4 + nu, ci])
                T.writes(input_tile[eps, nu, p, ci])
                T.block_attr({"schedule_rule":"None"})
                input_tile[eps, nu, p, ci] = data_pad[p // 196, p % 196 // 14 * 4 + eps, p % 14 * 4 + nu, ci]
        # B: the 6x6 Winograd input-transform constant matrix, expressed as a
        # nested Select over (i % 6, j % 6) so it stays a pure compute block.
        for i0, i1 in T.grid(6, 6):
            with T.block("B"):
                i, j = T.axis.remap("SS", [i0, i1])
                T.reads()
                T.writes(B[i, j])
                T.block_attr({"const_matrix":True, "schedule_rule":"meta_schedule.compute_inline"})
                B[i, j] = T.Select(i % 6 == 5 and j % 6 == 5, T.float32(1), T.Select(i % 6 == 5 and j % 6 == 4, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 3, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 2, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 1, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 0, T.float32(0), T.Select(i % 6 == 4 and j % 6 == 5, T.float32(1.5), T.Select(i % 6 == 4 and j % 6 == 4, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 3, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 2, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 1, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 0, T.float32(1), T.Select(i % 6 == 3 and j % 6 == 5, T.float32(-2), T.Select(i % 6 == 3 and j % 6 == 4, T.float32(-0.5), T.Select(i % 6 == 3 and j % 6 == 3, T.float32(2), T.Select(i % 6 == 3 and j % 6 == 2, T.float32(2.5), T.Select(i % 6 == 3 and j % 6 == 1, T.float32(0.5), T.Select(i % 6 == 3 and j % 6 == 0, T.float32(1.5), T.Select(i % 6 == 2 and j % 6 == 5, T.float32(-1.5), T.Select(i % 6 == 2 and j % 6 == 4, T.float32(-1), T.Select(i % 6 == 2 and j % 6 == 3, T.float32(-1), T.Select(i % 6 == 2 and j % 6 == 2, T.float32(0.5), T.Select(i % 6 == 2 and j % 6 == 1, T.float32(-2.5), T.Select(i % 6 == 2 and j % 6 == 0, T.float32(-2), T.Select(i % 6 == 1 and j % 6 == 5, T.float32(1), T.Select(i % 6 == 1 and j % 6 == 4, T.float32(0.5), T.Select(i % 6 == 1 and j % 6 == 3, T.float32(-2), T.Select(i % 6 == 1 and j % 6 == 2, T.float32(-1), T.Select(i % 6 == 1 and j % 6 == 1, T.float32(1), T.Select(i % 6 == 1 and j % 6 == 0, T.float32(-1.5), T.Select(i % 6 == 0 and j % 6 == 5, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 4, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 3, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 2, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 1, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
        # data_pack = B^T d B per tile; bgemm contracts tiles against the
        # pre-transformed weights p1.
        for i0, i1, i2, i3, i4, i5 in T.grid(6, 6, 196, 64, 6, 6):
            with T.block("data_pack"):
                eps, nu, p, ci, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                T.reads(input_tile[r_a, r_b, p, ci], B[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(eps, nu) : T.max(eps, nu) + 1])
                T.writes(data_pack[eps, nu, p, ci])
                T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["eps", "nu", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_data_pack.cuda"})
                with T.init():
                    data_pack[eps, nu, p, ci] = T.float32(0)
                data_pack[eps, nu, p, ci] = data_pack[eps, nu, p, ci] + input_tile[r_a, r_b, p, ci] * B[r_a, eps] * B[r_b, nu]
        for i0, i1, i2, i3, i4 in T.grid(6, 6, 196, 64, 64):
            with T.block("bgemm"):
                eps, nu, p, co, ci = T.axis.remap("SSSSR", [i0, i1, i2, i3, i4])
                T.reads(data_pack[eps, nu, p, ci], p1[eps, nu, co, ci])
                T.writes(bgemm[eps, nu, p, co])
                T.block_attr({"layout_free_placeholders":[]})
                with T.init():
                    bgemm[eps, nu, p, co] = T.float32(0)
                bgemm[eps, nu, p, co] = bgemm[eps, nu, p, co] + data_pack[eps, nu, p, ci] * p1[eps, nu, co, ci]
        # A: the 6x4 Winograd inverse-transform constant matrix.
        for i0, i1 in T.grid(6, 4):
            with T.block("A"):
                i, j = T.axis.remap("SS", [i0, i1])
                T.reads()
                T.writes(A[i, j])
                T.block_attr({"const_matrix":True, "schedule_rule":"meta_schedule.compute_inline"})
                A[i, j] = T.Select(i % 6 == 5 and j % 4 == 3, T.float32(1), T.Select(i % 6 == 5 and j % 4 == 2, T.float32(0), T.Select(i % 6 == 5 and j % 4 == 1, T.float32(0), T.Select(i % 6 == 5 and j % 4 == 0, T.float32(0), T.Select(i % 6 == 4 and j % 4 == 3, T.float32(-8), T.Select(i % 6 == 4 and j % 4 == 2, T.float32(4), T.Select(i % 6 == 4 and j % 4 == 1, T.float32(-2), T.Select(i % 6 == 4 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 3 and j % 4 == 3, T.float32(0.125), T.Select(i % 6 == 3 and j % 4 == 2, T.float32(0.25), T.Select(i % 6 == 3 and j % 4 == 1, T.float32(0.5), T.Select(i % 6 == 3 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 3, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 2, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 1, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 1 and j % 4 == 3, T.float32(-1), T.Select(i % 6 == 1 and j % 4 == 2, T.float32(1), T.Select(i % 6 == 1 and j % 4 == 1, T.float32(-1), T.Select(i % 6 == 1 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 0 and j % 4 == 3, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 2, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 1, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
        # inverse = A^T bgemm A, then scatter the 4x4 output tiles back to the
        # NHWC layout and apply bias add + ReLU.
        for i0, i1, i2, i3, i4, i5 in T.grid(4, 4, 196, 64, 6, 6):
            with T.block("inverse"):
                vh, vw, p, co, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                T.reads(bgemm[r_a, r_b, p, co], A[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(vh, vw) : T.max(vh, vw) + 1])
                T.writes(inverse[vh, vw, p, co])
                T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["vh", "vw", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_inverse.cuda"})
                with T.init():
                    inverse[vh, vw, p, co] = T.float32(0)
                inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * A[r_a, vh] * A[r_b, vw]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("conv2d_winograd"):
                n, h, w, co = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co])
                T.writes(conv2d_winograd[n, h, w, co])
                conv2d_winograd[n, h, w, co] = inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_winograd[ax0, ax1, ax2, ax3], p2[ax0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = conv2d_winograd[ax0, ax1, ax2, ax3] + p2[ax0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("T_relu"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[ax0, ax1, ax2, ax3])
                T.writes(T_relu[ax0, ax1, ax2, ax3])
                T_relu[ax0, ax1, ax2, ax3] = T.max(T_add[ax0, ax1, ax2, ax3], T.float32(0))
# Fixture: same Winograd conv2d pipeline as Conv2dWinogradAddRelu, but with an
# extra residual add (p3, full 1x56x56x64 tensor) between the bias add and the
# ReLU. Exact expected IRModule — TIR kept byte-identical; comments only.
@tvm.script.ir_module
class Conv2dWinogradAddResidualRelu:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((6, 6, 64, 64), "float32"), p2: T.Buffer((1, 1, 1, 64), "float32"), p3: T.Buffer((1, 56, 56, 64), "float32"), T_relu: T.Buffer((1, 56, 56, 64), "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
        # body
        # with T.block("root")
        data_pad = T.alloc_buffer([1, 58, 58, 64], dtype="float32")
        input_tile = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        B = T.alloc_buffer([6, 6], dtype="float32")
        data_pack = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        A = T.alloc_buffer([6, 4], dtype="float32")
        inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
        conv2d_winograd = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        T_add = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        T_add_1 = T.alloc_buffer([1, 56, 56, 64], dtype="float32")
        # Pad input and slice into overlapping 6x6 tiles.
        for i0, i1, i2, i3 in T.grid(1, 58, 58, 64):
            with T.block("data_pad"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1 - 1, i2_1 - 1, i3_1])
                T.writes(data_pad[i0_1, i1_1, i2_1, i3_1])
                T.block_attr({"schedule_rule":"None"})
                data_pad[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(1 <= i1_1 and i1_1 < 57 and 1 <= i2_1 and i2_1 < 57, p0[i0_1, i1_1 - 1, i2_1 - 1, i3_1], T.float32(0), dtype="float32")
        for i0, i1, i2, i3 in T.grid(6, 6, 196, 64):
            with T.block("input_tile"):
                eps, nu, p, ci = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(data_pad[p // 196, p % 196 // 14 * 4 + eps, p % 14 * 4 + nu, ci])
                T.writes(input_tile[eps, nu, p, ci])
                T.block_attr({"schedule_rule":"None"})
                input_tile[eps, nu, p, ci] = data_pad[p // 196, p % 196 // 14 * 4 + eps, p % 14 * 4 + nu, ci]
        # B: 6x6 Winograd input-transform constant matrix (Select-encoded).
        for i0, i1 in T.grid(6, 6):
            with T.block("B"):
                i, j = T.axis.remap("SS", [i0, i1])
                T.reads()
                T.writes(B[i, j])
                T.block_attr({"const_matrix":True, "schedule_rule":"meta_schedule.compute_inline"})
                B[i, j] = T.Select(i % 6 == 5 and j % 6 == 5, T.float32(1), T.Select(i % 6 == 5 and j % 6 == 4, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 3, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 2, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 1, T.float32(0), T.Select(i % 6 == 5 and j % 6 == 0, T.float32(0), T.Select(i % 6 == 4 and j % 6 == 5, T.float32(1.5), T.Select(i % 6 == 4 and j % 6 == 4, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 3, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 2, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 1, T.float32(1), T.Select(i % 6 == 4 and j % 6 == 0, T.float32(1), T.Select(i % 6 == 3 and j % 6 == 5, T.float32(-2), T.Select(i % 6 == 3 and j % 6 == 4, T.float32(-0.5), T.Select(i % 6 == 3 and j % 6 == 3, T.float32(2), T.Select(i % 6 == 3 and j % 6 == 2, T.float32(2.5), T.Select(i % 6 == 3 and j % 6 == 1, T.float32(0.5), T.Select(i % 6 == 3 and j % 6 == 0, T.float32(1.5), T.Select(i % 6 == 2 and j % 6 == 5, T.float32(-1.5), T.Select(i % 6 == 2 and j % 6 == 4, T.float32(-1), T.Select(i % 6 == 2 and j % 6 == 3, T.float32(-1), T.Select(i % 6 == 2 and j % 6 == 2, T.float32(0.5), T.Select(i % 6 == 2 and j % 6 == 1, T.float32(-2.5), T.Select(i % 6 == 2 and j % 6 == 0, T.float32(-2), T.Select(i % 6 == 1 and j % 6 == 5, T.float32(1), T.Select(i % 6 == 1 and j % 6 == 4, T.float32(0.5), T.Select(i % 6 == 1 and j % 6 == 3, T.float32(-2), T.Select(i % 6 == 1 and j % 6 == 2, T.float32(-1), T.Select(i % 6 == 1 and j % 6 == 1, T.float32(1), T.Select(i % 6 == 1 and j % 6 == 0, T.float32(-1.5), T.Select(i % 6 == 0 and j % 6 == 5, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 4, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 3, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 2, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 1, T.float32(0), T.Select(i % 6 == 0 and j % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
        for i0, i1, i2, i3, i4, i5 in T.grid(6, 6, 196, 64, 6, 6):
            with T.block("data_pack"):
                eps, nu, p, ci, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                T.reads(input_tile[r_a, r_b, p, ci], B[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(eps, nu) : T.max(eps, nu) + 1])
                T.writes(data_pack[eps, nu, p, ci])
                T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["eps", "nu", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_data_pack.cuda"})
                with T.init():
                    data_pack[eps, nu, p, ci] = T.float32(0)
                data_pack[eps, nu, p, ci] = data_pack[eps, nu, p, ci] + input_tile[r_a, r_b, p, ci] * B[r_a, eps] * B[r_b, nu]
        for i0, i1, i2, i3, i4 in T.grid(6, 6, 196, 64, 64):
            with T.block("bgemm"):
                eps, nu, p, co, ci = T.axis.remap("SSSSR", [i0, i1, i2, i3, i4])
                T.reads(data_pack[eps, nu, p, ci], p1[eps, nu, co, ci])
                T.writes(bgemm[eps, nu, p, co])
                T.block_attr({"layout_free_placeholders":[]})
                with T.init():
                    bgemm[eps, nu, p, co] = T.float32(0)
                bgemm[eps, nu, p, co] = bgemm[eps, nu, p, co] + data_pack[eps, nu, p, ci] * p1[eps, nu, co, ci]
        # A: 6x4 Winograd inverse-transform constant matrix.
        for i0, i1 in T.grid(6, 4):
            with T.block("A"):
                i, j = T.axis.remap("SS", [i0, i1])
                T.reads()
                T.writes(A[i, j])
                T.block_attr({"const_matrix":True, "schedule_rule":"meta_schedule.compute_inline"})
                A[i, j] = T.Select(i % 6 == 5 and j % 4 == 3, T.float32(1), T.Select(i % 6 == 5 and j % 4 == 2, T.float32(0), T.Select(i % 6 == 5 and j % 4 == 1, T.float32(0), T.Select(i % 6 == 5 and j % 4 == 0, T.float32(0), T.Select(i % 6 == 4 and j % 4 == 3, T.float32(-8), T.Select(i % 6 == 4 and j % 4 == 2, T.float32(4), T.Select(i % 6 == 4 and j % 4 == 1, T.float32(-2), T.Select(i % 6 == 4 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 3 and j % 4 == 3, T.float32(0.125), T.Select(i % 6 == 3 and j % 4 == 2, T.float32(0.25), T.Select(i % 6 == 3 and j % 4 == 1, T.float32(0.5), T.Select(i % 6 == 3 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 3, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 2, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 1, T.float32(1), T.Select(i % 6 == 2 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 1 and j % 4 == 3, T.float32(-1), T.Select(i % 6 == 1 and j % 4 == 2, T.float32(1), T.Select(i % 6 == 1 and j % 4 == 1, T.float32(-1), T.Select(i % 6 == 1 and j % 4 == 0, T.float32(1), T.Select(i % 6 == 0 and j % 4 == 3, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 2, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 1, T.float32(0), T.Select(i % 6 == 0 and j % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
        for i0, i1, i2, i3, i4, i5 in T.grid(4, 4, 196, 64, 6, 6):
            with T.block("inverse"):
                vh, vw, p, co, r_a, r_b = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5])
                T.reads(bgemm[r_a, r_b, p, co], A[T.min(r_a, r_b) : T.max(r_a, r_b) + 1, T.min(vh, vw) : T.max(vh, vw) + 1])
                T.writes(inverse[vh, vw, p, co])
                T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["vh", "vw", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_inverse.cuda"})
                with T.init():
                    inverse[vh, vw, p, co] = T.float32(0)
                inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * A[r_a, vh] * A[r_b, vw]
        # Scatter tiles back to NHWC, then bias add, residual add and ReLU.
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("conv2d_winograd"):
                n, h, w, co = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co])
                T.writes(conv2d_winograd[n, h, w, co])
                conv2d_winograd[n, h, w, co] = inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_winograd[ax0, ax1, ax2, ax3], p2[ax0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = conv2d_winograd[ax0, ax1, ax2, ax3] + p2[ax0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[ax0, ax1, ax2, ax3], p3[ax0, ax1, ax2, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = T_add[ax0, ax1, ax2, ax3] + p3[ax0, ax1, ax2, ax3]
        for i0, i1, i2, i3 in T.grid(1, 56, 56, 64):
            with T.block("T_relu"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add_1[ax0, ax1, ax2, ax3])
                T.writes(T_relu[ax0, ax1, ax2, ax3])
                T_relu[ax0, ax1, ax2, ax3] = T.max(T_add_1[ax0, ax1, ax2, ax3], T.float32(0))
# NOTE(review): machine-generated TVMScript fixture -- the expected TIR after a
# CUDA meta-schedule anchor trace has been applied to the winograd conv2d +
# bias-add + residual-add + relu workload. Only comments were added during
# review; the generated code must stay byte-identical so that structural
# equality checks against scheduler output keep passing.
@tvm.script.ir_module
class Conv2dWinogradAddResidualRelu_scheduled:
    @T.prim_func
    def main(p0: T.Buffer((1, 56, 56, 64), "float32"), p1: T.Buffer((6, 6, 64, 64), "float32"), p2: T.Buffer((1, 1, 1, 64), "float32"), p3: T.Buffer((1, 56, 56, 64), "float32"), T_relu: T.Buffer((1, 56, 56, 64), "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
        # body
        # with T.block("root")
        # Intermediate buffers; "local"/"shared" scopes are GPU storage classes.
        input_tile_local = T.alloc_buffer([6, 6, 196, 64], dtype="float32", scope="local")
        data_pack = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        bgemm = T.alloc_buffer([6, 6, 196, 64], dtype="float32")
        inverse = T.alloc_buffer([4, 4, 196, 64], dtype="float32")
        bgemm_local = T.alloc_buffer([6, 6, 196, 64], dtype="float32", scope="local")
        data_pack_shared = T.alloc_buffer([6, 6, 196, 64], dtype="float32", scope="shared")
        p1_shared = T.alloc_buffer([6, 6, 64, 64], dtype="float32", scope="shared")
        # Stage 1: gather 6x6 input tiles (with implicit padding via
        # T.if_then_else) and apply the winograd data transform; the p/ci
        # dimensions are fused and bound to blockIdx.x/threadIdx.x, the 6x6
        # transform loops are unrolled and the matrix B is inlined as a
        # constant T.Select chain.
        for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(98, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step":1024, "pragma_unroll_explicit":1}):
            for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                for ax0, ax1, ax2, ax3 in T.grid(6, 6, 1, 1):
                    with T.block("input_tile"):
                        eps, nu = T.axis.remap("SS", [ax0, ax1])
                        p = T.axis.spatial(196, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) // 896 * 14 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 112 // 8 + ax2)
                        ci = T.axis.spatial(64, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 896 // 112 * 8 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 8 + ax3)
                        T.reads(p0[p // 196, p % 196 // 14 * 4 + eps - 1, p % 14 * 4 + nu - 1, ci])
                        T.writes(input_tile_local[eps, nu, p, ci])
                        T.block_attr({"schedule_rule":"None"})
                        input_tile_local[eps, nu, p, ci] = T.if_then_else(1 <= p % 196 // 14 * 4 + eps and p % 196 // 14 * 4 + eps < 57 and 1 <= p % 14 * 4 + nu and p % 14 * 4 + nu < 57, p0[p // 196, p % 196 // 14 * 4 + eps - 1, p % 14 * 4 + nu - 1, ci], T.float32(0), dtype="float32")
                for i0 in T.unroll(6):
                    for i1 in T.unroll(6):
                        with T.block("data_pack_init"):
                            eps, nu = T.axis.remap("SS", [i0, i1])
                            p = T.axis.spatial(196, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) // 896 * 14 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 112 // 8)
                            ci = T.axis.spatial(64, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 896 // 112 * 8 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 8)
                            T.reads()
                            T.writes(data_pack[eps, nu, p, ci])
                            T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["eps", "nu", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_data_pack.cuda"})
                            data_pack[eps, nu, p, ci] = T.float32(0)
                        for i4 in T.unroll(6):
                            for i5 in T.unroll(6):
                                with T.block("data_pack_update"):
                                    eps, nu = T.axis.remap("SS", [i0, i1])
                                    p = T.axis.spatial(196, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) // 896 * 14 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 112 // 8)
                                    ci = T.axis.spatial(64, (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 896 // 112 * 8 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 128 + i2_0_i3_0_i2_1_i3_1_fused_1) % 8)
                                    r_a, r_b = T.axis.remap("RR", [i4, i5])
                                    T.reads(data_pack[eps, nu, p, ci], input_tile_local[r_a, r_b, p, ci])
                                    T.writes(data_pack[eps, nu, p, ci])
                                    T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["eps", "nu", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_data_pack.cuda"})
                                    data_pack[eps, nu, p, ci] = data_pack[eps, nu, p, ci] + input_tile_local[r_a, r_b, p, ci] * T.Select(r_a % 6 == 5 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 5 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 1, T.float32(0), T.Select(r_a % 6 == 5 and eps % 6 == 0, T.float32(0), T.Select(r_a % 6 == 4 and eps % 6 == 5, T.float32(1.5), T.Select(r_a % 6 == 4 and eps % 6 == 4, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 3, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 2, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 4 and eps % 6 == 0, T.float32(1), T.Select(r_a % 6 == 3 and eps % 6 == 5, T.float32(-2), T.Select(r_a % 6 == 3 and eps % 6 == 4, T.float32(-0.5), T.Select(r_a % 6 == 3 and eps % 6 == 3, T.float32(2), T.Select(r_a % 6 == 3 and eps % 6 == 2, T.float32(2.5), T.Select(r_a % 6 == 3 and eps % 6 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and eps % 6 == 0, T.float32(1.5), T.Select(r_a % 6 == 2 and eps % 6 == 5, T.float32(-1.5), T.Select(r_a % 6 == 2 and eps % 6 == 4, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 3, T.float32(-1), T.Select(r_a % 6 == 2 and eps % 6 == 2, T.float32(0.5), T.Select(r_a % 6 == 2 and eps % 6 == 1, T.float32(-2.5), T.Select(r_a % 6 == 2 and eps % 6 == 0, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 5, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 4, T.float32(0.5), T.Select(r_a % 6 == 1 and eps % 6 == 3, T.float32(-2), T.Select(r_a % 6 == 1 and eps % 6 == 2, T.float32(-1), T.Select(r_a % 6 == 1 and eps % 6 == 1, T.float32(1), T.Select(r_a % 6 == 1 and eps % 6 == 0, T.float32(-1.5), T.Select(r_a % 6 == 0 and eps % 6 == 5, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 4, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 3, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 2, T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 1, 
T.float32(0), T.Select(r_a % 6 == 0 and eps % 6 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 5 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 1, T.float32(0), T.Select(r_b % 6 == 5 and nu % 6 == 0, T.float32(0), T.Select(r_b % 6 == 4 and nu % 6 == 5, T.float32(1.5), T.Select(r_b % 6 == 4 and nu % 6 == 4, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 3, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 2, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 4 and nu % 6 == 0, T.float32(1), T.Select(r_b % 6 == 3 and nu % 6 == 5, T.float32(-2), T.Select(r_b % 6 == 3 and nu % 6 == 4, T.float32(-0.5), T.Select(r_b % 6 == 3 and nu % 6 == 3, T.float32(2), T.Select(r_b % 6 == 3 and nu % 6 == 2, T.float32(2.5), T.Select(r_b % 6 == 3 and nu % 6 == 1, T.float32(0.5), T.Select(r_b % 6 == 3 and nu % 6 == 0, T.float32(1.5), T.Select(r_b % 6 == 2 and nu % 6 == 5, T.float32(-1.5), T.Select(r_b % 6 == 2 and nu % 6 == 4, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 3, T.float32(-1), T.Select(r_b % 6 == 2 and nu % 6 == 2, T.float32(0.5), T.Select(r_b % 6 == 2 and nu % 6 == 1, T.float32(-2.5), T.Select(r_b % 6 == 2 and nu % 6 == 0, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 5, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 4, T.float32(0.5), T.Select(r_b % 6 == 1 and nu % 6 == 3, T.float32(-2), T.Select(r_b % 6 == 1 and nu % 6 == 2, T.float32(-1), T.Select(r_b % 6 == 1 and nu % 6 == 1, T.float32(1), T.Select(r_b % 6 == 1 and nu % 6 == 0, T.float32(-1.5), T.Select(r_b % 6 == 0 and nu % 6 == 5, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 4, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 3, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 2, T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 1, 
T.float32(0), T.Select(r_b % 6 == 0 and nu % 6 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))))))))))))))
        # Stage 2: batched GEMM over the transformed data; tiled with
        # vthread/threadIdx binding and shared-memory staging for both
        # data_pack and the (layout-free) weight buffer p1.
        for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(168, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step":1024, "pragma_unroll_explicit":1}):
            for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(4, thread="vthread.x"):
                for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(48, thread="threadIdx.x"):
                    for i0_3_init, i1_3_init, i2_3_init, i3_3_init, i0_4_init, i1_4_init, i2_4_init, i3_4_init in T.grid(1, 1, 14, 1, 1, 1, 1, 1):
                        with T.block("bgemm_init"):
                            eps = T.axis.spatial(6, i0_1_i1_1_i2_1_i3_1_fused // 2 * 3 + i0_2_i1_2_i2_2_i3_2_fused // 16 + i0_3_init + i0_4_init)
                            nu = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 28 + i1_3_init + i1_4_init)
                            p = T.axis.spatial(196, i0_0_i1_0_i2_0_i3_0_fused % 28 // 4 * 28 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 14 + i2_3_init + i2_4_init)
                            co = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 4 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 16 + i3_3_init + i3_4_init)
                            T.reads()
                            T.writes(bgemm_local[eps, nu, p, co])
                            T.block_attr({"layout_free_placeholders":[], "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
                            bgemm_local[eps, nu, p, co] = T.float32(0)
                    for i4_0 in T.serial(2):
                        for ax0_ax1_ax2_ax3_fused_0 in T.serial(28):
                            for ax0_ax1_ax2_ax3_fused_1 in T.thread_binding(48, thread="threadIdx.x"):
                                for ax0_ax1_ax2_ax3_fused_2 in T.vectorized(4):
                                    with T.block("data_pack_shared"):
                                        v0 = T.axis.spatial(6, (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) // 896)
                                        v1 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 28)
                                        v2 = T.axis.spatial(196, i0_0_i1_0_i2_0_i3_0_fused % 28 // 4 * 28 + (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) % 896 // 32)
                                        v3 = T.axis.spatial(64, i4_0 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) % 32)
                                        T.reads(data_pack[v0, v1, v2, v3])
                                        T.writes(data_pack_shared[v0, v1, v2, v3])
                                        data_pack_shared[v0, v1, v2, v3] = data_pack[v0, v1, v2, v3]
                        for ax0_ax1_ax2_ax3_fused_0 in T.serial(16):
                            for ax0_ax1_ax2_ax3_fused_1 in T.thread_binding(48, thread="threadIdx.x"):
                                for ax0_ax1_ax2_ax3_fused_2 in T.vectorized(4):
                                    with T.block("p1_shared"):
                                        v0 = T.axis.spatial(6, (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) // 512)
                                        v1 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 28)
                                        v2 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 4 * 16 + (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) % 512 // 32)
                                        v3 = T.axis.spatial(64, i4_0 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 192 + ax0_ax1_ax2_ax3_fused_1 * 4 + ax0_ax1_ax2_ax3_fused_2) % 32)
                                        T.reads(p1[v0, v1, v2, v3])
                                        T.writes(p1_shared[v0, v1, v2, v3])
                                        p1_shared[v0, v1, v2, v3] = p1[v0, v1, v2, v3]
                        for i4_1, i0_3, i1_3, i2_3, i3_3, i4_2, i0_4, i1_4, i2_4, i3_4 in T.grid(2, 1, 1, 14, 1, 16, 1, 1, 1, 1):
                            with T.block("bgemm_update"):
                                eps = T.axis.spatial(6, i0_1_i1_1_i2_1_i3_1_fused // 2 * 3 + i0_2_i1_2_i2_2_i3_2_fused // 16 + i0_3 + i0_4)
                                nu = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 28 + i1_3 + i1_4)
                                p = T.axis.spatial(196, i0_0_i1_0_i2_0_i3_0_fused % 28 // 4 * 28 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 14 + i2_3 + i2_4)
                                co = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 4 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 16 + i3_3 + i3_4)
                                ci = T.axis.reduce(64, i4_0 * 32 + i4_1 * 16 + i4_2)
                                T.reads(bgemm_local[eps, nu, p, co], data_pack_shared[eps, nu, p, ci], p1_shared[eps, nu, co, ci])
                                T.writes(bgemm_local[eps, nu, p, co])
                                T.block_attr({"layout_free_placeholders":[], "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "meta_schedule.tiling_structure":"SSSRRSRS"})
                                bgemm_local[eps, nu, p, co] = bgemm_local[eps, nu, p, co] + data_pack_shared[eps, nu, p, ci] * p1_shared[eps, nu, co, ci]
                    # Write the per-thread accumulator back to the global bgemm buffer.
                    for ax0, ax1, ax2, ax3 in T.grid(1, 1, 14, 1):
                        with T.block("bgemm_local"):
                            v0 = T.axis.spatial(6, i0_1_i1_1_i2_1_i3_1_fused // 2 * 3 + i0_2_i1_2_i2_2_i3_2_fused // 16 + ax0)
                            v1 = T.axis.spatial(6, i0_0_i1_0_i2_0_i3_0_fused // 28 + ax1)
                            v2 = T.axis.spatial(196, i0_0_i1_0_i2_0_i3_0_fused % 28 // 4 * 28 + i0_1_i1_1_i2_1_i3_1_fused % 2 * 14 + ax2)
                            v3 = T.axis.spatial(64, i0_0_i1_0_i2_0_i3_0_fused % 4 * 16 + i0_2_i1_2_i2_2_i3_2_fused % 16 + ax3)
                            T.reads(bgemm_local[v0, v1, v2, v3])
                            T.writes(bgemm[v0, v1, v2, v3])
                            bgemm[v0, v1, v2, v3] = bgemm_local[v0, v1, v2, v3]
        # Stage 3: inverse winograd transform producing 4x4 output tiles;
        # T.where guards the tail of the fused (and padded) p/co iteration.
        for i2_0_i3_0_i2_1_i3_1_fused_0 in T.thread_binding(25, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step":1024, "pragma_unroll_explicit":1}):
            for i2_0_i3_0_i2_1_i3_1_fused_1 in T.thread_binding(512, thread="threadIdx.x"):
                for i0 in T.unroll(4):
                    for i1 in T.unroll(4):
                        with T.block("inverse_init"):
                            T.where(i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1 < 12544)
                            vh, vw = T.axis.remap("SS", [i0, i1])
                            p = T.axis.spatial(196, (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) // 448 * 7 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 224 // 32)
                            co = T.axis.spatial(64, (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 448 // 224 * 32 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 32)
                            T.reads()
                            T.writes(inverse[vh, vw, p, co])
                            T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["vh", "vw", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_inverse.cuda"})
                            inverse[vh, vw, p, co] = T.float32(0)
                        for i4 in T.unroll(6):
                            for i5 in T.unroll(6):
                                with T.block("inverse_update"):
                                    T.where(i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1 < 12544)
                                    vh, vw = T.axis.remap("SS", [i0, i1])
                                    p = T.axis.spatial(196, (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) // 448 * 7 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 224 // 32)
                                    co = T.axis.spatial(64, (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 448 // 224 * 32 + (i2_0_i3_0_i2_1_i3_1_fused_0 * 512 + i2_0_i3_0_i2_1_i3_1_fused_1) % 32)
                                    r_a, r_b = T.axis.remap("RR", [i4, i5])
                                    T.reads(inverse[vh, vw, p, co], bgemm[r_a, r_b, p, co])
                                    T.writes(inverse[vh, vw, p, co])
                                    T.block_attr({"auto_scheduler_simplify_const_tensor_indices":["vh", "vw", "r_a", "r_b"], "schedule_rule":"meta_schedule.winograd_inverse.cuda"})
                                    inverse[vh, vw, p, co] = inverse[vh, vw, p, co] + bgemm[r_a, r_b, p, co] * T.Select(r_a % 6 == 5 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 5 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 5 and vh % 4 == 0, T.float32(0), T.Select(r_a % 6 == 4 and vh % 4 == 3, T.float32(-8), T.Select(r_a % 6 == 4 and vh % 4 == 2, T.float32(4), T.Select(r_a % 6 == 4 and vh % 4 == 1, T.float32(-2), T.Select(r_a % 6 == 4 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 3 and vh % 4 == 3, T.float32(0.125), T.Select(r_a % 6 == 3 and vh % 4 == 2, T.float32(0.25), T.Select(r_a % 6 == 3 and vh % 4 == 1, T.float32(0.5), T.Select(r_a % 6 == 3 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 3, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 1, T.float32(1), T.Select(r_a % 6 == 2 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 3, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 2, T.float32(1), T.Select(r_a % 6 == 1 and vh % 4 == 1, T.float32(-1), T.Select(r_a % 6 == 1 and vh % 4 == 0, T.float32(1), T.Select(r_a % 6 == 0 and vh % 4 == 3, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 2, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 1, T.float32(0), T.Select(r_a % 6 == 0 and vh % 4 == 0, T.float32(1), T.float32(0))))))))))))))))))))))))) * T.Select(r_b % 6 == 5 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 5 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 5 and vw % 4 == 0, T.float32(0), T.Select(r_b % 6 == 4 and vw % 4 == 3, T.float32(-8), T.Select(r_b % 6 == 4 and vw % 4 == 2, T.float32(4), T.Select(r_b % 6 == 4 and vw % 4 == 1, T.float32(-2), T.Select(r_b % 6 == 4 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 3 and vw % 4 == 3, T.float32(0.125), T.Select(r_b % 6 == 3 and vw % 4 == 2, T.float32(0.25), T.Select(r_b % 6 == 3 and vw % 4 == 1, T.float32(0.5), 
T.Select(r_b % 6 == 3 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 3, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 1, T.float32(1), T.Select(r_b % 6 == 2 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 3, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 2, T.float32(1), T.Select(r_b % 6 == 1 and vw % 4 == 1, T.float32(-1), T.Select(r_b % 6 == 1 and vw % 4 == 0, T.float32(1), T.Select(r_b % 6 == 0 and vw % 4 == 3, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 2, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 1, T.float32(0), T.Select(r_b % 6 == 0 and vw % 4 == 0, T.float32(1), T.float32(0)))))))))))))))))))))))))
        # Stage 4: fused epilogue -- untile + bias add (p2) + residual add
        # (p3) + relu, written directly into T_relu.
        for i0_i1_i2_i3_fused_0 in T.thread_binding(1568, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step":1024, "pragma_unroll_explicit":1}):
            for i0_i1_i2_i3_fused_1 in T.thread_binding(128, thread="threadIdx.x"):
                with T.block("conv2d_winograd"):
                    n = T.axis.spatial(1, 0)
                    h = T.axis.spatial(56, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) // 3584)
                    w = T.axis.spatial(56, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) % 3584 // 64)
                    co = T.axis.spatial(64, (i0_i1_i2_i3_fused_0 * 128 + i0_i1_i2_i3_fused_1) % 64)
                    T.reads(inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co], p2[n, 0, 0, co], p3[n, h, w, co])
                    T.writes(T_relu[n, h, w, co])
                    T_relu[n, h, w, co] = T.max(inverse[h % 4, w % 4, n * 196 + h // 4 * 14 + w // 4, co] + p2[n, 0, 0, co] + p3[n, h, w, co], T.float32(0))
# NOTE(review): machine-generated TVMScript fixture -- unscheduled int8 NHWC
# conv2d (1x1 kernel, 16x56x56x64 -> 16x56x56x256) followed by a quantization
# epilogue: subtract/add zero points, per-axis requantize, scalar bias (p7),
# clip to [0, 255], subtract output zero point (p8), and a final fixed-point
# rescale. Comments only were added in review; the generated code is kept
# byte-identical so structural comparisons keep passing.
@tvm.script.ir_module
class Conv2dInt8_with_predicate:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer(256, "int32"), p5: T.Buffer(256, "int32"), p6: T.Buffer(256, "int32"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")) -> None:
        # function attr dict
        T.func_attr({"tir.noalias": True, "global_symbol": "main"})
        # body
        # with T.block("root")
        pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
        conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        # pad_temp is a plain copy here (1x1 kernel needs no halo).
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
            with T.block("pad_temp"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1, i2_1, i3_1])
                T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
                pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
        # int8 x int8 -> int32 accumulation over the 64 input channels.
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
            with T.block("conv2d_nhwc"):
                nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
                T.writes(conv2d_nhwc[nn, yy, xx, ff])
                with T.init():
                    conv2d_nhwc[nn, yy, xx, ff] = 0
                conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
                T.writes(T_subtract[ax0, ax1, ax2, ax3])
                T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
        # Per-output-channel fixed-point requantization (p4/p5/p6 hold the
        # per-axis multiplier/shift parameters).
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("compute"):
                i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
                compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32")
        for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
                T.reads(p7[()], compute_1[ax0, ax1, ax2, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = p7[()] + compute_1[ax0, ax1, ax2, ax3]
        # Clamp to the uint8 value range while staying in int32 storage.
        for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
            with T.block("compute_1"):
                i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
                T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5])
                T.writes(compute_2[i0_5, i1_5, i2_5, i3_5])
                compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0)
        for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
                T.reads(compute_2[ax0, ax1, ax2, ax3], p8[0])
                T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
                T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p8[0]
        for i0_7, i1_7, i2_7, i3_7 in T.grid(16, 56, 56, 256):
            with T.block("compute_2"):
                i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7])
                T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8])
                T.writes(compute[i0_8, i1_8, i2_8, i3_8])
                compute[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1963325822, 31, 1, dtype="int32")
# NOTE(review): machine-generated TVMScript fixture -- same int8 conv2d +
# quantization pipeline as Conv2dInt8_with_predicate, extended with a second
# quantized input p9 that is independently rescaled, added to the main branch
# (a quantized residual add), and clipped. Comments only were added in
# review; the generated code is kept byte-identical.
@tvm.script.ir_module
class Conv2dInt8_with_predicate_target:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer(256, "int32"), p5: T.Buffer(256, "int32"), p6: T.Buffer(256, "int32"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        pad_temp = T.alloc_buffer([16, 56, 56, 64], dtype="int8")
        conv2d_nhwc = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_subtract_1 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        compute_4 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        T_add_2 = T.alloc_buffer([16, 56, 56, 256], dtype="int32")
        # pad_temp is a plain copy here (1x1 kernel needs no halo).
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 64):
            with T.block("pad_temp"):
                i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(p0[i0_1, i1_1, i2_1, i3_1])
                T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1])
                pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1]
        # int8 x int8 -> int32 accumulation over the 64 input channels.
        for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 56, 56, 256, 1, 1, 64):
            with T.block("conv2d_nhwc"):
                nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
                T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc])
                T.writes(conv2d_nhwc[nn, yy, xx, ff])
                with T.init():
                    conv2d_nhwc[nn, yy, xx, ff] = 0
                conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32")
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3])
                T.writes(T_subtract[ax0, ax1, ax2, ax3])
                T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3]
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("T_add"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3])
                T.writes(T_add[ax0, ax1, ax2, ax3])
                T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3]
        # Per-output-channel fixed-point requantization (p4/p5/p6 parameters).
        for i0, i1, i2, i3 in T.grid(16, 56, 56, 256):
            with T.block("compute"):
                i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2])
                T.writes(compute_1[i0_2, i1_2, i2_2, i3_2])
                compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32")
        for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 56, 56, 256):
            with T.block("T_add_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3])
                T.reads(p7[()], compute_1[ax0, ax1, ax2, ax3])
                T.writes(T_add_1[ax0, ax1, ax2, ax3])
                T_add_1[ax0, ax1, ax2, ax3] = p7[()] + compute_1[ax0, ax1, ax2, ax3]
        for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 56, 56, 256):
            with T.block("compute_1"):
                i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4])
                T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5])
                T.writes(compute_2[i0_5, i1_5, i2_5, i3_5])
                compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0)
        for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 56, 56, 256):
            with T.block("T_subtract_1"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6])
                T.reads(compute_2[ax0, ax1, ax2, ax3], p8[0])
                T.writes(T_subtract_1[ax0, ax1, ax2, ax3])
                T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p8[0]
        # Rescale the conv branch and the residual branch (p9) to a common
        # scale, add them, then clip the sum back to [0, 255].
        for i0_7, i1_7, i2_7, i3_7 in T.grid(16, 56, 56, 256):
            with T.block("compute_2"):
                i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7])
                T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8])
                T.writes(compute_3[i0_8, i1_8, i2_8, i3_8])
                compute_3[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1457846997, 31, 0, dtype="int32")
        for i0_9, i1_9, i2_9, i3_9 in T.grid(16, 56, 56, 256):
            with T.block("compute_3"):
                i0_10, i1_10, i2_10, i3_10 = T.axis.remap("SSSS", [i0_9, i1_9, i2_9, i3_9])
                T.reads(p9[i0_10, i1_10, i2_10, i3_10])
                T.writes(compute_4[i0_10, i1_10, i2_10, i3_10])
                compute_4[i0_10, i1_10, i2_10, i3_10] = T.q_multiply_shift(p9[i0_10, i1_10, i2_10, i3_10], 2101000910, 31, 0, dtype="int32")
        for i0_11, i1_11, i2_11, i3_11 in T.grid(16, 56, 56, 256):
            with T.block("T_add_2"):
                ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_11, i1_11, i2_11, i3_11])
                T.reads(compute_3[ax0, ax1, ax2, ax3], compute_4[ax0, ax1, ax2, ax3])
                T.writes(T_add_2[ax0, ax1, ax2, ax3])
                T_add_2[ax0, ax1, ax2, ax3] = compute_3[ax0, ax1, ax2, ax3] + compute_4[ax0, ax1, ax2, ax3]
        for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256):
            with T.block("compute_4"):
                i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12])
                T.reads(T_add_2[i0_13, i1_13, i2_13, i3_13])
                T.writes(compute[i0_13, i1_13, i2_13, i3_13])
                compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(T_add_2[i0_13, i1_13, i2_13, i3_13], 255), 0)
# NOTE(review): machine-generated TVMScript fixture -- the expected TIR after
# the anchor trace is applied to Conv2dInt8_with_predicate_target: the conv2d
# is reindexed to a [50176, 256] GEMM and tensorized for tensor cores (wmma
# buffers and "meta_schedule.auto_tensorize" hints), with the whole
# quantization epilogue fused into the final predicated write-back block.
# Comments only were added in review; the generated code is kept
# byte-identical.
@tvm.script.ir_module
class Conv2dInt8_with_predicate_scheduled:
    @T.prim_func
    def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer(256, "int32"), p5: T.Buffer(256, "int32"), p6: T.Buffer(256, "int32"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.unroll_explicit":1024})
            # Staging buffers for the tensor-core pipeline: shared-memory
            # tiles plus wmma matrix_a/matrix_b/accumulator fragments.
            conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared")
            conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator")
            pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared")
            p1_reindex_shared = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="shared")
            pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a")
            p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b")
            for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"):
                for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"):
                    for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"):
                        # Cooperative fetch of input and weight tiles into shared memory.
                        for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2):
                            for ax0_ax1_fused in T.serial(1024):
                                with T.block("pad_temp_reindex_shared"):
                                    v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0_ax1_fused // 32)
                                    v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32)
                                    T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                                    T.writes(pad_temp_reindex_shared[v0, v1])
                                    T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]], "meta_schedule.cooperative_fetch":4})
                                    pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]
                            for ax0_ax1_ax2_ax3_fused in T.serial(2048):
                                with T.block("p1_reindex_shared"):
                                    v0 = T.axis.spatial(1, 0)
                                    v1 = T.axis.spatial(1, 0)
                                    v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused // 32)
                                    v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32)
                                    T.reads(p1[v2, v0, v1, v3])
                                    T.writes(p1_reindex_shared[v0, v1, v2, v3])
                                    T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]], "meta_schedule.cooperative_fetch":3})
                                    p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3]
                            # Load 16x16 fragments from shared memory into wmma registers.
                            for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 2):
                                for ax0_0_1, ax1_0_1 in T.grid(1, 1):
                                    with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"):
                                        v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax0_0_1)
                                        v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1 + ax1_0_1)
                                        T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                        T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                        T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_a_shared"})
                                        for ax0_1_1, ax1_1_1 in T.grid(16, 16):
                                            with T.block("pad_temp_reindex_shared_wmma.matrix_a"):
                                                v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1])
                                                T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
                                                T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
                                                pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
                                for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1):
                                    with T.block("p1_reindex_shared_wmma.matrix_b_o"):
                                        v0, v1 = T.axis.remap("SS", [ax0, ax1])
                                        v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0)
                                        v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1 + ax3_0)
                                        T.reads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                        T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                        T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_b_trans_shared"})
                                        for ax2_1, ax3_1 in T.grid(16, 16):
                                            with T.block("p1_reindex_shared_wmma.matrix_b"):
                                                v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1])
                                                T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
                                                T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i])
                                                p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]
                                # 16x16x16 s8s8s32 mma over the loaded fragments.
                                for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2):
                                    with T.block("conv2d_nhwc_o"):
                                        v0 = T.axis.reduce(1, ax0_0 + ax0_1 + ax0_2)
                                        v1 = T.axis.reduce(1, ax1_0 + ax1_1 + ax1_2)
                                        v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4)
                                        v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4)
                                        v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2)
                                        T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16])
                                        T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16])
                                        T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1})
                                        with T.init():
                                            for ax2_1, ax3_1 in T.grid(16, 16):
                                                with T.block("conv2d_nhwc_init"):
                                                    v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1])
                                                    T.reads()
                                                    T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init])
                                                    conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0
                                        for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16):
                                            with T.block("conv2d_nhwc"):
                                                v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1])
                                                T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i])
                                                T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i])
                                                T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
                                                conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.Cast("int32", pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i]) * T.Cast("int32", p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i])
                        # Store accumulator fragments back to shared memory.
                        for ax0_0, ax1_0 in T.grid(1, 2):
                            with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"):
                                v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax0_0)
                                v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0)
                                T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                                T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_s32_shared"})
                                for ax0_1, ax1_1 in T.grid(16, 16):
                                    with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"):
                                        v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
                                        T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
                                        T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
                                        conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]
                        # Fused, predicated epilogue (this is the "with_predicate"
                        # part): the full quantization chain is applied inline and
                        # written straight to the output buffer.
                        for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2):
                            with T.block("conv2d_nhwc_reindex_shared"):
                                T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64)
                                v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0)
                                v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + ax1_1 * 64 + ax1_2 * 2 + ax1_3))
                                T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0], p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                                T.writes(compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1])
                                compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] = T.max(T.min(T.q_multiply_shift(T.max(T.min(p7[()] + T.q_multiply_shift_per_axis(conv2d_nhwc_reindex_shared[v0, v1] - p2[0, 0, 0, v1] + p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], 31, False, True, dtype="int32"), 255), 0) - p8[0], 1457846997, 31, 0, dtype="int32") + T.q_multiply_shift(p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1], 2101000910, 31, 0, dtype="int32"), 255), 0)
# fmt: on
def verify(anchor_mod, anchor_trace_fun, target_mod, target, ref):
    """Replay a trace on ``anchor_mod``, transplant it onto ``target_mod``,
    and assert the scheduled result is structurally equal to ``ref``.

    ``anchor_trace_fun`` performs schedule calls on the anchor Schedule so
    that its trace can be recorded and reapplied via
    ``ms.trace_apply.schedule_using_anchor_trace``.
    """
    # Record the trace by replaying the schedule function on the anchor module.
    anchor = Schedule(anchor_mod)
    anchor_trace_fun(anchor)
    # Apply the recorded anchor trace to the (fused) target module.
    target_sch = Schedule(target_mod)
    ms.trace_apply.schedule_using_anchor_trace(target_sch, anchor.trace, Target(target))
    tvm.ir.assert_structural_equal(ref, target_sch.mod)
def test_dense_add_cpu():
    """Anchor trace tuned on bare Dense is transplanted onto Dense+Add for LLVM CPU."""

    def apply_anchor_trace(sch: Schedule) -> None:
        # Generated meta-schedule trace: SSRSRS tiling of the matmul, a global
        # write-cache, parallel/vectorize/unroll annotations, and a post-proc
        # layout rewrite of the weight buffer.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        # Split the three matmul loops (i, j, k) by the sampled tile sizes.
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8 = sch.sample_perfect_tile(
            loop=l2, n=4, max_innermost_factor=64, decision=[2, 8, 4, 2]
        )
        l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8], preserve_unit_iters=True)
        v13, v14, v15, v16 = sch.sample_perfect_tile(
            loop=l3, n=4, max_innermost_factor=64, decision=[2, 1, 1, 64]
        )
        l17, l18, l19, l20 = sch.split(
            loop=l3, factors=[v13, v14, v15, v16], preserve_unit_iters=True
        )
        v21, v22 = sch.sample_perfect_tile(loop=l4, n=2, max_innermost_factor=64, decision=[128, 1])
        l23, l24 = sch.split(loop=l4, factors=[v21, v22], preserve_unit_iters=True)
        sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
        # Cache the matmul output in a global staging buffer.
        b25 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global")
        sch.reverse_compute_at(block=b25, loop=l17, preserve_unit_loops=True, index=-1)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.parallel", ann_val=160)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.vectorize", ann_val=64)
        v26 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=0
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v26)
        # Post-processing: materialize the annotations as actual loop transforms.
        sch.enter_postproc()
        b27 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b27, ann_key="meta_schedule.unroll_explicit")
        b28, b29 = sch.get_child_blocks(b27)
        l30, l31, l32, l33, l34, l35, l36, l37, l38, l39 = sch.get_loops(block=b28)
        l40 = sch.fuse(l30, l31, preserve_unit_iters=True)
        sch.parallel(loop=l40)
        l41 = sch.fuse(l39, preserve_unit_iters=True)
        sch.vectorize(loop=l41)
        l42, l43, l44 = sch.get_loops(block=b29)
        l45 = sch.fuse(l42, preserve_unit_iters=True)
        sch.parallel(loop=l45)
        l46 = sch.fuse(l44, preserve_unit_iters=True)
        sch.vectorize(loop=l46)
        b47 = sch.get_block(name="T_matmul_NT", func_name="main")
        l48, l49, l50, l51, l52, l53, l54, l55, l56 = sch.get_loops(block=b47)
        b57 = sch.decompose_reduction(block=b47, loop=l51)
        # Rewrite the weight buffer layout ((i0, i1) -> (i0//64, i1, i0%64))
        # and mark the copy block as layout-rewrite pre-processing.
        b58 = sch.get_block(name="T_matmul_NT_update", func_name="main")
        b59 = sch.cache_read(block=b58, read_buffer_index=2, storage_scope="global")
        sch.transform_layout(
            block=b58,
            buffer=("read", 2),
            index_map=tvm.tir.IndexMap.from_func(
                lambda i0, i1: (
                    floordiv(i0, 64),
                    i1,
                    floormod(i0, 64),
                ),
                inverse_index_map=lambda i0, i1, i2: (
                    ((i0 * 64) + i2),
                    i1,
                ),
                index_dtype="int32",
            ),
            pad_value=None,
        )
        sch.annotate(block_or_loop=b59, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=1)

    verify(Dense, apply_anchor_trace, DenseAdd, "llvm", DenseAdd_scheduled_cpu)
def test_dense_add_cpu_no_write_cache():
    """Same Dense -> Dense+Add transplant as above, but the anchor trace has no write cache."""

    def apply_trace(sch):
        # Generated meta-schedule trace: SSRSRS tiling without a cache_write stage.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8 = sch.sample_perfect_tile(
            loop=l2, n=4, max_innermost_factor=64, decision=[4, 4, 4, 2]
        )
        l9, l10, l11, l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8], preserve_unit_iters=True)
        v13, v14, v15, v16 = sch.sample_perfect_tile(
            loop=l3, n=4, max_innermost_factor=64, decision=[1, 1, 4, 32]
        )
        l17, l18, l19, l20 = sch.split(
            loop=l3, factors=[v13, v14, v15, v16], preserve_unit_iters=True
        )
        v21, v22 = sch.sample_perfect_tile(loop=l4, n=2, max_innermost_factor=64, decision=[8, 16])
        l23, l24 = sch.split(loop=l4, factors=[v21, v22], preserve_unit_iters=True)
        sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.parallel", ann_val=160)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.vectorize", ann_val=64)
        v25 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=1
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v25)
        # Post-processing: lower annotations into parallel/vectorize/unroll pragmas.
        sch.enter_postproc()
        b26 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b26, ann_key="meta_schedule.unroll_explicit")
        (b27,) = sch.get_child_blocks(b26)
        l28, l29, l30, l31, l32, l33, l34, l35, l36, l37 = sch.get_loops(block=b27)
        l38 = sch.fuse(l28, l29, l30, l31, preserve_unit_iters=True)
        sch.parallel(loop=l38)
        l39 = sch.fuse(l37, preserve_unit_iters=True)
        sch.vectorize(loop=l39)
        sch.annotate(block_or_loop=l38, ann_key="pragma_auto_unroll_max_step", ann_val=16)
        sch.annotate(block_or_loop=l38, ann_key="pragma_unroll_explicit", ann_val=1)
        b40 = sch.get_block(name="T_matmul_NT", func_name="main")
        l41, l42, l43, l44, l45, l46, l47 = sch.get_loops(block=b40)
        b48 = sch.decompose_reduction(block=b40, loop=l42)
        # Tiled layout rewrite of the weight buffer into a 4-D packed form.
        b49 = sch.get_block(name="T_matmul_NT_update", func_name="main")
        b50 = sch.cache_read(block=b49, read_buffer_index=2, storage_scope="global")
        sch.transform_layout(
            block=b49,
            buffer=("read", 2),
            index_map=tvm.tir.IndexMap.from_func(
                lambda i0, i1: (
                    floordiv(i1, 16),
                    floordiv(i0, 32),
                    floormod(i1, 16),
                    floormod(i0, 32),
                ),
                inverse_index_map=lambda i0, i1, i2, i3: (
                    ((i1 * 32) + i3),
                    ((i0 * 16) + i2),
                ),
                index_dtype="int32",
            ),
            pad_value=None,
        )
        sch.annotate(block_or_loop=b50, ann_key="meta_schedule.layout_rewrite_preproc", ann_val=1)

    verify(Dense, apply_trace, DenseAdd, "llvm", DenseAdd_cpu_no_write_cache)
def test_dense_add_gpu():
    """Anchor trace tuned on bare Dense is transplanted onto Dense+Add for CUDA."""

    def apply_anchor_trace(sch: Schedule) -> None:
        # Generated meta-schedule trace: SSSRRSRS GPU tiling with
        # blockIdx/vthread/threadIdx bindings, shared-memory cooperative
        # fetches for both operands, and a local write cache.
        b0 = sch.get_block(name="T_matmul_NT", func_name="main")
        b1 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b0, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        l2, l3, l4 = sch.get_loops(block=b0)
        v5, v6, v7, v8, v9 = sch.sample_perfect_tile(
            loop=l2, n=5, max_innermost_factor=64, decision=[8, 1, 16, 1, 1]
        )
        l10, l11, l12, l13, l14 = sch.split(
            loop=l2, factors=[v5, v6, v7, v8, v9], preserve_unit_iters=True
        )
        v15, v16, v17, v18, v19 = sch.sample_perfect_tile(
            loop=l3, n=5, max_innermost_factor=64, decision=[4, 1, 8, 4, 1]
        )
        l20, l21, l22, l23, l24 = sch.split(
            loop=l3, factors=[v15, v16, v17, v18, v19], preserve_unit_iters=True
        )
        v25, v26, v27 = sch.sample_perfect_tile(
            loop=l4, n=3, max_innermost_factor=64, decision=[32, 1, 4]
        )
        l28, l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27], preserve_unit_iters=True)
        sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24)
        # Bind the outer spatial tiles to the CUDA thread hierarchy.
        l31 = sch.fuse(l10, l20, preserve_unit_iters=True)
        sch.bind(loop=l31, thread_axis="blockIdx.x")
        l32 = sch.fuse(l11, l21, preserve_unit_iters=True)
        sch.bind(loop=l32, thread_axis="vthread.x")
        l33 = sch.fuse(l12, l22, preserve_unit_iters=True)
        sch.bind(loop=l33, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b0, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=16
        )
        sch.annotate(
            block_or_loop=b0, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=256
        )
        # Local accumulator cache plus shared-memory staging for both inputs.
        b34 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local")
        sch.reverse_compute_at(block=b34, loop=l33, preserve_unit_loops=True, index=-1)
        b35 = sch.cache_read(
            block=b0, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b0]
        )
        sch.compute_at(block=b35, loop=l28, preserve_unit_loops=True, index=-1)
        l36, l37, l38, l39, l40, l41 = sch.get_loops(block=b35)
        l42 = sch.fuse(l40, l41, preserve_unit_iters=True)
        v43 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=1
        )
        sch.annotate(block_or_loop=b35, ann_key="meta_schedule.cooperative_fetch", ann_val=v43)
        b44 = sch.cache_read(
            block=b0, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b0]
        )
        sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True, index=-1)
        l45, l46, l47, l48, l49, l50 = sch.get_loops(block=b44)
        l51 = sch.fuse(l49, l50, preserve_unit_iters=True)
        v52 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v52)
        v53 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=2,
        )
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.unroll_explicit", ann_val=v53)
        # Post-processing: expand cooperative fetches into vectorized,
        # threadIdx-bound copy loops and attach unroll pragmas.
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b35, ann_key="meta_schedule.cooperative_fetch")
        l54, l55, l56, l57, l58 = sch.get_loops(block=b35)
        l59, l60, l61 = sch.split(loop=l58, factors=[None, 128, 2], preserve_unit_iters=True)
        sch.vectorize(loop=l61)
        sch.bind(loop=l60, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch")
        l62, l63, l64, l65, l66 = sch.get_loops(block=b44)
        l67, l68, l69 = sch.split(loop=l66, factors=[None, 128, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l69)
        sch.bind(loop=l68, thread_axis="threadIdx.x")
        b70 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b70, ann_key="meta_schedule.unroll_explicit")
        b71, b72, b73, b74 = sch.get_child_blocks(b70)
        l75, l76, l77, l78, l79, l80, l81 = sch.get_loops(block=b71)
        sch.annotate(block_or_loop=l75, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l75, ann_key="pragma_unroll_explicit", ann_val=1)
        l82, l83, l84, l85, l86, l87, l88 = sch.get_loops(block=b72)
        sch.annotate(block_or_loop=l82, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l82, ann_key="pragma_unroll_explicit", ann_val=1)
        l89, l90, l91, l92, l93, l94, l95, l96, l97, l98 = sch.get_loops(block=b73)
        sch.annotate(block_or_loop=l89, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l89, ann_key="pragma_unroll_explicit", ann_val=1)
        l99, l100, l101, l102, l103 = sch.get_loops(block=b74)
        sch.annotate(block_or_loop=l99, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l99, ann_key="pragma_unroll_explicit", ann_val=1)
        b104 = sch.get_block(name="T_matmul_NT", func_name="main")
        l105, l106, l107, l108, l109, l110, l111, l112, l113, l114 = sch.get_loops(block=b104)
        b115 = sch.decompose_reduction(block=b104, loop=l108)

    verify(Dense, apply_anchor_trace, DenseAdd, "cuda", DenseAdd_scheduled_gpu)
def test_conv2d_int8_tensorcore():
    """Int8 conv2d anchor trace (wmma tensor-core schedule) transplanted onto the fused target."""

    def apply_trace(sch):
        # Generated meta-schedule trace. Outline:
        #   1. look up all blocks of the fused int8 conv2d epilogue chain,
        #   2. reindex + transform_layout to flatten conv2d into a 2-D matmul
        #      shape suitable for 16x16x16 wmma intrinsics,
        #   3. tile/blockize to the tensor-core shape and bind to the CUDA
        #      thread hierarchy,
        #   4. stage operands through shared memory and wmma fragments,
        #   5. inline the epilogue, then post-process and tensorize.
        b0 = sch.get_block(name="pad_temp", func_name="main")
        b1 = sch.get_block(name="conv2d_nhwc", func_name="main")
        b2 = sch.get_block(name="T_subtract", func_name="main")
        b3 = sch.get_block(name="T_add", func_name="main")
        b4 = sch.get_block(name="T_cast", func_name="main")
        b5 = sch.get_block(name="T_multiply", func_name="main")
        b6 = sch.get_block(name="T_add_1", func_name="main")
        b7 = sch.get_block(name="T_right_shift", func_name="main")
        b8 = sch.get_block(name="T_cast_1", func_name="main")
        b9 = sch.get_block(name="T_add_2", func_name="main")
        b10 = sch.get_block(name="compute", func_name="main")
        b11 = sch.get_block(name="T_cast_2", func_name="main")
        b12 = sch.get_block(name="T_cast_3", func_name="main")
        b13 = sch.get_block(name="T_subtract_1", func_name="main")
        b14 = sch.get_block(name="compute_1", func_name="main")
        b15 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        # Reindex conv2d I/O, then collapse (nn, yy, xx) into one spatial axis.
        b16 = sch.reindex(block=b1, buffer=("write", 0))
        b17 = sch.reindex(block=b1, buffer=("read", 0))
        b18 = sch.reindex(block=b1, buffer=("read", 1))
        sch.transform_layout(
            block=b1,
            buffer=("read", 0),
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("read", 1),
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("write", 0),
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
            pad_value=None,
        )
        sch.transform_block_layout(
            block=b16,
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
        )
        sch.transform_block_layout(
            block=b17,
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b18,
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b1,
            index_map=lambda nn, yy, xx, ff, ry, rx, rc: (
                ry,
                rx,
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
                rc,
            ),
        )
        # Peel 16x16x16 inner tiles and blockize them for wmma tensorization.
        l19, l20, l21, l22, l23 = sch.get_loops(block=b1)
        l24, l25 = sch.split(loop=l23, factors=[None, 16], preserve_unit_iters=True)
        l26, l27 = sch.split(loop=l22, factors=[None, 16], preserve_unit_iters=True)
        l28, l29 = sch.split(loop=l21, factors=[None, 16], preserve_unit_iters=True)
        l30, l31, l32, l33, l34, l35, l36, l37 = sch.get_loops(block=b1)
        sch.reorder(l34, l36, l29, l27, l25)
        b38 = sch.blockize(target=l29)
        sch.annotate(
            block_or_loop=b38,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_sync_16x16x16_s8s8s32_trans",
        )
        sch.annotate(
            block_or_loop=b38,
            ann_key="meta_schedule.auto_tensorize_init",
            ann_val="wmma_fill_16x16x16_s32",
        )
        sch.annotate(block_or_loop=b38, ann_key="warp_execution", ann_val=1)
        # Sample tile sizes for the outer loop nest and bind to the thread hierarchy.
        l39, l40, l41, l42, l43 = sch.get_loops(block=b38)
        v44, v45, v46 = sch.sample_perfect_tile(
            loop=l39, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l47, l48, l49 = sch.split(loop=l39, factors=[v44, v45, v46], preserve_unit_iters=True)
        v50, v51, v52 = sch.sample_perfect_tile(
            loop=l40, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l53, l54, l55 = sch.split(loop=l40, factors=[v50, v51, v52], preserve_unit_iters=True)
        v56, v57, v58, v59, v60 = sch.sample_perfect_tile(
            loop=l41, n=5, max_innermost_factor=4, decision=[392, 1, 8, 1, 1]
        )
        l61, l62, l63, l64, l65 = sch.split(
            loop=l41, factors=[v56, v57, v58, v59, v60], preserve_unit_iters=True
        )
        v66, v67, v68, v69, v70 = sch.sample_perfect_tile(
            loop=l42, n=5, max_innermost_factor=4, decision=[8, 1, 2, 1, 1]
        )
        l71, l72, l73, l74, l75 = sch.split(
            loop=l42, factors=[v66, v67, v68, v69, v70], preserve_unit_iters=True
        )
        v76, v77, v78 = sch.sample_perfect_tile(
            loop=l43, n=3, max_innermost_factor=4, decision=[2, 1, 2]
        )
        l79, l80, l81 = sch.split(loop=l43, factors=[v76, v77, v78], preserve_unit_iters=True)
        sch.reorder(
            l61,
            l71,
            l62,
            l72,
            l63,
            l73,
            l47,
            l53,
            l79,
            l48,
            l54,
            l80,
            l64,
            l74,
            l49,
            l55,
            l81,
            l65,
            l75,
        )
        l82 = sch.fuse(l61, l71, preserve_unit_iters=True)
        sch.bind(loop=l82, thread_axis="blockIdx.x")
        l83 = sch.fuse(l62, l72, preserve_unit_iters=True)
        sch.bind(loop=l83, thread_axis="vthread.x")
        l84 = sch.fuse(l63, l73, preserve_unit_iters=True)
        sch.bind(loop=l84, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b38, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
        )
        sch.annotate(
            block_or_loop=b38, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
        )
        # Stage the accumulator through shared memory and a wmma.accumulator fragment.
        b85 = sch.cache_write(block=b38, write_buffer_index=0, storage_scope="shared")
        sch.reverse_compute_at(block=b85, loop=l83, preserve_unit_loops=True, index=-1)
        b86 = sch.cache_write(block=b38, write_buffer_index=0, storage_scope="wmma.accumulator")
        sch.reverse_compute_at(block=b86, loop=l84, preserve_unit_loops=True, index=-1)
        v87 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=0,
        )
        sch.annotate(block_or_loop=b85, ann_key="meta_schedule.cooperative_fetch", ann_val=v87)
        sch.reverse_compute_inline(block=b16)
        l88, l89, l90, l91, l92 = sch.get_loops(block=b86)
        l93, l94 = sch.split(loop=l92, factors=[None, 16], preserve_unit_iters=True)
        l95, l96 = sch.split(loop=l91, factors=[None, 16], preserve_unit_iters=True)
        l97, l98, l99, l100, l101, l102, l103 = sch.get_loops(block=b86)
        sch.reorder(l102, l96, l94)
        b104 = sch.blockize(target=l96)
        sch.annotate(
            block_or_loop=b104,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_store_16x16x16_s32_shared",
        )
        # Cooperative-fetch both conv2d operands into shared memory.
        b105 = sch.cache_read(
            block=b38, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b38]
        )
        sch.compute_at(block=b105, loop=l79, preserve_unit_loops=True, index=-1)
        l106, l107, l108, l109, l110, l111, l112, l113 = sch.get_loops(block=b105)
        l114 = sch.fuse(l112, l113, preserve_unit_iters=True)
        v115 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=5,
        )
        sch.annotate(block_or_loop=b105, ann_key="meta_schedule.cooperative_fetch", ann_val=v115)
        b116 = sch.cache_read(
            block=b38, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b38]
        )
        sch.compute_at(block=b116, loop=l79, preserve_unit_loops=True, index=-1)
        l117, l118, l119, l120, l121, l122, l123, l124, l125, l126 = sch.get_loops(block=b116)
        l127 = sch.fuse(l123, l124, l125, l126, preserve_unit_iters=True)
        v128 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=4,
        )
        sch.annotate(block_or_loop=b116, ann_key="meta_schedule.cooperative_fetch", ann_val=v128)
        # Load shared-memory tiles into wmma.matrix_a / wmma.matrix_b fragments.
        b129 = sch.cache_read(block=b38, read_buffer_index=0, storage_scope="wmma.matrix_a")
        sch.compute_at(block=b129, loop=l80, preserve_unit_loops=True, index=-1)
        l130, l131, l132, l133, l134, l135, l136, l137, l138, l139, l140 = sch.get_loops(block=b129)
        l141, l142 = sch.split(loop=l140, factors=[None, 16], preserve_unit_iters=True)
        l143, l144 = sch.split(loop=l139, factors=[None, 16], preserve_unit_iters=True)
        (
            l145,
            l146,
            l147,
            l148,
            l149,
            l150,
            l151,
            l152,
            l153,
            l154,
            l155,
            l156,
            l157,
        ) = sch.get_loops(block=b129)
        sch.reorder(l156, l144, l142)
        b158 = sch.blockize(target=l144)
        sch.annotate(
            block_or_loop=b158,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_a_shared",
        )
        b159 = sch.cache_read(block=b38, read_buffer_index=1, storage_scope="wmma.matrix_b")
        sch.compute_at(block=b159, loop=l80, preserve_unit_loops=True, index=-1)
        (
            l160,
            l161,
            l162,
            l163,
            l164,
            l165,
            l166,
            l167,
            l168,
            l169,
            l170,
            l171,
            l172,
        ) = sch.get_loops(block=b159)
        l173, l174 = sch.split(loop=l172, factors=[None, 16], preserve_unit_iters=True)
        l175, l176 = sch.split(loop=l171, factors=[None, 16], preserve_unit_iters=True)
        (
            l177,
            l178,
            l179,
            l180,
            l181,
            l182,
            l183,
            l184,
            l185,
            l186,
            l187,
            l188,
            l189,
            l190,
            l191,
        ) = sch.get_loops(block=b159)
        sch.reorder(l190, l176, l174)
        b192 = sch.blockize(target=l176)
        sch.annotate(
            block_or_loop=b192,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_b_trans_shared",
        )
        # Inline the reindex copies and the entire elementwise epilogue chain.
        sch.compute_inline(block=b17)
        sch.compute_inline(block=b18)
        sch.storage_align(block=b105, buffer_index=0, axis=-2, factor=32, offset=16)
        sch.storage_align(block=b116, buffer_index=0, axis=-2, factor=32, offset=16)
        sch.reverse_compute_inline(block=b14)
        sch.reverse_compute_inline(block=b13)
        sch.reverse_compute_inline(block=b12)
        sch.reverse_compute_inline(block=b11)
        sch.reverse_compute_inline(block=b10)
        sch.reverse_compute_inline(block=b9)
        sch.reverse_compute_inline(block=b8)
        sch.reverse_compute_inline(block=b7)
        sch.reverse_compute_inline(block=b6)
        sch.reverse_compute_inline(block=b5)
        sch.reverse_compute_inline(block=b4)
        sch.reverse_compute_inline(block=b3)
        sch.reverse_compute_inline(block=b2)
        sch.compute_inline(block=b0)
        v193 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=3,
        )
        sch.annotate(block_or_loop=b15, ann_key="meta_schedule.unroll_explicit", ann_val=v193)
        # Post-processing: expand cooperative fetches, attach unroll pragmas,
        # decompose the reduction, and tensorize every wmma-marked block.
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b85, ann_key="meta_schedule.cooperative_fetch")
        l194, l195, l196, l197 = sch.get_loops(block=b85)
        l198, l199 = sch.split(loop=l197, factors=[None, 16], preserve_unit_iters=True)
        sch.bind(loop=l199, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b105, ann_key="meta_schedule.cooperative_fetch")
        l200, l201, l202, l203, l204, l205, l206 = sch.get_loops(block=b105)
        l207, l208, l209 = sch.split(loop=l206, factors=[None, 16, 16], preserve_unit_iters=True)
        sch.vectorize(loop=l209)
        sch.bind(loop=l208, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b116, ann_key="meta_schedule.cooperative_fetch")
        l210, l211, l212, l213, l214, l215, l216 = sch.get_loops(block=b116)
        l217, l218, l219 = sch.split(loop=l216, factors=[None, 16, 8], preserve_unit_iters=True)
        sch.vectorize(loop=l219)
        sch.bind(loop=l218, thread_axis="threadIdx.x")
        b220 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b220, ann_key="meta_schedule.unroll_explicit")
        b221, b222, b223, b224, b225, b226, b227 = sch.get_child_blocks(b220)
        l228, l229, l230, l231, l232, l233, l234, l235, l236 = sch.get_loops(block=b221)
        sch.annotate(block_or_loop=l228, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l228, ann_key="pragma_unroll_explicit", ann_val=1)
        l237, l238, l239, l240, l241, l242, l243, l244, l245 = sch.get_loops(block=b222)
        sch.annotate(block_or_loop=l237, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l237, ann_key="pragma_unroll_explicit", ann_val=1)
        l246, l247, l248, l249, l250, l251, l252, l253, l254, l255, l256 = sch.get_loops(block=b223)
        sch.annotate(block_or_loop=l246, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l246, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l257,
            l258,
            l259,
            l260,
            l261,
            l262,
            l263,
            l264,
            l265,
            l266,
            l267,
            l268,
            l269,
        ) = sch.get_loops(block=b224)
        sch.annotate(block_or_loop=l257, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l257, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l270,
            l271,
            l272,
            l273,
            l274,
            l275,
            l276,
            l277,
            l278,
            l279,
            l280,
            l281,
            l282,
            l283,
            l284,
            l285,
        ) = sch.get_loops(block=b225)
        sch.annotate(block_or_loop=l270, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l270, ann_key="pragma_unroll_explicit", ann_val=1)
        l286, l287, l288, l289, l290 = sch.get_loops(block=b226)
        sch.annotate(block_or_loop=l286, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l286, ann_key="pragma_unroll_explicit", ann_val=1)
        l291, l292, l293, l294, l295 = sch.get_loops(block=b227)
        sch.annotate(block_or_loop=l291, ann_key="pragma_auto_unroll_max_step", ann_val=512)
        sch.annotate(block_or_loop=l291, ann_key="pragma_unroll_explicit", ann_val=1)
        b296 = sch.get_block(name="conv2d_nhwc_o", func_name="main")
        (
            l297,
            l298,
            l299,
            l300,
            l301,
            l302,
            l303,
            l304,
            l305,
            l306,
            l307,
            l308,
            l309,
            l310,
            l311,
            l312,
        ) = sch.get_loops(block=b296)
        b313 = sch.decompose_reduction(block=b296, loop=l300)
        sch.unannotate(block_or_loop=b313, ann_key="meta_schedule.auto_tensorize")
        sch.annotate(
            block_or_loop=b313,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_fill_16x16x16_s32",
        )
        sch.unannotate(block_or_loop=b296, ann_key="meta_schedule.auto_tensorize_init")
        sch.unannotate(block_or_loop=b313, ann_key="meta_schedule.auto_tensorize_init")
        b314 = sch.get_block(name="conv2d_nhwc_o_init", func_name="main")
        sch.unannotate(block_or_loop=b314, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b314, tensor_intrin="wmma_fill_16x16x16_s32")
        b315 = sch.get_block(name="pad_temp_reindex_shared_wmma.matrix_a_o", func_name="main")
        sch.unannotate(block_or_loop=b315, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b315, tensor_intrin="wmma_load_16x16x16_s8_a_shared")
        b316 = sch.get_block(name="p1_reindex_shared_wmma.matrix_b_o", func_name="main")
        sch.unannotate(block_or_loop=b316, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b316, tensor_intrin="wmma_load_16x16x16_s8_b_trans_shared")
        b317 = sch.get_block(name="conv2d_nhwc_o_update", func_name="main")
        sch.unannotate(block_or_loop=b317, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b317, tensor_intrin="wmma_sync_16x16x16_s8s8s32_trans")
        b318 = sch.get_block(name="conv2d_nhwc_reindex_shared_wmma.accumulator_o", func_name="main")
        sch.unannotate(block_or_loop=b318, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b318, tensor_intrin="wmma_store_16x16x16_s32_shared")

    verify(Conv2dInt8, apply_trace, Conv2dInt8_target, "cuda", Conv2dInt8_tensorcore_scheduled)
def test_conv2d_int8_vnni():
    """Int8 NCHWc conv2d anchor trace tensorized with the x86 VNNI intrinsic."""

    def apply_trace(sch):
        # Generated meta-schedule trace: inline the quantization epilogue,
        # tile the conv2d for the VNNI 16x4 micro-kernel, blockize + annotate
        # for auto-tensorization, and finally tensorize in the post-proc phase.
        b0 = sch.get_block(name="compile_engine_const", func_name="main")
        b1 = sch.get_block(name="conv2d_NCHWc_int8", func_name="main")
        b2 = sch.get_block(name="T_add", func_name="main")
        b3 = sch.get_block(name="T_cast", func_name="main")
        b4 = sch.get_block(name="T_multiply", func_name="main")
        b5 = sch.get_block(name="compile_engine_const_1", func_name="main")
        b6 = sch.get_block(name="T_add_1", func_name="main")
        b7 = sch.get_block(name="T_floor", func_name="main")
        b8 = sch.get_block(name="T_cast_1", func_name="main")
        b9 = sch.get_block(name="compute", func_name="main")
        b10 = sch.get_block(name="T_cast_2", func_name="main")
        b11 = sch.get_block(name="T_cast_3", func_name="main")
        b12 = sch.get_block(name="T_subtract", func_name="main")
        b13 = sch.get_block(name="T_multiply_1", func_name="main")
        b14 = sch.get_block(name="compile_engine_const_2", func_name="main")
        b15 = sch.get_block(name="T_add_2", func_name="main")
        b16 = sch.get_block(name="T_floor_1", func_name="main")
        b17 = sch.get_block(name="T_cast_4", func_name="main")
        b18 = sch.get_block(name="T_add_3", func_name="main")
        b19 = sch.get_block(name="compute_1", func_name="main")
        b20 = sch.get_block(name="T_cast_5", func_name="main")
        b21 = sch.get_block(name="root", func_name="main")
        # Inline the whole elementwise quantize/requantize chain into the conv.
        sch.compute_inline(block=b20)
        sch.compute_inline(block=b19)
        sch.compute_inline(block=b18)
        sch.compute_inline(block=b17)
        sch.compute_inline(block=b16)
        sch.compute_inline(block=b15)
        sch.compute_inline(block=b14)
        sch.compute_inline(block=b13)
        sch.compute_inline(block=b12)
        sch.compute_inline(block=b11)
        sch.compute_inline(block=b10)
        sch.compute_inline(block=b9)
        sch.compute_inline(block=b8)
        sch.compute_inline(block=b7)
        sch.compute_inline(block=b6)
        sch.compute_inline(block=b5)
        sch.compute_inline(block=b4)
        sch.compute_inline(block=b3)
        sch.compute_inline(block=b2)
        sch.compute_inline(block=b0)
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSRSRS")
        # Peel the innermost (oc_block=16, ic_block=4) tile and blockize it
        # so the VNNI dot-product intrinsic can be matched.
        l22, l23, l24, l25, l26, l27, l28, l29, l30, l31 = sch.get_loops(block=b1)
        l32, l33 = sch.split(loop=l31, factors=[None, 4], preserve_unit_iters=True)
        l34, l35 = sch.split(loop=l26, factors=[None, 16], preserve_unit_iters=True)
        l36, l37, l38, l39, l40, l41, l42, l43, l44, l45, l46, l47 = sch.get_loops(block=b1)
        sch.reorder(l42, l43, l44, l45, l46, l35, l33)
        b48 = sch.blockize(target=l35)
        sch.annotate(block_or_loop=b48, ann_key="meta_schedule.auto_tensorize", ann_val=VNNI_INTRIN)
        # SSRSRS tiling of the remaining loop nest with sampled factors.
        l49, l50, l51, l52, l53, l54, l55, l56, l57, l58 = sch.get_loops(block=b48)
        v59, v60, v61, v62 = sch.sample_perfect_tile(
            loop=l49, n=4, max_innermost_factor=64, decision=[1, 1, 1, 1]
        )
        l63, l64, l65, l66 = sch.split(
            loop=l49, factors=[v59, v60, v61, v62], preserve_unit_iters=True
        )
        v67, v68, v69, v70 = sch.sample_perfect_tile(
            loop=l50, n=4, max_innermost_factor=64, decision=[4, 32, 1, 1]
        )
        l71, l72, l73, l74 = sch.split(
            loop=l50, factors=[v67, v68, v69, v70], preserve_unit_iters=True
        )
        v75, v76, v77, v78 = sch.sample_perfect_tile(
            loop=l51, n=4, max_innermost_factor=64, decision=[1, 7, 1, 1]
        )
        l79, l80, l81, l82 = sch.split(
            loop=l51, factors=[v75, v76, v77, v78], preserve_unit_iters=True
        )
        v83, v84, v85, v86 = sch.sample_perfect_tile(
            loop=l52, n=4, max_innermost_factor=64, decision=[1, 1, 1, 7]
        )
        l87, l88, l89, l90 = sch.split(
            loop=l52, factors=[v83, v84, v85, v86], preserve_unit_iters=True
        )
        v91, v92, v93, v94 = sch.sample_perfect_tile(
            loop=l53, n=4, max_innermost_factor=64, decision=[1, 1, 1, 1]
        )
        l95, l96, l97, l98 = sch.split(
            loop=l53, factors=[v91, v92, v93, v94], preserve_unit_iters=True
        )
        v99, v100 = sch.sample_perfect_tile(loop=l54, n=2, max_innermost_factor=64, decision=[1, 1])
        l101, l102 = sch.split(loop=l54, factors=[v99, v100], preserve_unit_iters=True)
        v103, v104 = sch.sample_perfect_tile(
            loop=l55, n=2, max_innermost_factor=64, decision=[1, 1]
        )
        l105, l106 = sch.split(loop=l55, factors=[v103, v104], preserve_unit_iters=True)
        v107, v108 = sch.sample_perfect_tile(
            loop=l56, n=2, max_innermost_factor=64, decision=[4, 8]
        )
        l109, l110 = sch.split(loop=l56, factors=[v107, v108], preserve_unit_iters=True)
        v111, v112 = sch.sample_perfect_tile(
            loop=l57, n=2, max_innermost_factor=64, decision=[4, 1]
        )
        l113, l114 = sch.split(loop=l57, factors=[v111, v112], preserve_unit_iters=True)
        v115, v116 = sch.sample_perfect_tile(
            loop=l58, n=2, max_innermost_factor=64, decision=[1, 1]
        )
        l117, l118 = sch.split(loop=l58, factors=[v115, v116], preserve_unit_iters=True)
        sch.reorder(
            l63,
            l71,
            l79,
            l87,
            l95,
            l64,
            l72,
            l80,
            l88,
            l96,
            l101,
            l105,
            l109,
            l113,
            l117,
            l65,
            l73,
            l81,
            l89,
            l97,
            l102,
            l106,
            l110,
            l114,
            l118,
            l66,
            l74,
            l82,
            l90,
            l98,
        )
        (b119,) = sch.get_consumers(block=b48)
        sch.reverse_compute_at(block=b119, loop=l96, preserve_unit_loops=True, index=-1)
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.parallel", ann_val=96)
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.vectorize", ann_val=64)
        v120 = sch.sample_categorical(
            candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=2
        )
        sch.annotate(block_or_loop=b21, ann_key="meta_schedule.unroll_explicit", ann_val=v120)
        # Post-processing: realize parallel/vectorize/unroll, then tensorize
        # the update block with the VNNI intrinsic.
        sch.enter_postproc()
        b121 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.parallel")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.vectorize")
        sch.unannotate(block_or_loop=b121, ann_key="meta_schedule.unroll_explicit")
        b122, b123 = sch.get_child_blocks(b121)
        (
            l124,
            l125,
            l126,
            l127,
            l128,
            l129,
            l130,
            l131,
            l132,
            l133,
            l134,
            l135,
            l136,
            l137,
            l138,
            l139,
            l140,
            l141,
            l142,
            l143,
            l144,
            l145,
            l146,
            l147,
            l148,
            l149,
            l150,
            l151,
            l152,
            l153,
        ) = sch.get_loops(block=b122)
        l154 = sch.fuse(l124, l125, l126, l127, l128, l129, l130, preserve_unit_iters=True)
        sch.parallel(loop=l154)
        sch.annotate(block_or_loop=l154, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l154, ann_key="pragma_unroll_explicit", ann_val=1)
        l155, l156, l157, l158, l159, l160, l161, l162, l163 = sch.get_loops(block=b123)
        l164 = sch.fuse(l163, preserve_unit_iters=True)
        sch.vectorize(loop=l164)
        sch.annotate(block_or_loop=l155, ann_key="pragma_auto_unroll_max_step", ann_val=64)
        sch.annotate(block_or_loop=l155, ann_key="pragma_unroll_explicit", ann_val=1)
        b165 = sch.get_block(name="conv2d_NCHWc_int8_o", func_name="main")
        (
            l166,
            l167,
            l168,
            l169,
            l170,
            l171,
            l172,
            l173,
            l174,
            l175,
            l176,
            l177,
            l178,
            l179,
            l180,
            l181,
            l182,
            l183,
            l184,
            l185,
            l186,
            l187,
            l188,
            l189,
        ) = sch.get_loops(block=b165)
        b190 = sch.decompose_reduction(block=b165, loop=l170)
        sch.unannotate(block_or_loop=b190, ann_key="meta_schedule.auto_tensorize")
        sch.annotate(block_or_loop=b190, ann_key="meta_schedule.auto_tensorize", ann_val="")
        b191 = sch.get_block(name="conv2d_NCHWc_int8_o_init", func_name="main")
        sch.unannotate(block_or_loop=b191, ann_key="meta_schedule.auto_tensorize")
        (b192,) = sch.get_child_blocks(b191)
        (l193,) = sch.get_loops(block=b192)
        sch.vectorize(loop=l193)
        b194 = sch.get_block(name="conv2d_NCHWc_int8_o_update", func_name="main")
        sch.unannotate(block_or_loop=b194, ann_key="meta_schedule.auto_tensorize")
        sch.tensorize(block_or_loop=b194, tensor_intrin=VNNI_INTRIN)

    # The reference module embeds the raw LLVM intrinsic id, which varies per
    # LLVM build, so it is looked up at test time.
    vnni_id = llvm_lookup_intrinsic_id("llvm.x86.avx512.vpdpbusd.512")
    verify(
        Conv2dInt8_NCHWc,
        apply_trace,
        Conv2dInt8_NCHWc_target,
        "llvm -mcpu=cascadelake",
        get_conv2d_vnni_mod(vnni_id),
    )
def test_winograd_gpu():
    """Replay a recorded meta-schedule trace for a winograd conv2d on CUDA.

    ``apply_trace`` re-executes, step by step, a trace captured from
    meta-schedule tuning (all ``sample_*`` calls carry their recorded
    ``decision``), and ``verify`` checks the result against the expected
    scheduled module ``Conv2dWinogradAddResidualRelu_scheduled``.
    NOTE: the numbered loop/block handles (``l11``, ``b118``, ...) are
    positional — every statement depends on the exact order of the ones
    before it, so do not reorder anything in this body.
    """

    def apply_trace(sch):
        # Look up every block of the winograd conv2d (+ add + relu) workload.
        b0 = sch.get_block(name="B", func_name="main")
        b1 = sch.get_block(name="data_pack", func_name="main")
        b2 = sch.get_block(name="bgemm", func_name="main")
        b3 = sch.get_block(name="A", func_name="main")
        b4 = sch.get_block(name="inverse", func_name="main")
        b5 = sch.get_block(name="conv2d_winograd", func_name="main")
        b6 = sch.get_block(name="T_add", func_name="main")
        b7 = sch.get_block(name="T_relu", func_name="main")
        b8 = sch.get_block(name="root", func_name="main")
        sch.compute_inline(block=b0)
        (b9,) = sch.get_producers(block=b1)
        (b10,) = sch.get_producers(block=b9)
        # Schedule the input transform ("data_pack"): tile the two spatial
        # loops, unroll the small transform loops, then fuse + bind to GPU.
        l11, l12, l13, l14, l15, l16 = sch.get_loops(block=b1)
        v17, v18 = sch.sample_perfect_tile(
            loop=l13, n=2, max_innermost_factor=64, decision=[14, 14]
        )
        l19, l20 = sch.split(loop=l13, factors=[v17, v18], preserve_unit_iters=True)
        v21, v22 = sch.sample_perfect_tile(loop=l14, n=2, max_innermost_factor=64, decision=[8, 8])
        l23, l24 = sch.split(loop=l14, factors=[v21, v22], preserve_unit_iters=True)
        sch.unroll(loop=l11)
        sch.unroll(loop=l12)
        sch.unroll(loop=l15)
        sch.unroll(loop=l16)
        sch.reorder(l19, l23, l20, l24, l11, l12, l15, l16)
        sch.compute_at(block=b9, loop=l24, preserve_unit_loops=True, index=-1)
        sch.set_scope(block=b9, buffer_index=0, storage_scope="local")
        sch.compute_inline(block=b10)
        l25, l26, l27, l28, l29, l30, l31, l32 = sch.get_loops(block=b1)
        l33 = sch.fuse(l25, l26, l27, l28, preserve_unit_iters=True)
        v34 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=2,
        )
        l35, l36 = sch.split(loop=l33, factors=[None, v34], preserve_unit_iters=True)
        sch.bind(loop=l35, thread_axis="blockIdx.x")
        sch.bind(loop=l36, thread_axis="threadIdx.x")
        sch.compute_inline(block=b3)
        # Schedule the output transform ("inverse") the same way.
        l37, l38, l39, l40, l41, l42 = sch.get_loops(block=b4)
        v43, v44 = sch.sample_perfect_tile(loop=l39, n=2, max_innermost_factor=64, decision=[28, 7])
        l45, l46 = sch.split(loop=l39, factors=[v43, v44], preserve_unit_iters=True)
        v47, v48 = sch.sample_perfect_tile(loop=l40, n=2, max_innermost_factor=64, decision=[2, 32])
        l49, l50 = sch.split(loop=l40, factors=[v47, v48], preserve_unit_iters=True)
        sch.unroll(loop=l37)
        sch.unroll(loop=l38)
        sch.unroll(loop=l41)
        sch.unroll(loop=l42)
        sch.reorder(l45, l49, l46, l50, l37, l38, l41, l42)
        l51, l52, l53, l54, l55, l56, l57, l58 = sch.get_loops(block=b4)
        l59 = sch.fuse(l51, l52, l53, l54, preserve_unit_iters=True)
        v60 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=4,
        )
        l61, l62 = sch.split(loop=l59, factors=[None, v60], preserve_unit_iters=True)
        sch.bind(loop=l61, thread_axis="blockIdx.x")
        sch.bind(loop=l62, thread_axis="threadIdx.x")
        # Multi-level tiling of the batched GEMM block, following the
        # "SSSRRSRS" structure annotated below.
        sch.annotate(block_or_loop=b2, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        l63, l64, l65, l66, l67 = sch.get_loops(block=b2)
        v68, v69, v70, v71, v72 = sch.sample_perfect_tile(
            loop=l63, n=5, max_innermost_factor=64, decision=[1, 2, 3, 1, 1]
        )
        l73, l74, l75, l76, l77 = sch.split(
            loop=l63, factors=[v68, v69, v70, v71, v72], preserve_unit_iters=True
        )
        v78, v79, v80, v81, v82 = sch.sample_perfect_tile(
            loop=l64, n=5, max_innermost_factor=64, decision=[6, 1, 1, 1, 1]
        )
        l83, l84, l85, l86, l87 = sch.split(
            loop=l64, factors=[v78, v79, v80, v81, v82], preserve_unit_iters=True
        )
        v88, v89, v90, v91, v92 = sch.sample_perfect_tile(
            loop=l65, n=5, max_innermost_factor=64, decision=[7, 2, 1, 14, 1]
        )
        l93, l94, l95, l96, l97 = sch.split(
            loop=l65, factors=[v88, v89, v90, v91, v92], preserve_unit_iters=True
        )
        v98, v99, v100, v101, v102 = sch.sample_perfect_tile(
            loop=l66, n=5, max_innermost_factor=64, decision=[4, 1, 16, 1, 1]
        )
        l103, l104, l105, l106, l107 = sch.split(
            loop=l66, factors=[v98, v99, v100, v101, v102], preserve_unit_iters=True
        )
        v108, v109, v110 = sch.sample_perfect_tile(
            loop=l67, n=3, max_innermost_factor=64, decision=[2, 2, 16]
        )
        l111, l112, l113 = sch.split(loop=l67, factors=[v108, v109, v110], preserve_unit_iters=True)
        sch.reorder(
            l73,
            l83,
            l93,
            l103,
            l74,
            l84,
            l94,
            l104,
            l75,
            l85,
            l95,
            l105,
            l111,
            l112,
            l76,
            l86,
            l96,
            l106,
            l113,
            l77,
            l87,
            l97,
            l107,
        )
        # Bind the outer spatial tiles to blockIdx / vthread / threadIdx.
        l114 = sch.fuse(l73, l83, l93, l103, preserve_unit_iters=True)
        sch.bind(loop=l114, thread_axis="blockIdx.x")
        l115 = sch.fuse(l74, l84, l94, l104, preserve_unit_iters=True)
        sch.bind(loop=l115, thread_axis="vthread.x")
        l116 = sch.fuse(l75, l85, l95, l105, preserve_unit_iters=True)
        sch.bind(loop=l116, thread_axis="threadIdx.x")
        sch.annotate(
            block_or_loop=b2, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
        )
        sch.annotate(
            block_or_loop=b2, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
        )
        b117 = sch.cache_write(block=b2, write_buffer_index=0, storage_scope="local")
        sch.reverse_compute_at(block=b117, loop=l116, preserve_unit_loops=True, index=-1)
        # Cache both GEMM operands in shared memory with cooperative fetch.
        b118 = sch.cache_read(
            block=b2, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b2]
        )
        sch.compute_at(block=b118, loop=l111, preserve_unit_loops=True, index=-1)
        l119, l120, l121, l122, l123, l124, l125, l126 = sch.get_loops(block=b118)
        l127 = sch.fuse(l123, l124, l125, l126, preserve_unit_iters=True)
        v128 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b118, ann_key="meta_schedule.cooperative_fetch", ann_val=v128)
        b129 = sch.cache_read(
            block=b2, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b2]
        )
        sch.compute_at(block=b129, loop=l111, preserve_unit_loops=True, index=-1)
        l130, l131, l132, l133, l134, l135, l136, l137 = sch.get_loops(block=b129)
        l138 = sch.fuse(l134, l135, l136, l137, preserve_unit_iters=True)
        v139 = sch.sample_categorical(
            candidates=[1, 2, 3, 4], probs=[0.25, 0.25, 0.25, 0.25], decision=3
        )
        sch.annotate(block_or_loop=b129, ann_key="meta_schedule.cooperative_fetch", ann_val=v139)
        sch.reverse_compute_inline(block=b7)
        sch.reverse_compute_inline(block=b6)
        v140 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=4,
        )
        sch.annotate(block_or_loop=b8, ann_key="meta_schedule.unroll_explicit", ann_val=v140)
        # Fuse + bind the final conv2d_winograd block.
        l141, l142, l143, l144 = sch.get_loops(block=b5)
        l145 = sch.fuse(l141, l142, l143, l144, preserve_unit_iters=True)
        v146 = sch.sample_categorical(
            candidates=[32, 64, 128, 256, 512, 1024],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=2,
        )
        l147, l148 = sch.split(loop=l145, factors=[None, v146], preserve_unit_iters=True)
        sch.bind(loop=l147, thread_axis="blockIdx.x")
        sch.bind(loop=l148, thread_axis="threadIdx.x")
        # Post-processing phase of the trace: concretize cooperative fetch
        # (vectorize + thread-bind) and materialize the unroll pragmas.
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b118, ann_key="meta_schedule.cooperative_fetch")
        l149, l150, l151, l152, l153 = sch.get_loops(block=b118)
        l154, l155, l156 = sch.split(loop=l153, factors=[None, 48, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l156)
        sch.bind(loop=l155, thread_axis="threadIdx.x")
        sch.unannotate(block_or_loop=b129, ann_key="meta_schedule.cooperative_fetch")
        l157, l158, l159, l160, l161 = sch.get_loops(block=b129)
        l162, l163, l164 = sch.split(loop=l161, factors=[None, 48, 4], preserve_unit_iters=True)
        sch.vectorize(loop=l164)
        sch.bind(loop=l163, thread_axis="threadIdx.x")
        b165 = sch.get_block(name="root", func_name="main")
        sch.unannotate(block_or_loop=b165, ann_key="meta_schedule.unroll_explicit")
        b166, b167, b168, b169, b170, b171, b172, b173 = sch.get_child_blocks(b165)
        l174, l175, l176, l177, l178, l179 = sch.get_loops(block=b166)
        sch.annotate(block_or_loop=l174, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l174, ann_key="pragma_unroll_explicit", ann_val=1)
        l180, l181, l182, l183, l184, l185 = sch.get_loops(block=b167)
        sch.annotate(block_or_loop=l180, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l180, ann_key="pragma_unroll_explicit", ann_val=1)
        l186, l187, l188, l189, l190, l191, l192 = sch.get_loops(block=b168)
        sch.annotate(block_or_loop=l186, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l186, ann_key="pragma_unroll_explicit", ann_val=1)
        l193, l194, l195, l196, l197, l198, l199 = sch.get_loops(block=b169)
        sch.annotate(block_or_loop=l193, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l193, ann_key="pragma_unroll_explicit", ann_val=1)
        (
            l200,
            l201,
            l202,
            l203,
            l204,
            l205,
            l206,
            l207,
            l208,
            l209,
            l210,
            l211,
            l212,
            l213,
        ) = sch.get_loops(block=b170)
        sch.annotate(block_or_loop=l200, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l200, ann_key="pragma_unroll_explicit", ann_val=1)
        l214, l215, l216, l217, l218, l219, l220 = sch.get_loops(block=b171)
        sch.annotate(block_or_loop=l214, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l214, ann_key="pragma_unroll_explicit", ann_val=1)
        l221, l222, l223, l224, l225, l226 = sch.get_loops(block=b172)
        sch.annotate(block_or_loop=l221, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l221, ann_key="pragma_unroll_explicit", ann_val=1)
        l227, l228 = sch.get_loops(block=b173)
        sch.annotate(block_or_loop=l227, ann_key="pragma_auto_unroll_max_step", ann_val=1024)
        sch.annotate(block_or_loop=l227, ann_key="pragma_unroll_explicit", ann_val=1)
        # Finally split each reduction block into init + update blocks.
        b229 = sch.get_block(name="data_pack", func_name="main")
        l230, l231, l232, l233, l234, l235 = sch.get_loops(block=b229)
        b236 = sch.decompose_reduction(block=b229, loop=l234)
        b237 = sch.get_block(name="bgemm", func_name="main")
        (
            l238,
            l239,
            l240,
            l241,
            l242,
            l243,
            l244,
            l245,
            l246,
            l247,
            l248,
            l249,
            l250,
            l251,
        ) = sch.get_loops(block=b237)
        b252 = sch.decompose_reduction(block=b237, loop=l241)
        b253 = sch.get_block(name="inverse", func_name="main")
        l254, l255, l256, l257, l258, l259 = sch.get_loops(block=b253)
        b260 = sch.decompose_reduction(block=b253, loop=l258)

    verify(
        Conv2dWinogradAddRelu,
        apply_trace,
        Conv2dWinogradAddResidualRelu,
        "cuda",
        Conv2dWinogradAddResidualRelu_scheduled,
    )
def test_inline_order():
    """Check the order in which AutoInline is applied during anchor-trace scheduling.

    Replays a recorded meta-schedule trace for a tensorized int8 conv2d on
    CUDA and verifies the result against
    ``Conv2dInt8_with_predicate_scheduled`` (see the comment below for why the
    inline order matters). The numbered handles are positional: every
    statement depends on the exact order of the preceding ones.
    """
    # In this test, the order of applying AutoInline is tested.
    # We need to make sure that the last block in Conv2dInt8_with_predicate_target,
    # "compute_4", is AutoInline-ed after all other blocks have been processed.
    #
    # Otherwise, if the order is "T_add_2" -> "compute_4" -> "compute_3", "compute_4" is neither
    # inlined (because this is the last block) nor reverse-inlined
    # (because it has multiple producers). This results in the "compute_4" block being
    # reverse-inlined at the very end of ScheduleUsingAnchorTrace, where its producer block
    # "conv2d_nhwc_reindex_shared" has the predicate
    # T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64) due to anchor-block scheduling
    # (see Conv2dInt8_with_predicate_scheduled). ReverseComputeInline cannot be applied in
    # such cases.
    def apply_trace(sch: Schedule) -> None:
        # Look up all blocks of the quantized conv2d workload.
        b0 = sch.get_block(name="pad_temp", func_name="main")
        b1 = sch.get_block(name="conv2d_nhwc", func_name="main")
        b2 = sch.get_block(name="T_subtract", func_name="main")
        b3 = sch.get_block(name="T_add", func_name="main")
        b4 = sch.get_block(name="compute", func_name="main")
        b5 = sch.get_block(name="T_add_1", func_name="main")
        b6 = sch.get_block(name="compute_1", func_name="main")
        b7 = sch.get_block(name="T_subtract_1", func_name="main")
        b8 = sch.get_block(name="compute_2", func_name="main")
        b9 = sch.get_block(name="root", func_name="main")
        sch.annotate(block_or_loop=b1, ann_key="meta_schedule.tiling_structure", ann_val="SSSRRSRS")
        # Re-index and re-layout the conv2d so it matches the wmma 16x16x16
        # tensor-core layout.
        b10 = sch.reindex(block=b1, buffer=("write", 0))
        b11 = sch.reindex(block=b1, buffer=("read", 0))
        b12 = sch.reindex(block=b1, buffer=("read", 1))
        sch.transform_layout(
            block=b1,
            buffer=("read", 0),
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("read", 1),
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
            pad_value=None,
        )
        sch.transform_layout(
            block=b1,
            buffer=("write", 0),
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
            pad_value=None,
        )
        sch.transform_block_layout(
            block=b10,
            index_map=lambda nn, yy, xx, ff: (
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
            ),
        )
        sch.transform_block_layout(
            block=b11,
            index_map=lambda nn, yy, xx, rc: (
                (((nn * 3136) + (yy * 56)) + xx),
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b12,
            index_map=lambda ff, ry, rx, rc: (
                ry,
                rx,
                ff,
                rc,
            ),
        )
        sch.transform_block_layout(
            block=b1,
            index_map=lambda nn, yy, xx, ff, ry, rx, rc: (
                ry,
                rx,
                (((nn * 3136) + (yy * 56)) + xx),
                ff,
                rc,
            ),
        )
        # Split out the 16x16x16 inner tile and blockize it for tensorization.
        l13, l14, l15, l16, l17 = sch.get_loops(block=b1)
        l18, l19 = sch.split(loop=l17, factors=[None, 16], preserve_unit_iters=True)
        l20, l21 = sch.split(loop=l16, factors=[None, 16], preserve_unit_iters=True)
        l22, l23 = sch.split(loop=l15, factors=[None, 16], preserve_unit_iters=True)
        l24, l25, l26, l27, l28, l29, l30, l31 = sch.get_loops(block=b1)
        sch.reorder(l28, l30, l23, l21, l19)
        b32 = sch.blockize(target=l23)
        sch.annotate(
            block_or_loop=b32,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_sync_16x16x16_s8s8s32_trans",
        )
        sch.annotate(
            block_or_loop=b32,
            ann_key="meta_schedule.auto_tensorize_init",
            ann_val="wmma_fill_16x16x16_s32",
        )
        sch.annotate(block_or_loop=b32, ann_key="warp_execution", ann_val=1)
        # Multi-level tiling of the blockized GEMM (SSSRRSRS structure).
        l33, l34, l35, l36, l37 = sch.get_loops(block=b32)
        v38, v39, v40 = sch.sample_perfect_tile(
            loop=l33, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l41, l42, l43 = sch.split(loop=l33, factors=[v38, v39, v40], preserve_unit_iters=True)
        v44, v45, v46 = sch.sample_perfect_tile(
            loop=l34, n=3, max_innermost_factor=4, decision=[1, 1, 1]
        )
        l47, l48, l49 = sch.split(loop=l34, factors=[v44, v45, v46], preserve_unit_iters=True)
        v50, v51, v52, v53, v54 = sch.sample_perfect_tile(
            loop=l35, n=5, max_innermost_factor=4, decision=[8, 196, 2, 1, 1]
        )
        l55, l56, l57, l58, l59 = sch.split(
            loop=l35, factors=[v50, v51, v52, v53, v54], preserve_unit_iters=True
        )
        v60, v61, v62, v63, v64 = sch.sample_perfect_tile(
            loop=l36, n=5, max_innermost_factor=4, decision=[4, 1, 2, 1, 2]
        )
        l65, l66, l67, l68, l69 = sch.split(
            loop=l36, factors=[v60, v61, v62, v63, v64], preserve_unit_iters=True
        )
        v70, v71, v72 = sch.sample_perfect_tile(
            loop=l37, n=3, max_innermost_factor=4, decision=[2, 2, 1]
        )
        l73, l74, l75 = sch.split(loop=l37, factors=[v70, v71, v72], preserve_unit_iters=True)
        sch.reorder(
            l55,
            l65,
            l56,
            l66,
            l57,
            l67,
            l41,
            l47,
            l73,
            l42,
            l48,
            l74,
            l58,
            l68,
            l43,
            l49,
            l75,
            l59,
            l69,
        )
        # Bind the outer spatial tiles to the GPU thread hierarchy.
        l76 = sch.fuse(l55, l65, preserve_unit_iters=True)
        sch.bind(loop=l76, thread_axis="blockIdx.y")
        l77 = sch.fuse(l56, l66, preserve_unit_iters=True)
        sch.bind(loop=l77, thread_axis="blockIdx.x")
        l78 = sch.fuse(l57, l67, preserve_unit_iters=True)
        sch.bind(loop=l78, thread_axis="threadIdx.y")
        sch.annotate(
            block_or_loop=b32, ann_key="meta_schedule.thread_extent_low_inclusive", ann_val=32
        )
        sch.annotate(
            block_or_loop=b32, ann_key="meta_schedule.thread_extent_high_inclusive", ann_val=1024
        )
        # Stage the output through wmma.accumulator and shared memory.
        b79 = sch.cache_write(block=b32, write_buffer_index=0, storage_scope="shared")
        sch.reverse_compute_at(block=b79, loop=l77, preserve_unit_loops=True, index=-1)
        b80 = sch.cache_write(block=b32, write_buffer_index=0, storage_scope="wmma.accumulator")
        sch.reverse_compute_at(block=b80, loop=l78, preserve_unit_loops=True, index=-1)
        v81 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=1,
        )
        sch.annotate(block_or_loop=b79, ann_key="meta_schedule.cooperative_fetch", ann_val=v81)
        sch.reverse_compute_inline(block=b10)
        l82, l83, l84, l85, l86 = sch.get_loops(block=b80)
        l87, l88 = sch.split(loop=l86, factors=[None, 16], preserve_unit_iters=True)
        l89, l90 = sch.split(loop=l85, factors=[None, 16], preserve_unit_iters=True)
        l91, l92, l93, l94, l95, l96, l97 = sch.get_loops(block=b80)
        sch.reorder(l96, l90, l88)
        b98 = sch.blockize(target=l90)
        sch.annotate(
            block_or_loop=b98,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_store_16x16x16_s32_shared",
        )
        # Stage both operands through shared memory with cooperative fetch.
        b99 = sch.cache_read(
            block=b32, read_buffer_index=0, storage_scope="shared", consumer_blocks=[b32]
        )
        sch.compute_at(block=b99, loop=l73, preserve_unit_loops=True, index=-1)
        l100, l101, l102, l103, l104, l105, l106, l107 = sch.get_loops(block=b99)
        l108 = sch.fuse(l106, l107, preserve_unit_iters=True)
        v109 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=3,
        )
        sch.annotate(block_or_loop=b99, ann_key="meta_schedule.cooperative_fetch", ann_val=v109)
        b110 = sch.cache_read(
            block=b32, read_buffer_index=1, storage_scope="shared", consumer_blocks=[b32]
        )
        sch.compute_at(block=b110, loop=l73, preserve_unit_loops=True, index=-1)
        l111, l112, l113, l114, l115, l116, l117, l118, l119, l120 = sch.get_loops(block=b110)
        l121 = sch.fuse(l117, l118, l119, l120, preserve_unit_iters=True)
        v122 = sch.sample_categorical(
            candidates=[1, 2, 3, 4, 8, 16],
            probs=[
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
                0.16666666666666666,
            ],
            decision=2,
        )
        sch.annotate(block_or_loop=b110, ann_key="meta_schedule.cooperative_fetch", ann_val=v122)
        # Load tiles from shared memory into wmma matrix_a / matrix_b fragments.
        b123 = sch.cache_read(block=b32, read_buffer_index=0, storage_scope="wmma.matrix_a")
        sch.compute_at(block=b123, loop=l74, preserve_unit_loops=True, index=-1)
        l124, l125, l126, l127, l128, l129, l130, l131, l132, l133, l134 = sch.get_loops(block=b123)
        l135, l136 = sch.split(loop=l134, factors=[None, 16], preserve_unit_iters=True)
        l137, l138 = sch.split(loop=l133, factors=[None, 16], preserve_unit_iters=True)
        (
            l139,
            l140,
            l141,
            l142,
            l143,
            l144,
            l145,
            l146,
            l147,
            l148,
            l149,
            l150,
            l151,
        ) = sch.get_loops(block=b123)
        sch.reorder(l150, l138, l136)
        b152 = sch.blockize(target=l138)
        sch.annotate(
            block_or_loop=b152,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_a_shared",
        )
        b153 = sch.cache_read(block=b32, read_buffer_index=1, storage_scope="wmma.matrix_b")
        sch.compute_at(block=b153, loop=l74, preserve_unit_loops=True, index=-1)
        (
            l154,
            l155,
            l156,
            l157,
            l158,
            l159,
            l160,
            l161,
            l162,
            l163,
            l164,
            l165,
            l166,
        ) = sch.get_loops(block=b153)
        l167, l168 = sch.split(loop=l166, factors=[None, 16], preserve_unit_iters=True)
        l169, l170 = sch.split(loop=l165, factors=[None, 16], preserve_unit_iters=True)
        (
            l171,
            l172,
            l173,
            l174,
            l175,
            l176,
            l177,
            l178,
            l179,
            l180,
            l181,
            l182,
            l183,
            l184,
            l185,
        ) = sch.get_loops(block=b153)
        sch.reorder(l184, l170, l168)
        b186 = sch.blockize(target=l170)
        sch.annotate(
            block_or_loop=b186,
            ann_key="meta_schedule.auto_tensorize",
            ann_val="wmma_load_16x16x16_s8_b_trans_shared",
        )
        sch.compute_inline(block=b11)
        sch.compute_inline(block=b12)
        sch.storage_align(block=b99, buffer_index=0, axis=-2, factor=32, offset=16)
        sch.storage_align(block=b110, buffer_index=0, axis=-2, factor=32, offset=16)
        # Inline the elementwise epilogue chain; order matters (see the
        # explanation at the top of this test).
        sch.reverse_compute_inline(block=b8)
        sch.reverse_compute_inline(block=b7)
        sch.reverse_compute_inline(block=b6)
        sch.reverse_compute_inline(block=b5)
        sch.reverse_compute_inline(block=b4)
        sch.reverse_compute_inline(block=b3)
        sch.reverse_compute_inline(block=b2)
        sch.compute_inline(block=b0)
        v187 = sch.sample_categorical(
            candidates=[0, 16, 64, 512, 1024],
            probs=[
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
                0.20000000000000001,
            ],
            decision=4,
        )
        sch.annotate(block_or_loop=b9, ann_key="meta_schedule.unroll_explicit", ann_val=v187)
        # Post-processing: concretize cooperative fetch of the epilogue write.
        sch.enter_postproc()
        sch.unannotate(block_or_loop=b79, ann_key="meta_schedule.cooperative_fetch")
        l188, l189, l190, l191 = sch.get_loops(block=b79)
        l192, l193, l194, l195 = sch.split(
            loop=l191, factors=[None, 4, 32, 2], preserve_unit_iters=True
        )

    verify(
        Conv2dInt8_with_predicate,
        apply_trace,
        Conv2dInt8_with_predicate_target,
        "cuda",
        Conv2dInt8_with_predicate_scheduled,
    )
# Allow running this test file directly: `python <this file> [pytest args]`.
if __name__ == "__main__":
    tvm.testing.main()
| 214,210 | 62.50756 | 4,139 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_tensorize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import te, tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
from tvm.tir.tensor_intrin.arm_cpu import (
DP4A_INTRIN,
ARM_DOT_4x4_i8_NEON_INTRIN,
ARM_DOT_4x4_i8_SDOT_INTRIN,
)
from tvm.tir.tensor_intrin.rocm import AMDGPU_SDOT4_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN, AVX512_DOT_16x4_INTRIN
from tvm.tir.tensor_intrin.hexagon import VRMPY_u8u8i32_INTRIN, VDMPY_i16i16i32_INTRIN
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# Tensor-intrin *description*: the computation a 16x16x16 MMA replaces.
# Computes C[i, j] += A[i, k] * B[j, k] (second operand indexed [j, k],
# i.e. B is accessed transposed). Used by tensorize to pattern-match blocks.
@T.prim_func
def mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
    B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
    C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
    with T.block("root"):
        T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
        T.writes(C[0 : 16, 0 : 16])
        for i, j, k in T.grid(16, 16, 16):
            with T.block("update"):
                vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
                C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
# Tensor-intrin *implementation* matching `mma_desc`: lowers the 16x16x16
# update to a single `tvm_mma_sync` call. elem_offset // 256 converts an
# element offset into a 16x16 (= 256-element) tile index.
@T.prim_func
def mma_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
    B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
    C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
    with T.block("root"):
        T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
        T.writes(C[0 : 16, 0 : 16])
        T.evaluate(
            T.tvm_mma_sync(
                C.data,
                C.elem_offset // 256,
                A.data,
                A.elem_offset // 256,
                B.data,
                B.elem_offset // 256,
                C.data,
                C.elem_offset // 256,
                dtype="handle",
            )
        )
# Description of a 4-element dot-product reduction into a scalar:
# C[()] += sum_i A[i] * B[i]. Note C is a rank-0 (scalar) buffer.
@T.prim_func
def dot_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (4,))
    B = T.match_buffer(b, (4,))
    C = T.match_buffer(c, ())
    with T.block("root"):
        T.reads(C[()], A[0 : 4], B[0 : 4])
        T.writes(C[()])
        for i in range(0, 4):
            with T.block("update"):
                vi = T.axis.remap("R", [i])
                C[()] = C[()] + A[vi] * B[vi]
# Implementation matching `dot_product_desc`: replaces the reduction loop with
# a call to an external "vec4add" routine, passing raw data pointers and
# element offsets.
@T.prim_func
def dot_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (4,), offset_factor=1)
    B = T.match_buffer(b, (4,), offset_factor=1)
    C = T.match_buffer(c, (), offset_factor=1)
    with T.block("root"):
        T.reads(C[()], A[0 : 4], B[0 : 4])
        T.writes(C[()])
        T.evaluate(
            T.call_extern(
                "vec4add",
                C.data,
                C.elem_offset,
                A.data,
                A.elem_offset,
                B.data,
                B.elem_offset,
                dtype="int32",
            )
        )
# Same implementation as `dot_product_intrin`, but carrying a block annotation
# ("test_annotation") so tests can check that annotations survive tensorize.
@T.prim_func
def dot_product_intrin_annotated(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (4,), offset_factor=1)
    B = T.match_buffer(b, (4,), offset_factor=1)
    C = T.match_buffer(c, (), offset_factor=1)
    with T.block("root"):
        T.reads(C[()], A[0 : 4], B[0 : 4])
        T.writes(C[()])
        T.block_attr({"test_annotation": True})
        T.evaluate(
            T.call_extern(
                "vec4add",
                C.data,
                C.elem_offset,
                A.data,
                A.elem_offset,
                B.data,
                B.elem_offset,
                dtype="int32",
            )
        )
# Description of a 16x16 outer-product accumulation:
# C[i, j] += A[i, 0] * B[j, 0]. Note: no reduction axis — both block
# iterators are spatial ("SS"); the accumulation is into the existing C.
@T.prim_func
def outer_product_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 1), offset_factor=1)
    B = T.match_buffer(b, (16, 1), offset_factor=1)
    C = T.match_buffer(c, (16, 16), offset_factor=1)
    with T.block("root"):
        T.reads(
            C[0 : 16, 0 : 16],
            A[0 : 16, 0 : 1],
            B[0 : 16, 0 : 1],
        )
        T.writes(C[0 : 16, 0 : 16])
        for i, j in T.grid(16, 16):
            with T.block("update"):
                vii, vjj = T.axis.remap("SS", [i, j])
                C[vii, vjj] = C[vii, vjj] + A[vii, 0] * B[vjj, 0]
# Implementation matching `outer_product_desc`: delegates the 16x16
# outer-product accumulation to an external "outer_product" routine.
@T.prim_func
def outer_product_intrin(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 1), offset_factor=1)
    B = T.match_buffer(b, (16, 1), offset_factor=1)
    C = T.match_buffer(c, (16, 16), offset_factor=1)
    with T.block("root"):
        T.reads(
            C[0 : 16, 0 : 16],
            A[0 : 16, 0 : 1],
            B[0 : 16, 0 : 1],
        )
        T.writes(C[0 : 16, 0 : 16])
        T.evaluate(
            T.call_extern(
                "outer_product",
                C.data,
                C.elem_offset,
                A.data,
                A.elem_offset,
                B.data,
                B.elem_offset,
                dtype="int32",
            )
        )
# Workload: 128x128x128 matmul with the second operand accessed transposed
# (C[i, j] += A[i, k] * B[j, k]), matching the access pattern of `mma_desc`.
@T.prim_func
def matmul(
    A: T.Buffer((128, 128), "float32"),
    B: T.Buffer((128, 128), "float32"),
    C: T.Buffer((128, 128), "float32"),
) -> None:
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR after tensorizing `matmul` with the 16x16x16 MMA intrinsic:
# an 8x8 grid of tiles, an explicit init loop nest, and the update block
# replaced by `tvm_mma_sync` on matched 16x16 sub-buffers.
@T.prim_func
def tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    for i_outer, j_outer in T.grid(8, 8):
        # Zero-init of the 16x16 output tile (decomposed from T.init()).
        for i_inner_init, j_inner_init in T.grid(16, 16):
            with T.block("init"):
                vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
                vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
                C[vi_init, vj_init] = T.float32(0)
        for k_outer in T.grid(8):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
                T.reads(
                    [
                        C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                        A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                        B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                    ]
                )
                T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
                # Match 16x16 views of each tile with symbolic element offsets,
                # as required by the intrinsic's buffer signature.
                A_elem_offset = T.int32()
                B_elem_offset = T.int32()
                C_elem_offset = T.int32()
                A_sub = T.match_buffer(
                    A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                    [16, 16],
                    elem_offset=A_elem_offset,
                )
                B_sub = T.match_buffer(
                    B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                    [16, 16],
                    elem_offset=B_elem_offset,
                )
                C_sub = T.match_buffer(
                    C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                    [16, 16],
                    elem_offset=C_elem_offset,
                )
                # Offsets are divided by 256 (= 16*16) to get tile indices.
                T.evaluate(
                    T.tvm_mma_sync(
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        A_sub.data,
                        T.floordiv(A_sub.elem_offset, 256),
                        B_sub.data,
                        T.floordiv(B_sub.elem_offset, 256),
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        dtype="handle",
                    )
                )
# Workload: 16-batch 128x128x128 matmul with a separate init block and the
# second operand accessed transposed (C[n, i, j] += A[n, i, k] * B[n, j, k]).
@T.prim_func
def batch_matmul(
    A: T.Buffer((16, 128, 128), "float32"),
    B: T.Buffer((16, 128, 128), "float32"),
    C: T.Buffer((16, 128, 128), "float32"),
) -> None:
    for n, i, j in T.grid(16, 128, 128):
        with T.block("init"):
            vn, vi, vj = T.axis.remap("SSS", [n, i, j])
            C[vn, vi, vj] = T.float32(0)
    for n, i, j, k in T.grid(16, 128, 128, 128):
        with T.block("update"):
            vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
            C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
# Expected IR after tensorizing `batch_matmul` with the MMA intrinsic: the
# init block is untouched; per batch, an 8x8x8 grid of 16x16x16 tile updates
# is replaced by `tvm_mma_sync` on matched sub-buffers.
@T.prim_func
def tensorized_batch_matmul_mma(
    A: T.Buffer((16, 128, 128), "float32"),
    B: T.Buffer((16, 128, 128), "float32"),
    C: T.Buffer((16, 128, 128), "float32"),
) -> None:
    for n, i, j in T.grid(16, 128, 128):
        with T.block("init"):
            vn, vi, vj = T.axis.remap("SSS", [n, i, j])
            T.reads()
            T.writes(C[vn, vi, vj])
            C[vn, vi, vj] = T.float32(0)
    for n in range(0, 16):
        for i, j, k in T.grid(8, 8, 8):
            with T.block("update"):
                vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
                T.reads(
                    C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                    A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                    B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                )
                T.writes(C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
                # Match 2-D 16x16 views of the 3-D tile regions (the unit
                # batch dim is dropped) with symbolic element offsets.
                A_elem_offset = T.int32()
                B_elem_offset = T.int32()
                C_elem_offset = T.int32()
                A_sub = T.match_buffer(
                    A[vn : vn + 1, vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                    (16, 16),
                    elem_offset=A_elem_offset,
                )
                B_sub = T.match_buffer(
                    B[vn : vn + 1, vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                    (16, 16),
                    elem_offset=B_elem_offset,
                )
                C_sub = T.match_buffer(
                    C[vn : vn + 1, vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                    (16, 16),
                    elem_offset=C_elem_offset,
                )
                T.evaluate(
                    T.tvm_mma_sync(
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        A_sub.data,
                        T.floordiv(A_sub.elem_offset, 256),
                        B_sub.data,
                        T.floordiv(B_sub.elem_offset, 256),
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        dtype="handle",
                    )
                )
# Expected IR after tensorizing `batch_matmul` with the 4-element dot-product
# intrinsic: k is split into 32 chunks of 4, and each chunk's reduction is
# replaced by a "vec4add" extern call on matched 1-D/scalar sub-buffers.
@T.prim_func
def tensorized_batch_matmul_dot_product(
    A: T.Buffer((16, 128, 128), "float32"),
    B: T.Buffer((16, 128, 128), "float32"),
    C: T.Buffer((16, 128, 128), "float32"),
) -> None:
    for n, i, j in T.grid(16, 128, 128):
        with T.block("init"):
            vn, vi, vj = T.axis.remap("SSS", [n, i, j])
            T.reads()
            T.writes(C[vn, vi, vj])
            C[vn, vi, vj] = T.float32(0)
    for n, i, j, k_0 in T.grid(16, 128, 128, 32):
        with T.block("blockized_update"):
            vn, vi, vj, vko = T.axis.remap("SSSR", [n, i, j, k_0])
            T.reads(
                C[vn, vi, vj], A[vn, vi, vko * 4 : vko * 4 + 4], B[vn, vj, vko * 4 : vko * 4 + 4]
            )
            T.writes(C[vn, vi, vj])
            A_1 = T.match_buffer(
                A[vn, vi, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
            )
            B_1 = T.match_buffer(
                B[vn, vj, vko * 4 : vko * 4 + 4], [4], dtype="float32", offset_factor=1
            )
            C_1 = T.match_buffer(C[vn, vi, vj], [], dtype="float32", offset_factor=1)
            T.evaluate(
                T.call_extern(
                    "vec4add",
                    C_1.data,
                    C_1.elem_offset,
                    A_1.data,
                    A_1.elem_offset,
                    B_1.data,
                    B_1.elem_offset,
                    dtype="int32",
                )
            )
# Expected IR after tensorizing `batch_matmul` with the outer-product
# intrinsic: i and j are tiled by 16, k stays outermost, and each 16x16 tile
# update is replaced by an "outer_product" extern call.
@T.prim_func
def tensorized_batch_matmul_outer_product(
    A: T.Buffer((16, 128, 128), "float32"),
    B: T.Buffer((16, 128, 128), "float32"),
    C: T.Buffer((16, 128, 128), "float32"),
) -> None:
    for n, i, j in T.grid(16, 128, 128):
        with T.block("init"):
            vn, vi, vj = T.axis.remap("SSS", [n, i, j])
            T.reads()
            T.writes(C[vn, vi, vj])
            C[vn, vi, vj] = T.float32(0)
    for n, i_0, j_0, k in T.grid(16, 8, 8, 128):
        with T.block("blockized_update"):
            vn, vio, vjo, vk = T.axis.remap("SSSR", [n, i_0, j_0, k])
            T.reads(
                C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16],
                A[vn, vio * 16 : vio * 16 + 16, vk],
                B[vn, vjo * 16 : vjo * 16 + 16, vk],
            )
            T.writes(C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16])
            A_1 = T.match_buffer(A[vn, vio * 16 : vio * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1)
            B_1 = T.match_buffer(B[vn, vjo * 16 : vjo * 16 + 16, vk], [16, 1], dtype="float32", offset_factor=1
            )
            C_1 = T.match_buffer(
                C[vn, vio * 16 : vio * 16 + 16, vjo * 16 : vjo * 16 + 16], [16, 16], dtype="float32", offset_factor=1
            )
            T.evaluate(
                T.call_extern("outer_product", C_1.data, C_1.elem_offset, A_1.data, A_1.elem_offset,
                    B_1.data, B_1.elem_offset, dtype="int32"
                )
            )
# Same computation as `mma_desc`, but the inner block carries a
# "test_annotation" block attribute — used to test annotation handling
# during tensorize pattern matching.
@T.prim_func
def annotated_mma_desc(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (16, 16), align=64, offset_factor=1)
    B = T.match_buffer(b, (16, 16), align=64, offset_factor=1)
    C = T.match_buffer(c, (16, 16), align=64, offset_factor=1)
    with T.block("root"):
        T.reads(C[0 : 16, 0 : 16], A[0 : 16, 0 : 16], B[0 : 16, 0 : 16])
        T.writes(C[0 : 16, 0 : 16])
        for i, j, k in T.grid(16, 16, 16):
            with T.block("update"):
                T.block_attr({"test_annotation": True})
                vii, vjj, vkk = T.axis.remap("SSR", [i, j, k])
                C[vii, vjj] = C[vii, vjj] + A[vii, vkk] * B[vjj, vkk]
# Same workload as `matmul`, with a "test_annotation" attribute on the update
# block so tests can check annotations are preserved through tensorization.
@T.prim_func
def annotated_matmul(
    A: T.Buffer((128, 128), "float32"),
    B: T.Buffer((128, 128), "float32"),
    C: T.Buffer((128, 128), "float32"),
) -> None:
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            T.block_attr({"test_annotation": True})
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Expected IR for tensorizing `annotated_matmul`: identical in structure to
# `tensorized_matmul`, except the decomposed init block keeps the
# "test_annotation" attribute from the original update block.
@T.prim_func
def annotated_tensorized_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    for i_outer, j_outer in T.grid(8, 8):
        for i_inner_init, j_inner_init in T.grid(16, 16):
            with T.block("init"):
                vi_init = T.axis.S(128, ((i_outer * 16) + i_inner_init))
                vj_init = T.axis.S(128, ((j_outer * 16) + j_inner_init))
                # The annotation survives on the decomposed init block.
                T.block_attr({"test_annotation": True})
                C[vi_init, vj_init] = T.float32(0)
        for k_outer in T.grid(8):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
                T.reads(
                    [
                        C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                        A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                        B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                    ]
                )
                T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
                A_elem_offset = T.int32()
                B_elem_offset = T.int32()
                C_elem_offset = T.int32()
                A_sub = T.match_buffer(
                    A[vi * 16 : vi * 16 + 16, vk * 16 : vk * 16 + 16],
                    [16, 16],
                    elem_offset=A_elem_offset,
                )
                B_sub = T.match_buffer(
                    B[vj * 16 : vj * 16 + 16, vk * 16 : vk * 16 + 16],
                    [16, 16],
                    elem_offset=B_elem_offset,
                )
                C_sub = T.match_buffer(
                    C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16],
                    [16, 16],
                    elem_offset=C_elem_offset,
                )
                T.evaluate(
                    T.tvm_mma_sync(
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        A_sub.data,
                        T.floordiv(A_sub.elem_offset, 256),
                        B_sub.data,
                        T.floordiv(B_sub.elem_offset, 256),
                        C_sub.data,
                        T.floordiv(C_sub.elem_offset, 256),
                        dtype="handle",
                    )
                )
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks
# Register each (description, implementation) PrimFunc pair as a named tensor
# intrinsic so the schedules below can refer to them by name in s.tensorize(...).
tir.TensorIntrin.register("test_mma_intrin", mma_desc, mma_intrin)
tir.TensorIntrin.register("test_annotated_mma_intrin", annotated_mma_desc, mma_intrin)
tir.TensorIntrin.register("test_dot_product_intrin", dot_product_desc, dot_product_intrin)
tir.TensorIntrin.register("test_outer_product_intrin", outer_product_desc, outer_product_intrin)
tir.TensorIntrin.register("test_dot_product_intrin_annotated", dot_product_desc, dot_product_intrin_annotated)
def test_tensorize_matmul():
    """Tile the 128^3 matmul into 16x16x16 blocks and tensorize with the MMA intrinsic."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    loop_i, loop_j, loop_k = sch.get_loops(update_blk)
    i_out, i_in = sch.split(loop_i, factors=[None, 16])
    j_out, j_in = sch.split(loop_j, factors=[None, 16])
    k_out, k_in = sch.split(loop_k, factors=[None, 16])
    sch.reorder(i_out, j_out, k_out, i_in, j_in, k_in)
    sch.decompose_reduction(update_blk, k_out)
    sch.tensorize(i_in, "test_mma_intrin")
    # The scheduled module must match the hand-written tensorized reference.
    tvm.ir.assert_structural_equal(tensorized_matmul, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul)
def test_tensorize_batch_matmul():
    """Tensorize the inner 16x16x16 tile of the batched matmul with the MMA intrinsic."""
    sch = tir.Schedule(batch_matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    _, loop_i, loop_j, loop_k = sch.get_loops(update_blk)
    i_out, i_in = sch.split(loop_i, factors=[None, 16])
    j_out, j_in = sch.split(loop_j, factors=[None, 16])
    k_out, k_in = sch.split(loop_k, factors=[None, 16])
    sch.reorder(i_out, j_out, k_out, i_in, j_in, k_in)
    sch.tensorize(i_in, "test_mma_intrin")
    tvm.ir.assert_structural_equal(tensorized_batch_matmul_mma, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=batch_matmul)
def test_tensorize_dot_product():
    """Split the reduction loop by 4 and tensorize it with the dot-product intrinsic."""
    sch = tir.Schedule(batch_matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    _, _, _, loop_k = sch.get_loops(update_blk)
    _, k_in = sch.split(loop_k, factors=[None, 4])
    sch.tensorize(k_in, "test_dot_product_intrin")
    tvm.ir.assert_structural_equal(tensorized_batch_matmul_dot_product, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=batch_matmul)
def test_tensorize_outer_product():
    """Tile i/j by 16, keep k outermost of the tile, and tensorize the outer product."""
    sch = tir.Schedule(batch_matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    _, loop_i, loop_j, loop_k = sch.get_loops(update_blk)
    i_out, i_in = sch.split(loop_i, factors=[None, 16])
    j_out, j_in = sch.split(loop_j, factors=[None, 16])
    sch.reorder(i_out, j_out, loop_k, i_in, j_in)
    sch.tensorize(i_in, "test_outer_product_intrin")
    tvm.ir.assert_structural_equal(tensorized_batch_matmul_outer_product, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=batch_matmul)
def test_tensorize_with_annotation():
    """Tensorize a matmul whose blocks carry a block_attr; the annotation must survive."""
    sch = tir.Schedule(annotated_matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    loop_i, loop_j, loop_k = sch.get_loops(update_blk)
    i_out, i_in = sch.split(loop_i, factors=[None, 16])
    j_out, j_in = sch.split(loop_j, factors=[None, 16])
    k_out, k_in = sch.split(loop_k, factors=[None, 16])
    sch.reorder(i_out, j_out, k_out, i_in, j_in, k_in)
    sch.decompose_reduction(update_blk, k_out)
    sch.tensorize(i_in, "test_annotated_mma_intrin")
    tvm.ir.assert_structural_equal(annotated_tensorized_matmul, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=annotated_matmul)
def test_tensorize_intrinsic_with_annotation():
    """An annotation on the intrinsic's desc must appear on the tensorized outer block."""
    sch = tir.Schedule(matmul, debug_mask="all")
    update_blk = sch.get_block("update")
    _, _, loop_k = sch.get_loops(update_blk)
    k_out, k_in = sch.split(loop_k, factors=[None, 4])
    sch.decompose_reduction(update_blk, k_out)
    sch.tensorize(k_in, "test_dot_product_intrin_annotated")
    outer_blk = sch.get(sch.get_block("update_update_o"))
    assert outer_blk.annotations["test_annotation"] == T.bool(True)
    verify_trace_roundtrip(sch=sch, mod=matmul)
def get_matmul_packed(m, n, k, lhs_type, rhs_dtype="int8"):
    """Build a PrimFunc computing int32 C(m,n) = X(m,k) * W(n,k)^T.

    X uses ``lhs_type`` and W uses ``rhs_dtype``; both are widened to int32
    before the multiply-accumulate.  The compute block is named "compute".
    """
    data = te.placeholder((m, k), name="X", dtype=lhs_type)
    weight = te.placeholder((n, k), name="W", dtype=rhs_dtype)
    red = te.reduce_axis((0, k), name="k")
    out = te.compute(
        (m, n),
        lambda i, j: te.sum(
            data[i, red].astype("int32") * weight[j, red].astype("int32"),
            axis=red,
        ),
        name="compute",
    )
    return te.create_prim_func([data, weight, out])
def tensorize_16x4_test(intrin=VNNI_DOT_16x4_INTRIN):
    """Pack W into a 16x4 layout and tensorize the u8/i8 matmul with ``intrin``."""
    rows, cols, depth = 128, 128, 128
    func = get_matmul_packed(rows, cols, depth, "uint8")
    sch = tir.Schedule(func, debug_mask="all")
    blk = sch.get_block("compute")
    # Pre-pack the weight tensor into the 16x4 blocked layout the intrinsic expects.
    sch.transform_layout(blk, "W", lambda i, j: [i // 16, j // 4, i % 16, j % 4])
    _, loop_j, loop_k = sch.get_loops(blk)
    _, j_in = sch.split(loop_j, factors=[None, 16])
    k_out, k_in = sch.split(loop_k, factors=[None, 4])
    sch.reorder(k_out, j_in, k_in)
    sch.decompose_reduction(blk, k_out)
    sch.tensorize(j_in, intrin)
    verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_vnni():
    """Tensorize the 16x4 packed matmul with the default (VNNI) intrinsic."""
    tensorize_16x4_test()
def test_tensorize_avx512():
    """Tensorize the 16x4 packed matmul with the AVX-512 dot-product intrinsic."""
    tensorize_16x4_test(AVX512_DOT_16x4_INTRIN)
def test_tensorize_arm_dot():
    """Pack W into a 4x4 layout and tensorize with both ARM dot-product intrinsics."""
    rows, cols, depth = 128, 128, 128
    func = get_matmul_packed(rows, cols, depth, "int8")
    for intrin in [ARM_DOT_4x4_i8_SDOT_INTRIN, ARM_DOT_4x4_i8_NEON_INTRIN]:
        sch = tir.Schedule(func, debug_mask="all")
        blk = sch.get_block("compute")
        sch.transform_layout(blk, "W", lambda i, j: [i // 4, j // 4, i % 4, j % 4])
        _, loop_j, loop_k = sch.get_loops(blk)
        _, j_in = sch.split(loop_j, factors=[None, 4])
        k_out, k_in = sch.split(loop_k, factors=[None, 4])
        sch.reorder(k_out, j_in, k_in)
        sch.decompose_reduction(blk, k_out)
        sch.tensorize(j_in, intrin)
        verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_vrmpy():
    """Pack W into a 32x4 layout and tensorize the u8u8i32 matmul with Hexagon vrmpy."""
    rows, cols, depth = 128, 128, 128
    func = get_matmul_packed(rows, cols, depth, "uint8", "uint8")
    sch = tir.Schedule(func, debug_mask="all")
    blk = sch.get_block("compute")
    sch.transform_layout(blk, "W", lambda i, j: [i // 32, j // 4, i % 32, j % 4])
    _, loop_j, loop_k = sch.get_loops(blk)
    _, j_in = sch.split(loop_j, factors=[None, 32])
    k_out, k_in = sch.split(loop_k, factors=[None, 4])
    sch.reorder(k_out, j_in, k_in)
    sch.decompose_reduction(blk, k_out)
    sch.tensorize(j_in, VRMPY_u8u8i32_INTRIN)
    verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_vdmpy():
    """Pack W into a 32x2 layout and tensorize the i16i16i32 matmul with Hexagon vdmpy."""
    rows, cols, depth = 128, 128, 128
    func = get_matmul_packed(rows, cols, depth, "int16", "int16")
    sch = tir.Schedule(func, debug_mask="all")
    blk = sch.get_block("compute")
    sch.transform_layout(blk, "W", lambda i, j: [i // 32, j // 2, i % 32, j % 2])
    _, loop_j, loop_k = sch.get_loops(blk)
    _, j_in = sch.split(loop_j, factors=[None, 32])
    k_out, k_in = sch.split(loop_k, factors=[None, 2])
    sch.reorder(k_out, j_in, k_in)
    sch.decompose_reduction(blk, k_out)
    sch.tensorize(j_in, VDMPY_i16i16i32_INTRIN)
    verify_trace_roundtrip(sch=sch, mod=func)
def test_tensorize_dpa4():
    """GPU-style tiling with shared-memory staging, tensorized with sdot4/dp4a."""
    rows, cols, depth = 128, 128, 128
    lhs = te.placeholder((rows, depth), name="X", dtype="int8")
    rhs = te.placeholder((cols, depth), name="W", dtype="int8")
    red = te.reduce_axis((0, depth), name="k")
    out = te.compute(
        (rows, cols),
        lambda i, j: te.sum(
            lhs[i, red].astype("int32") * rhs[j, red].astype("int32"),
            axis=red,
        ),
        name="compute",
    )
    func = te.create_prim_func([lhs, rhs, out])
    for intrin in [AMDGPU_SDOT4_INTRIN, DP4A_INTRIN]:
        sch = tir.Schedule(func, debug_mask="all")
        blk = sch.get_block("compute")
        loop_i, loop_j, loop_k = sch.get_loops(blk)
        by, ty, yi = sch.split(loop_i, factors=sch.sample_perfect_tile(loop_i, n=3))
        bx, tx, xi = sch.split(loop_j, factors=sch.sample_perfect_tile(loop_j, n=3))
        ko, ki = sch.split(loop_k, [None, 4])
        ko, kt = sch.split(ko, factors=sch.sample_perfect_tile(ko, n=2))
        sch.reorder(by, bx, ty, tx, yi, xi)
        CC = sch.cache_write(blk, 0, "local")
        sch.reverse_compute_at(CC, tx)

        def stage_operand(target_blk, operand_idx):
            # Stage one operand of the compute block into shared memory at loop ko.
            cache_blk = sch.cache_read(target_blk, operand_idx, "shared")
            sch.compute_at(cache_blk, ko, True)
            return cache_blk

        stage_operand(blk, 0)
        stage_operand(blk, 1)
        sch.decompose_reduction(blk, ko)
        sch.tensorize(ki, intrin)
        verify_trace_roundtrip(sch=sch, mod=func)
def test_tensor_intrin_look_up():
    """Unregistered intrinsic: None with allow_missing=True, ValueError otherwise."""
    missing_name = 'non_existent_intrin'
    assert tir.TensorIntrin.get(missing_name, allow_missing=True) is None
    with pytest.raises(ValueError):
        tir.TensorIntrin.get(missing_name)
def test_tensorize_matmul_mixed_dtype():
    """Tensorize a matmul whose loop extents are int64 while the intrinsic's
    description uses int32, checking the dtype mismatch is handled."""
    # fmt: off
    # Pre-tiled int64-shaped matmul: 8x8 outer grid over 16x16x16 tiles.
    @T.prim_func
    def matmul_int64_shape(
        A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        C: T.Buffer((T.int64(128), T.int64(128)), "float32")
    ) -> None:
        for i_0, j_0 in T.grid(T.int64(8), T.int64(8)):
            for i_1_init, j_1_init in T.grid(T.int64(16), T.int64(16)):
                with T.block("init"):
                    vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1_init)
                    vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1_init)
                    C[vi, vj] = T.float32(0)
            for k_0, i_1, j_1, k_1 in T.grid(T.int64(8), T.int64(16), T.int64(16), T.int64(16)):
                with T.block("update"):
                    vi = T.axis.spatial(T.int64(128), i_0 * T.int64(16) + i_1)
                    vj = T.axis.spatial(T.int64(128), j_0 * T.int64(16) + j_1)
                    vk = T.axis.reduce(T.int64(128), k_0 * T.int64(16) + k_1)
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    # Expected result: the same function with the inner tile replaced by
    # tvm_mma_sync over matched 16x16 sub-buffers, all indices int64.
    @T.prim_func
    def tensorized_matmul_int64_shape(
        A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
        C: T.Buffer((T.int64(128), T.int64(128)), "float32")
    ) -> None:
        for i_outer, j_outer in T.grid(T.int64(8), T.int64(8)):
            for i_inner_init, j_inner_init in T.grid(T.int64(16), T.int64(16)):
                with T.block("init"):
                    vi = T.axis.spatial(T.int64(128), i_outer * T.int64(16) + i_inner_init)
                    vj = T.axis.spatial(T.int64(128), j_outer * T.int64(16) + j_inner_init)
                    C[vi, vj] = T.float32(0)
            for k_outer in T.grid(T.int64(8)):
                with T.block("update"):
                    vi, vj, vk = T.axis.remap("SSR", [i_outer, j_outer, k_outer])
                    T.reads(
                        [
                            C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
                            A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
                            B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
                        ]
                    )
                    T.writes(C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)])
                    A_elem_offset = T.int64()
                    B_elem_offset = T.int64()
                    C_elem_offset = T.int64()
                    A_sub = T.match_buffer(
                        A[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
                        [T.int64(16), T.int64(16)],
                        elem_offset=A_elem_offset,
                    )
                    B_sub = T.match_buffer(
                        B[vj * T.int64(16) : vj * T.int64(16) + T.int64(16), vk * T.int64(16) : vk * T.int64(16) + T.int64(16)],
                        [T.int64(16), T.int64(16)],
                        elem_offset=B_elem_offset,
                    )
                    C_sub = T.match_buffer(
                        C[vi * T.int64(16) : vi * T.int64(16) + T.int64(16), vj * T.int64(16) : vj * T.int64(16) + T.int64(16)],
                        [T.int64(16), T.int64(16)],
                        elem_offset=C_elem_offset,
                    )
                    T.evaluate(
                        T.tvm_mma_sync(
                            C_sub.data,
                            T.floordiv(C_sub.elem_offset, T.int64(256)),
                            A_sub.data,
                            T.floordiv(A_sub.elem_offset, T.int64(256)),
                            B_sub.data,
                            T.floordiv(B_sub.elem_offset, T.int64(256)),
                            C_sub.data,
                            T.floordiv(C_sub.elem_offset, T.int64(256)),
                            dtype="handle",
                        )
                    )
    # fmt: on
    s = tir.Schedule(matmul_int64_shape, debug_mask="all")
    update = s.get_block("update")
    # The function is already tiled: loops[-3] is the 16-extent i_1 loop.
    ii = s.get_loops(update)[-3]
    s.tensorize(ii, "test_mma_intrin")
    tvm.ir.assert_structural_equal(s.mod["main"], tensorized_matmul_int64_shape)
    verify_trace_roundtrip(sch=s, mod=matmul_int64_shape)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 31,115 | 36.534379 | 137 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_graph_tuner_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# NOTE: We name this test file to start with test_graph_tuner
# to make it execute after zero_rank tensor test cases. This
# helps avoid topi arithmetic operator overloading issue:
# https://github.com/apache/tvm/issues/3240
# TODO: restore the file name after this issue is resolved.
import pytest
import tvm
from tvm import te
from tvm import autotvm, relay
from tvm.relay.testing import synthetic
from tvm.autotvm.graph_tuner.utils import (
has_multiple_inputs,
get_direct_ancestor,
get_in_nodes,
get_out_nodes,
expr2graph,
bind_inputs,
)
from tvm.autotvm.graph_tuner._base import OPT_OUT_OP
from tvm.autotvm.graph_tuner.utils.traverse_graph import _replace_device_with_tracing
from tvm.relay.expr import Call, TupleGetItem, Tuple, Var
def verify_has_multiple_inputs(node_list, node_idx, input_names, expected_result):
    """Assert has_multiple_inputs() on node_idx returns expected_result."""
    actual = has_multiple_inputs(node_list, node_idx, input_names, OPT_OUT_OP)
    message = "Output mismatch: expecting checking %s to be %s but got %s." % (
        node_list[node_idx]["op"],
        str(expected_result),
        str(actual),
    )
    assert actual == expected_result, message
def test_has_multiple_inputs():
    """Build a small diamond graph and check multiple-input detection per node."""
    data = relay.var("data")
    scaled = data * relay.expr.const(3.0)
    w0 = relay.var("w0")
    conv = relay.nn.conv2d(data, w0)
    combined = relay.add(scaled, conv)
    net = relay.Function(relay.analysis.free_vars(combined), combined)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
    # Only the final add (node 5) fuses two distinct non-weight inputs.
    for idx, expected in [(2, False), (4, False), (5, True)]:
        verify_has_multiple_inputs(node_list, idx, ["data"], expected)
def test_expr2graph():
    """expr2graph must emit one record per visited node, in post-order, with matching ops."""
    mod, _ = synthetic.get_workload()
    node_dict = {}
    node_list = []
    target_ops = [relay.op.get("nn.conv2d")]
    op_name_list = []

    def _record_node(node):
        # Calls contribute their operator; vars/tuples/getitems contribute None.
        if isinstance(node, Call):
            op_name_list.append(node.op)
        elif isinstance(node, (Var, TupleGetItem, Tuple)):
            op_name_list.append(None)

    relay.analysis.post_order_visit(mod["main"], _record_node)
    expr2graph(mod["main"], target_ops, node_dict, node_list, tvm.target.Target("llvm"))
    assert len(node_list) == len(op_name_list)
    for i, (op_name, node) in enumerate(zip(op_name_list, node_list)):
        assert op_name == node["op"], "%dth Node operator mismatch: expecting %s but got %s" % (
            i,
            str(op_name),
            str(node["op"]),
        )
def test_get_direct_ancestor():
    """The nearest conv2d/input ancestor of the second conv2d must be the input node."""
    data = relay.var("data")
    w0 = relay.var("w0")
    conv0 = relay.nn.conv2d(data, w0)
    summed = relay.add(conv0, data * relay.expr.const(5.0))
    shifted = summed + relay.expr.const(2.5)
    w1 = relay.var("w1")
    conv1 = relay.nn.conv2d(shifted, w1)
    net = relay.Function(relay.analysis.free_vars(conv1), conv1)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
    visited_dict = {}
    input_names = ["data"]
    ancestors = get_direct_ancestor(node_list, visited_dict, target_ops, 5, input_names)
    assert ancestors == [0], "Output mismatch: expecting [0] but got %s." % str(ancestors)
    # non-regression test: a graph with no conv2d at all still resolves to the input.
    combined = relay.add(relay.log(data), relay.sqrt(data))
    net = relay.Function(relay.analysis.free_vars(combined), combined)
    net = bind_inputs(net, {"data": (1, 16, 224, 224)})
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
    ancestors = get_direct_ancestor(node_list, visited_dict, target_ops, 3, input_names)
    assert ancestors == [0], "Output mismatch: expecting [0] but got %s." % str(ancestors)
def test_get_in_nodes():
    """Check the in-node map of a conv-add-conv chain against the expected indices."""
    data = relay.var("data")
    w0 = relay.var("w0")
    conv0 = relay.nn.conv2d(data, w0)
    summed = relay.add(conv0, data)
    shifted = summed + relay.expr.const(2.5)
    w1 = relay.var("w1")
    conv1 = relay.nn.conv2d(shifted, w1)
    net = relay.Function(relay.analysis.free_vars(conv1), conv1)
    net = bind_inputs(net, {"data": (1, 16, 224, 224), "w0": (16, 16, 1, 1), "w1": (16, 16, 1, 1)})
    target_ops = [relay.op.get("nn.conv2d")]
    input_names = ["data"]
    node_list = []
    node_dict = {}
    expr2graph(net, target_ops, node_dict, node_list, tvm.target.Target("llvm"))
    out = get_in_nodes(node_list, target_ops, input_names)
    expected_out = {3: [0], 4: [3, 0], 7: [4]}
    # NOTE: only the key sets are compared here (^ on dicts operates on keys).
    if set(out) != set(expected_out):
        raise RuntimeError(
            "Output mismatch: expecting %s but got %s." % (str(expected_out), str(out))
        )
def test_get_out_nodes():
    """Invert an in-node map and compare against the expected out-node map."""
    in_nodes_dict = {8: [4], 4: [3, 0], 3: [0]}
    expected_out = {0: [3, 4], 3: [4], 4: [8], 8: []}
    out = get_out_nodes(in_nodes_dict)
    # NOTE: only the key sets are compared here (^ on dicts operates on keys).
    if set(out) != set(expected_out):
        raise RuntimeError(
            "Output mismatch: expecting %s but got %s." % (str(expected_out), str(out))
        )
def test_target_device_replacement():
    """Every target string gets its -device=... option replaced with -device=tracing."""
    cases = [
        ("cuda", "cuda -device=tracing"),
        ("cuda -device=some_device -libs=cudnn", "cuda -device=tracing -libs=cudnn"),
        ("llvm -device=arm_cpu -arg=xxx", "llvm -device=tracing -arg=xxx"),
        ("llvm -device=arm_cpu", "llvm -device=tracing"),
        ("llvm -device=abc, def", "llvm -device=tracing"),
    ]
    for raw_target, expected in cases:
        assert _replace_device_with_tracing(raw_target) == expected
# Run every test in this file when executed directly as a script.
if __name__ == "__main__":
    test_has_multiple_inputs()
    test_expr2graph()
    test_get_direct_ancestor()
    test_get_in_nodes()
    test_get_out_nodes()
    # Previously missing: defined above but never invoked from __main__.
    test_target_device_replacement()
| 6,761 | 36.359116 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_ptx_mma_sp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
import numpy as np
import tvm.testing
def gen_2in4_mask(m: int, n: int):
    """Generate a random 2:4 structured-sparsity index pattern.

    For every 4-wide chunk of each of the ``m`` rows, pick 2 distinct column
    indices (sorted ascending).  Returns a uint8 array of shape (m, n // 4, 2).
    """
    assert n % 4 == 0
    rows = []
    for _ in range(m):
        chunk_indices = []
        for _ in range(n // 4):
            chunk_indices.append(np.sort(np.random.choice(4, 2, replace=False)))
        rows.append(chunk_indices)
    return np.array(rows).astype("uint8")
def get_dense_mat_by_mask(val, mask):
    """Expand 2:4-compressed values into the dense matrix they represent.

    ``val`` holds 2 values per 4-wide chunk; ``mask`` (m, n_chunks, 2) gives
    the column of each value inside its chunk.  Returns (m, 4 * n_chunks).
    """
    m, n_chunks, _ = mask.shape
    packed = val.reshape(m, n_chunks, 2)
    dense = np.zeros((m, n_chunks, 4), dtype=packed.dtype)
    row_idx, chunk_idx = np.indices((m, n_chunks))
    # Scatter slot 0 then slot 1, matching the original per-chunk write order.
    for slot in range(2):
        dense[row_idx, chunk_idx, mask[:, :, slot]] = packed[:, :, slot]
    return dense.reshape(m, n_chunks * 4)
# Single-warp kernel exercising ptx_mma_sp (m16n8k16, fp16 accumulate):
# C(16x8) = sparse-A(16x8 packed, 2:4) x B(16x8), metadata selects A's columns.
@T.prim_func
def mma_sp_m16n8k16_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    A = T.match_buffer(a, [16, 8], dtype="float16")
    B = T.match_buffer(b, [16, 8], dtype="float16")
    C = T.match_buffer(c, [16, 8], dtype="float16")
    metadata = T.match_buffer(_metadata, [8], dtype="uint32")
    brow = T.env_thread("blockIdx.y")
    bcol = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(brow, 1)
    T.launch_thread(bcol, 1)
    T.launch_thread(tx, 32)
    # Per-thread register fragments of A, B, the accumulator, and the metadata word.
    multi_a = T.decl_buffer([4], "float16", scope="local")
    multi_b = T.decl_buffer([4], "float16", scope="local")
    accum = T.decl_buffer([4], "float16", scope="local")
    meta_local = T.decl_buffer([1], "uint32", scope="local")
    for i in range(4):
        accum[i] = T.float16(0)
    # Each of the 32 threads loads its fragment elements from global memory.
    for i in range(4):
        multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
    for i in range(4):
        multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
    meta_local[0] = metadata[tx // 4]
    T.evaluate(
        T.ptx_mma_sp(
            "m16n8k16",
            "row",
            "col",
            "fp16",
            "fp16",
            "fp16",
            multi_a.data,
            0,
            multi_b.data,
            0,
            accum.data,
            0,
            meta_local.data,
            0,
            0,
            False,
            dtype="float16",
        )
    )
    # Write the accumulator fragment back to C.
    for i in range(4):
        C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
# Same m16n8k16 sparse MMA kernel as above, but with a float32 accumulator/output.
@T.prim_func
def mma_sp_m16n8k16_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    A = T.match_buffer(a, [16, 8], dtype="float16")
    B = T.match_buffer(b, [16, 8], dtype="float16")
    C = T.match_buffer(c, [16, 8], dtype="float32")
    metadata = T.match_buffer(_metadata, [8], dtype="uint32")
    brow = T.env_thread("blockIdx.y")
    bcol = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(brow, 1)
    T.launch_thread(bcol, 1)
    T.launch_thread(tx, 32)
    multi_a = T.decl_buffer([4], "float16", scope="local")
    multi_b = T.decl_buffer([4], "float16", scope="local")
    accum = T.decl_buffer([4], "float32", scope="local")
    meta_local = T.decl_buffer([1], "uint32", scope="local")
    # NOTE(review): accum is float32 but is initialized with T.float16(0) —
    # presumably implicitly converted; confirm this is intentional.
    for i in range(4):
        accum[i] = T.float16(0)
    for i in range(4):
        multi_a[i] = A[tx // 4 + i // 2 * 8, tx % 4 * 2 + i % 2]
    for i in range(4):
        multi_b[i] = B[tx % 4 * 2 + i % 2 + i // 2 * 8, tx // 4]
    meta_local[0] = metadata[tx // 4]
    T.evaluate(
        T.ptx_mma_sp(
            "m16n8k16",
            "row",
            "col",
            "fp16",
            "fp16",
            "fp32",
            multi_a.data,
            0,
            multi_b.data,
            0,
            accum.data,
            0,
            meta_local.data,
            0,
            0,
            False,
            dtype="float32",
        )
    )
    for i in range(4):
        C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
# Single-warp kernel exercising ptx_mma_sp (m16n8k32, fp16 accumulate):
# C(16x8) = sparse-A(16x16 packed, 2:4) x B(32x8); two metadata words per thread group.
@T.prim_func
def mma_sp_m16n8k32_f16f16f16(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    A = T.match_buffer(a, [16, 16], dtype="float16")
    B = T.match_buffer(b, [32, 8], dtype="float16")
    C = T.match_buffer(c, [16, 8], dtype="float16")
    metadata = T.match_buffer(_metadata, [16], dtype="uint32")
    brow = T.env_thread("blockIdx.y")
    bcol = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(brow, 1)
    T.launch_thread(bcol, 1)
    T.launch_thread(tx, 32)
    # k=32 doubles the per-thread A/B fragments compared to the m16n8k16 variant.
    multi_a = T.decl_buffer([8], "float16", scope="local")
    multi_b = T.decl_buffer([8], "float16", scope="local")
    accum = T.decl_buffer([4], "float16", scope="local")
    meta_local = T.decl_buffer([1], "uint32", scope="local")
    for i in range(4):
        accum[i] = T.float16(0)
    for i in range(8):
        multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, i // 4 * 8 + tx % 4 * 2 + i % 2]
    for i in range(8):
        multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
    meta_local[0] = metadata[tx // 4 * 2 + tx % 2]
    T.evaluate(
        T.ptx_mma_sp(
            "m16n8k32",
            "row",
            "col",
            "fp16",
            "fp16",
            "fp16",
            multi_a.data,
            0,
            multi_b.data,
            0,
            accum.data,
            0,
            meta_local.data,
            0,
            0,
            False,
            dtype="float16",
        )
    )
    for i in range(4):
        C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
# Same m16n8k32 sparse MMA kernel as above, but with a float32 accumulator/output.
@T.prim_func
def mma_sp_m16n8k32_f16f16f32(a: T.handle, b: T.handle, c: T.handle, _metadata: T.handle):
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    A = T.match_buffer(a, [16, 16], dtype="float16")
    B = T.match_buffer(b, [32, 8], dtype="float16")
    C = T.match_buffer(c, [16, 8], dtype="float32")
    metadata = T.match_buffer(_metadata, [16], dtype="uint32")
    brow = T.env_thread("blockIdx.y")
    bcol = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    T.launch_thread(brow, 1)
    T.launch_thread(bcol, 1)
    T.launch_thread(tx, 32)
    multi_a = T.decl_buffer([8], "float16", scope="local")
    multi_b = T.decl_buffer([8], "float16", scope="local")
    accum = T.decl_buffer([4], "float32", scope="local")
    meta_local = T.decl_buffer([1], "uint32", scope="local")
    # NOTE(review): accum is float32 but is initialized with T.float16(0) —
    # presumably implicitly converted; confirm this is intentional.
    for i in range(4):
        accum[i] = T.float16(0)
    for i in range(8):
        multi_a[i] = A[(i % 4) // 2 * 8 + tx // 4, i // 4 * 8 + tx % 4 * 2 + i % 2]
    for i in range(8):
        multi_b[i] = B[i // 2 * 8 + tx % 4 * 2 + i % 2, tx // 4]
    meta_local[0] = metadata[tx // 4 * 2 + tx % 2]
    T.evaluate(
        T.ptx_mma_sp(
            "m16n8k32",
            "row",
            "col",
            "fp16",
            "fp16",
            "fp32",
            multi_a.data,
            0,
            multi_b.data,
            0,
            accum.data,
            0,
            meta_local.data,
            0,
            0,
            False,
            dtype="float32",
        )
    )
    for i in range(4):
        C[i // 2 * 8 + tx // 4, tx % 4 * 2 + i % 2] = accum[i]
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k16_f16():
    """Run the m16n8k16 sparse-MMA kernels on CUDA and compare against a dense
    numpy matmul of the mask-expanded A."""

    def get_meta_m16n8k16_half(mask):
        # Pack the (16, 4, 2) 2:4 index mask into 8 uint32 metadata words,
        # 2 bits per index, interleaving the two 8-row halves of A.
        assert mask.shape == (16, 4, 2)
        mask = mask.reshape(16, 8)
        ret = np.zeros((8,)).astype("uint32")
        for i in range(8):
            base = 1
            for blk in range(2):
                for j in range(8):
                    ret[i] |= int(mask[blk * 8 + i, j]) * base
                    base = base << 2
        return ret

    for out_dtype in ["float16", "float32"]:
        func = mma_sp_m16n8k16_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k16_f16f16f32
        sch = tvm.tir.Schedule(func)
        cuda_mod = tvm.build(sch.mod, target="cuda")
        A_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
        B_np = np.random.uniform(-1, 1, [16, 8]).astype("float16")
        mask = gen_2in4_mask(16, 16)
        # Reference: expand the compressed A to dense and do a plain matmul.
        A_dense_np = get_dense_mat_by_mask(A_np, mask)
        C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
        meta = get_meta_m16n8k16_half(mask)
        ctx = tvm.cuda()
        A_tvm = tvm.nd.array(A_np, ctx)
        B_tvm = tvm.nd.array(B_np, ctx)
        C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
        meta_tvm = tvm.nd.array(meta, ctx)
        cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
        tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
@tvm.testing.requires_cuda_compute_version(8)
def test_mma_sp_m16n8k32_f16():
    """Run the m16n8k32 sparse-MMA kernels on CUDA and compare against a dense
    numpy matmul of the mask-expanded A."""

    def get_meta_m16n8k32_half(mask):
        # Pack the (16, 8, 2) 2:4 index mask into 16 uint32 metadata words
        # (two per row group for the k=32 shape), 2 bits per index.
        assert mask.shape == (16, 8, 2)
        mask = mask.reshape(16, 2, 8)
        ret = np.zeros((8, 2)).astype("uint32")
        for i in range(8):
            for k in range(2):
                base = 1
                for blk in range(2):
                    for j in range(8):
                        ret[i, k] |= int(mask[blk * 8 + i, k, j]) * base
                        base = base << 2
        return ret.reshape(16)

    for out_dtype in ["float16", "float32"]:
        func = mma_sp_m16n8k32_f16f16f16 if out_dtype == "float16" else mma_sp_m16n8k32_f16f16f32
        sch = tvm.tir.Schedule(func)
        cuda_mod = tvm.build(sch.mod, target="cuda")
        A_np = np.random.uniform(-1, 1, [16, 16]).astype("float16")
        B_np = np.random.uniform(-1, 1, [32, 8]).astype("float16")
        mask = gen_2in4_mask(16, 32)
        # Reference: expand the compressed A to dense and do a plain matmul.
        A_dense_np = get_dense_mat_by_mask(A_np, mask)
        C_np = np.matmul(A_dense_np, B_np).astype(out_dtype)
        meta = get_meta_m16n8k32_half(mask)
        ctx = tvm.cuda()
        A_tvm = tvm.nd.array(A_np, ctx)
        B_tvm = tvm.nd.array(B_np, ctx)
        C_tvm = tvm.nd.array(np.zeros_like(C_np), ctx)
        meta_tvm = tvm.nd.array(meta, ctx)
        cuda_mod(A_tvm, B_tvm, C_tvm, meta_tvm)
        tvm.testing.assert_allclose(C_tvm.numpy(), C_np, atol=1e-3, rtol=1e-3)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_mma_sp_m16n8k16_f16()
    test_mma_sp_m16n8k32_f16()
| 10,791 | 31.023739 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_micro_model_library_format.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pathlib
import sys
import datetime
import json
import os
import tarfile
import numpy as np
import pytest
import platform
pytest.importorskip("tvm.micro")
import tvm
import tvm.relay
from tvm.relay.backend import Executor, Runtime
from tvm.relay.testing import byoc
import tvm.runtime.module
import tvm.testing
from tvm.contrib import utils
import tvm.micro as micro
from tvm.micro.testing.utils import get_conv2d_relay_module
import tvm.micro.model_library_format as model_library_format
from tvm.micro.model_library_format import _GENERATED_VERSION
@tvm.testing.requires_micro
def test_export_operator_model_library_format():
    """Export a single TE operator as Model Library Format and validate the
    generated metadata.json, C sources, and TIR dump inside the tarball."""
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        A = tvm.te.placeholder((2,), dtype="int8")
        B = tvm.te.placeholder((1,), dtype="int8")
        C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
        sched = tvm.te.create_schedule(C.op)
        mod = tvm.build(
            sched,
            [A, B, C],
            tvm.target.Target(target, target),
            runtime=Runtime("crt", {"system-lib": True}),
            name="add",
        )
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(mod, mlf_tar_path)
    # Unpack the generated MLF archive so its contents can be inspected.
    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)
    with open(os.path.join(extract_dir, "metadata.json")) as json_f:
        metadata = json.load(json_f)
    assert metadata["version"] == _GENERATED_VERSION
    assert metadata["model_name"] == "add"
    # The export timestamp must be recent (within 5 minutes of now).
    export_datetime = datetime.datetime.strptime(
        metadata["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
    )
    assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
    assert metadata["target"] == [str(target)]
    # Memory map entries describe A (2 bytes), B (1 byte), and C (2 bytes).
    assert metadata["memory"]["add"][0]["dtype"] == "int8"
    assert metadata["memory"]["add"][0]["shape"] == [2]
    assert metadata["memory"]["add"][0]["size_bytes"] == 2
    assert metadata["memory"]["add"][1]["dtype"] == "int8"
    assert metadata["memory"]["add"][1]["shape"] == [1]
    assert metadata["memory"]["add"][1]["size_bytes"] == 1
    assert metadata["memory"]["add"][2]["dtype"] == "int8"
    assert metadata["memory"]["add"][2]["shape"] == [2]
    assert metadata["memory"]["add"][2]["size_bytes"] == 2
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "lib1.c"))
    assert (
        len(mod.ir_module_by_target) == 1
    ), f"expect 1 ir_model_by_target: {mod.ir_module_by_target!r}"
    for target, ir_mod in mod.ir_module_by_target.items():
        assert int(tvm.runtime.ndarray.device(str(target)).device_type) == 1
        with open(os.path.join(extract_dir, "src", "tir-1.txt")) as tir_f:
            assert tir_f.read() == str(ir_mod)
@tvm.testing.requires_micro
def test_export_multiple_operator_model_library_format():
    """Exporting a list of operator modules must be rejected with RuntimeError."""
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        A = tvm.te.placeholder((2,), dtype="int8")
        B = tvm.te.placeholder((1,), dtype="int8")
        C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C")
        sched = tvm.te.create_schedule(C.op)
        mod = tvm.build(
            sched,
            [A, B, C],
            tvm.target.Target(target, target),
            runtime=Runtime("crt", {"system-lib": True}),
            name="add",
        )
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    with pytest.raises(RuntimeError) as exc:
        micro.export_model_library_format([mod, mod], mlf_tar_path)
    # Fixed: pytest's ExceptionInfo exposes the raised exception as .value,
    # not .exception (which would raise AttributeError here).
    assert str(exc.value) == ("Multiple operator is not supported.")
def validate_graph_json(extract_dir, factory):
    """Check the exported graph JSON matches the factory's and is structurally sane."""
    graph_path = os.path.join(
        extract_dir, "executor-config", "graph", f"{factory.libmod_name}.graph"
    )
    with open(graph_path) as graph_f:
        graph_json = graph_f.read()
    assert graph_json == factory.graph_json
    # Just check it parses and looks roughly right.
    graph = json.loads(graph_json)
    assert "nodes" in graph
    assert len(graph["nodes"]) == 4
    assert "attrs" in graph
@tvm.testing.requires_micro
@pytest.mark.parametrize(
    "executor,runtime,should_generate_interface,json_constants_size_bytes",
    [
        (Executor("graph"), Runtime("crt", {"system-lib": True}), False, 8),
        (Executor("aot", {"link-params": True}), Runtime("crt"), False, 0),
        (
            Executor("aot", {"unpacked-api": True, "interface-api": "c"}),
            Runtime("crt"),
            True,
            0,
        ),
    ],
)
def test_export_model_library_format_c(
    executor, runtime, should_generate_interface, json_constants_size_bytes
):
    """Export a small Relay model with the C backend and validate the MLF archive.

    Validates metadata.json (version, model name, export timestamp, target,
    memory maps), the generated C sources, the optional C interface header,
    and the saved Relay source and parameter files.
    """
    target = tvm.target.target.micro("host")
    with utils.TempDirectory.set_keep_for_debug(True):
        with tvm.transform.PassContext(
            opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": False}
        ):
            relay_mod = tvm.relay.fromtext(
                """
            #[version = "0.0.5"]
            def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {
            %0 = cast(%a, dtype="float32") + %b * %c;
            %0
            }"""
            )
            factory = tvm.relay.build(
                relay_mod,
                target,
                executor=executor,
                runtime=runtime,
                mod_name="add",
                params={"c": np.array([[2.0, 4.0]], dtype="float32")},
            )
        temp_dir = utils.tempdir()
        mlf_tar_path = temp_dir.relpath("lib.tar")
        micro.export_model_library_format(factory, mlf_tar_path)
        tf = tarfile.open(mlf_tar_path)
        extract_dir = temp_dir.relpath("extract")
        os.mkdir(extract_dir)
        tf.extractall(extract_dir)
        with open(os.path.join(extract_dir, "metadata.json")) as json_f:
            metadata = json.load(json_f)
        module_name = factory.libmod_name
        assert metadata["version"] == _GENERATED_VERSION
        assert metadata["modules"][module_name]["model_name"] == "add"
        # The export timestamp must parse with the documented format and be recent.
        export_datetime = datetime.datetime.strptime(
            metadata["modules"][module_name]["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
        )
        assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
        assert metadata["modules"][module_name]["target"] == [str(target)]
        # Storage ids ("sids") are only recorded for the graph executor.
        if executor.name == "graph":
            assert metadata["modules"][module_name]["memory"]["sids"] == [
                {"storage_id": 0, "size_bytes": 2, "input_binding": "a"},
                {"storage_id": 1, "size_bytes": 8, "input_binding": "b"},
                {"storage_id": 2, "size_bytes": 8, "input_binding": "p0"},
                {"storage_id": 3, "size_bytes": 8},
            ]
        assert metadata["modules"][module_name]["memory"]["functions"]["main"] == [
            {
                "constants_size_bytes": json_constants_size_bytes,
                "device": 1,
                "inputs": {
                    "a": {"dtype": "uint8", "size": 2},
                    "b": {"dtype": "float32", "size": 8},
                },
                "io_size_bytes": 18,
                "outputs": {"output": {"dtype": "float32", "size": 8}},
                "workspace_size_bytes": 0,
            }
        ]
        assert metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
            "workspace"
        ] == [{"device": 1, "workspace_size_bytes": 0}]
        assert (
            "fused_cast_multiply_add"
            in metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
                "function_name"
            ]
        )
        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "add_lib0.c"))
        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "add_lib1.c"))
        # The C interface header is only emitted for the AOT executor with interface-api=c.
        assert should_generate_interface == os.path.exists(
            os.path.join(extract_dir, "codegen", "host", "include", "tvmgen_add.h")
        )
        if executor.name == "graph":
            validate_graph_json(extract_dir, factory)
        with open(os.path.join(extract_dir, "src", f"{module_name}.relay")) as relay_f:
            assert relay_f.read() == str(relay_mod)
        with open(os.path.join(extract_dir, "parameters", "add.params"), "rb") as params_f:
            params = tvm.relay.load_param_dict(params_f.read())
            # When params are linked into the code (constants_size_bytes == 0),
            # the params file should be empty.
            if json_constants_size_bytes != 0:
                assert "p0" in params
            else:
                assert len(params) == 0
@tvm.testing.requires_micro
def test_export_model_library_format_llvm():
    """Export a Relay model built with the LLVM backend and validate the MLF archive."""
    with utils.TempDirectory.set_keep_for_debug(True):
        target = tvm.target.target.micro("host")
        # The micro target string starts with the "c" codegen; swap it for
        # "llvm" while keeping the remaining target attributes unchanged.
        assert str(target)[:2] == "c "
        target = tvm.target.Target("llvm " + str(target)[2:])
        with tvm.transform.PassContext(opt_level=3):
            relay_mod = tvm.relay.fromtext(
                """
            #[version = "0.0.5"]
            def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), float32], %c : Tensor[(1, 2), float32]) {
            %0 = cast(%a, dtype="float32") + %b * %c;
            %0
            }"""
            )
            factory = tvm.relay.build(
                relay_mod,
                target,
                runtime=Runtime("crt", {"system-lib": True}),
                mod_name="add",
                params={"c": np.array([[2.0, 4.0]], dtype="float32")},
            )
        temp_dir = utils.tempdir()
        mlf_tar_path = temp_dir.relpath("lib.tar")
        micro.export_model_library_format(factory, mlf_tar_path)
        tf = tarfile.open(mlf_tar_path)
        extract_dir = temp_dir.relpath("extract")
        os.mkdir(extract_dir)
        tf.extractall(extract_dir)
        with open(os.path.join(extract_dir, "metadata.json")) as json_f:
            metadata = json.load(json_f)
        module_name = factory.libmod_name
        assert metadata["version"] == _GENERATED_VERSION
        assert metadata["modules"][module_name]["model_name"] == "add"
        # The export timestamp must parse with the documented format and be recent.
        export_datetime = datetime.datetime.strptime(
            metadata["modules"][module_name]["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
        )
        assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
        assert metadata["modules"][module_name]["target"] == [str(target)]
        assert metadata["modules"][module_name]["memory"]["sids"] == [
            {"storage_id": 0, "size_bytes": 2, "input_binding": "a"},
            {"storage_id": 1, "size_bytes": 8, "input_binding": "b"},
            {"storage_id": 2, "size_bytes": 8, "input_binding": "p0"},
            {"storage_id": 3, "size_bytes": 8},
        ]
        assert metadata["modules"][module_name]["memory"]["functions"]["main"] == [
            {
                "constants_size_bytes": 8,
                "device": 1,
                "inputs": {
                    "a": {"dtype": "uint8", "size": 2},
                    "b": {"dtype": "float32", "size": 8},
                },
                "io_size_bytes": 18,
                "outputs": {"output": {"dtype": "float32", "size": 8}},
                "workspace_size_bytes": 0,
            }
        ]
        assert metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
            "workspace"
        ] == [{"device": 1, "workspace_size_bytes": 0}]
        assert (
            "fused_cast_multiply_add"
            in metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
                "function_name"
            ]
        )
        # LLVM codegen produces object files, not C sources.
        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "lib", "add_lib0.o"))
        validate_graph_json(extract_dir, factory)
        with open(os.path.join(extract_dir, "src", f"{module_name}.relay")) as relay_f:
            assert relay_f.read() == str(relay_mod)
        with open(os.path.join(extract_dir, "parameters", "add.params"), "rb") as params_f:
            params = tvm.relay.load_param_dict(params_f.read())
            assert "p0" in params
@tvm.testing.requires_micro
@pytest.mark.parametrize(
    "executor,runtime",
    [(Executor("graph"), Runtime("crt", {"system-lib": True})), (Executor("aot"), Runtime("crt"))],
)
def test_export_model_library_format_workspace(executor, runtime):
    """Check per-function workspace sizes recorded in MLF metadata.

    Uses a quantized depthwise conv2d so the fused operator needs a
    non-trivial workspace; runs for both graph and AOT executors.
    """
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(
        opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": False}
    ):
        relay_mod = tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%p0: Tensor[(1, 56, 56, 128), int16], %p1: Tensor[(3, 3, 128, 1), int16], %p2: Tensor[(1, 1, 1, 128), int32]){
            %0 = nn.conv2d(%p0, %p1, padding=[1, 1, 1, 1], groups=128, channels=128, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWOI", out_dtype="int32") /* ty=Tensor[(1, 56, 56, 128), int32] */;
            %1 = add(%0, %p2) /* ty=Tensor[(1, 56, 56, 128), int32] */;
            %2 = fixed_point_multiply(%1, multiplier=2080045879, shift=-4) /* ty=Tensor[(1, 56, 56, 128), int32] */;
            %3 = clip(%2, a_min=0f, a_max=255f) /* ty=Tensor[(1, 56, 56, 128), int32] */;
            cast(%3, dtype="uint8") /* ty=Tensor[(1, 56, 56, 128), uint8] */
            }
            """
        )
        factory = tvm.relay.build(
            relay_mod,
            target,
            executor=executor,
            runtime=runtime,
            mod_name="qnn_conv2d",
        )
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(factory, mlf_tar_path)
    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)
    with open(os.path.join(extract_dir, "metadata.json")) as json_f:
        metadata = json.load(json_f)
    module_name = factory.libmod_name
    assert metadata["version"] == _GENERATED_VERSION
    assert metadata["modules"][module_name]["model_name"] == "qnn_conv2d"
    # The export timestamp must parse with the documented format and be recent.
    export_datetime = datetime.datetime.strptime(
        metadata["modules"][module_name]["export_datetime"], "%Y-%m-%d %H:%M:%SZ"
    )
    assert (datetime.datetime.now() - export_datetime) < datetime.timedelta(seconds=60 * 5)
    assert metadata["modules"][module_name]["target"] == [str(target)]
    assert metadata["modules"][module_name]["memory"]["functions"]["main"] == [
        {
            "constants_size_bytes": 0,
            "device": 1,
            "inputs": {
                "p0": {"dtype": "int16", "size": 802816},
                "p1": {"dtype": "int16", "size": 2304},
                "p2": {"dtype": "int32", "size": 512},
            },
            "io_size_bytes": 1207040,
            "outputs": {"output": {"dtype": "uint8", "size": 401408}},
            "workspace_size_bytes": 2466816,
        }
    ]
    # The fused conv2d operator must report the same workspace requirement.
    assert metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
        "workspace"
    ] == [{"device": 1, "workspace_size_bytes": 2466816}]
    assert (
        "fused_nn_conv2d_add_fixed_point_multiply_clip_cast"
        in metadata["modules"][module_name]["memory"]["functions"]["operator_functions"][0][
            "function_name"
        ]
    )
@tvm.testing.requires_micro
def test_export_non_dso_exportable():
    """A module that is neither c nor llvm codegen cannot be placed in codegen/."""
    module = tvm.support.FrontendTestModule()
    temp_dir = utils.tempdir()
    with pytest.raises(AssertionError) as exc:
        model_library_format._populate_codegen_dir([module], temp_dir.relpath("codegen"))
    # NOTE: pytest's ExceptionInfo exposes the raised exception as `.value`
    # (the previous `.exception` attribute does not exist and would raise
    # AttributeError, masking the real check).
    assert str(exc.value) == (
        "Don't know how to export non-c or non-llvm modules; found: ffi_testing"
    )
@tvm.testing.requires_micro
def test_export_byoc_c_module():
    """Test BYOC flow when it produces DSO-exportable modules.
    NOTE the general BYOC flow is not fully supported by Model Library Format right now.
    """
    x = tvm.relay.var("x", shape=(10, 10))
    w0 = tvm.relay.var("w0", shape=(10, 10))
    w1 = tvm.relay.var("w1", shape=(10, 10))
    w2 = tvm.relay.var("w2", shape=(10, 10))
    w3 = tvm.relay.var("w3", shape=(10, 10))
    w4 = tvm.relay.var("w4", shape=(10, 10))
    w5 = tvm.relay.var("w5", shape=(10, 10))
    w6 = tvm.relay.var("w6", shape=(10, 10))
    w7 = tvm.relay.var("w7", shape=(10, 10))
    # C compiler
    z0 = tvm.relay.add(x, w0)
    p0 = tvm.relay.subtract(z0, w1)
    q0 = tvm.relay.multiply(p0, w2)
    z1 = tvm.relay.add(x, w3)
    p1 = tvm.relay.subtract(z1, w4)
    q1 = tvm.relay.multiply(p1, w5)
    # Other parts on TVM
    z2 = tvm.relay.add(x, w6)
    q2 = tvm.relay.subtract(z2, w7)
    r = tvm.relay.concatenate((q0, q1, q2), axis=0)
    f = tvm.relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    # Annotate the subgraphs above for the "ccompiler" codegen, then partition
    # them out into separate functions.
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
    mod = tvm.relay.transform.InferType()(mod)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(mod, tvm.target.target.micro("host"), runtime=Runtime("crt"))
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(factory, mlf_tar_path)
    with tarfile.open(mlf_tar_path, "r:*") as tf:
        tar_members = [ti.name for ti in tf.getmembers()]
        print("tar members", tar_members)
        assert "./metadata.json" in tar_members
        with tf.extractfile("./metadata.json") as f:
            metadata = json.load(f)
        # Verify the main-function memory map survives partitioning.
        main_md = metadata["modules"][factory.libmod_name]["memory"]["functions"]["main"]
        assert main_md == [
            {
                "constants_size_bytes": 0,
                "device": 1,
                "inputs": {
                    "w0": {"dtype": "float32", "size": 400},
                    "w1": {"dtype": "float32", "size": 400},
                    "w2": {"dtype": "float32", "size": 400},
                    "w3": {"dtype": "float32", "size": 400},
                    "w4": {"dtype": "float32", "size": 400},
                    "w5": {"dtype": "float32", "size": 400},
                    "w6": {"dtype": "float32", "size": 400},
                    "w7": {"dtype": "float32", "size": 400},
                    "x": {"dtype": "float32", "size": 400},
                },
                "io_size_bytes": 4800,
                "outputs": {"output": {"dtype": "float32", "size": 1200}},
                "workspace_size_bytes": 1200,
            }
        ]
@tvm.testing.requires_micro
def test_multiple_relay_modules_same_module_name():
    """Exporting two modules that share the same mod_name must be rejected."""
    relay_mod = get_conv2d_relay_module()
    target = tvm.target.target.micro("host")
    build_kwargs = dict(runtime=Runtime("crt"), executor=Executor("graph"), mod_name="mod")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factories = [tvm.relay.build(relay_mod, target, **build_kwargs) for _ in range(2)]
    out_dir = utils.tempdir()
    tar_path = out_dir.relpath("lib.tar")
    # Both factories carry mod_name "mod", so the export must fail.
    with pytest.raises(AssertionError, match="Multiple modules should have unique names"):
        micro.export_model_library_format(factories, tar_path)
@tvm.testing.requires_micro
def test_multiple_relay_modules_graph():
    """Export two graph-executor modules into one MLF archive and validate metadata."""
    mod = get_conv2d_relay_module()
    executor = Executor("graph")
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod1")
        factory2 = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod2")
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format([factory1, factory2], mlf_tar_path)
    with tarfile.open(mlf_tar_path, "r:*") as tf:
        tar_members = [ti.name for ti in tf.getmembers()]
        print("tar members", tar_members)
        assert "./metadata.json" in tar_members
        # Each module's generated sources are prefixed with its mod_name.
        assert "./codegen/host/src/mod1_lib0.c" in tar_members
        assert "./codegen/host/src/mod2_lib0.c" in tar_members
        with tf.extractfile("./metadata.json") as f:
            metadata = json.load(f)
        mod2_main_md = metadata["modules"]["mod2"]["memory"]["functions"]["main"]
        assert mod2_main_md == [
            {
                "constants_size_bytes": 0,
                "device": 1,
                "inputs": {
                    "data": {"dtype": "int8", "size": 12288},
                    "weight": {"dtype": "int8", "size": 600},
                },
                "io_size_bytes": 143960,
                "outputs": {"output": {"dtype": "int32", "size": 131072}},
                "workspace_size_bytes": 158088,
            }
        ]
        assert metadata["modules"]["mod1"]["model_name"] == "mod1"
        assert metadata["modules"]["mod2"]["model_name"] == "mod2"
@tvm.testing.requires_micro
def test_multiple_relay_modules_c():
    """Two AOT/C modules exported together produce per-module sources and headers."""
    relay_mod = get_conv2d_relay_module()
    aot_executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    crt_runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(
            relay_mod, target, runtime=crt_runtime, executor=aot_executor, mod_name="mod1"
        )
        factory2 = tvm.relay.build(
            relay_mod, target, runtime=crt_runtime, executor=aot_executor, mod_name="mod2"
        )
    temp_dir = utils.tempdir()
    tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format([factory1, factory2], tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tarfile.open(tar_path).extractall(extract_dir)
    # Each module contributes its own mod_name-prefixed sources and C header.
    for src_name in ("mod1_lib0.c", "mod1_lib1.c", "mod2_lib0.c", "mod2_lib1.c"):
        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", src_name))
    for header in ("tvmgen_mod1.h", "tvmgen_mod2.h"):
        assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "include", header))
    # check CRT runtime directory
    assert os.path.exists(os.path.join(extract_dir, "runtime"))
@tvm.testing.requires_micro
def test_multiple_relay_modules_aot_graph():
    """Export one graph-executor and one AOT module together; check per-module metadata."""
    mod = get_conv2d_relay_module()
    executor1 = Executor("graph")
    executor2 = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory1 = tvm.relay.build(
            mod, target, runtime=runtime, executor=executor1, mod_name="mod1"
        )
        factory2 = tvm.relay.build(
            mod, target, runtime=runtime, executor=executor2, mod_name="mod2"
        )
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format([factory1, factory2], mlf_tar_path)
    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod1_lib1.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib0.c"))
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "src", "mod2_lib1.c"))
    # Only the AOT module with interface-api=c gets a generated header.
    assert os.path.exists(os.path.join(extract_dir, "codegen", "host", "include", "tvmgen_mod2.h"))
    with open(os.path.join(extract_dir, "metadata.json")) as f:
        metadata = json.load(f)
    # Each module records the executor it was built with.
    assert metadata["modules"]["mod1"]["executors"] == ["graph"]
    assert metadata["modules"]["mod2"]["executors"] == ["aot"]
    assert metadata["version"] == _GENERATED_VERSION
@tvm.testing.requires_micro
def test_output_name_single():
    """Check that a single custom output tensor name appears in metadata.json."""
    input_a = tvm.relay.var("input_a", shape=(3, 4, 5), dtype="int64")
    output_1 = input_a + tvm.relay.const(1, "int64")
    # Name the single output via the output_tensor_names function attribute.
    attrs = tvm.ir.make_node("DictAttrs", output_tensor_names=["test_output_a"])
    main_func = tvm.relay.Function([input_a], output_1, attrs=attrs)
    mod = tvm.IRModule.from_expr(main_func)
    mod = tvm.relay.transform.InferType()(mod)
    executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod1")
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(factory, mlf_tar_path)
    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)
    with open(os.path.join(extract_dir, "metadata.json")) as f:
        metadata = json.load(f)
    # The declared name must replace the default "output" key in the memory map.
    assert metadata["modules"]["mod1"]["memory"]["functions"]["main"][0]["outputs"] == {
        "test_output_a": {"size": 480, "dtype": "int64"}
    }
@tvm.testing.requires_micro
def test_output_names_many():
    """Check nested tuple outputs map to the declared output tensor names in metadata.json."""
    input_a = tvm.relay.var("input_a", shape=(3, 4, 5), dtype="int64")
    input_b = tvm.relay.var("input_b", shape=(3, 4), dtype="int32")
    input_c = tvm.relay.var("input_c", shape=(3,), dtype="float32")
    output_1 = input_a + tvm.relay.const(1, "int64")
    output_2 = input_b + tvm.relay.const(2)
    output_3 = input_b + tvm.relay.const(3)
    output_4 = input_c + tvm.relay.const(4.0)
    # Outputs are deliberately nested to verify flattening order matches the
    # declared name order.
    full_output = tvm.relay.Tuple(
        [output_1, tvm.relay.Tuple([tvm.relay.Tuple([output_2, output_3]), output_4])]
    )
    attrs = tvm.ir.make_node(
        "DictAttrs",
        output_tensor_names=["test_output_a", "test_output_b", "test_output_c", "test_output_d"],
    )
    main_func = tvm.relay.Function([input_a, input_b, input_c], full_output, attrs=attrs)
    mod = tvm.IRModule.from_expr(main_func)
    mod = tvm.relay.transform.InferType()(mod)
    executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(mod, target, runtime=runtime, executor=executor, mod_name="mod1")
    temp_dir = utils.tempdir()
    mlf_tar_path = temp_dir.relpath("lib.tar")
    micro.export_model_library_format(factory, mlf_tar_path)
    tf = tarfile.open(mlf_tar_path)
    extract_dir = temp_dir.relpath("extract")
    os.mkdir(extract_dir)
    tf.extractall(extract_dir)
    with open(os.path.join(extract_dir, "metadata.json")) as f:
        metadata = json.load(f)
    assert metadata["modules"]["mod1"]["memory"]["functions"]["main"][0]["outputs"] == {
        "test_output_a": {"size": 480, "dtype": "int64"},
        "test_output_b": {"size": 48, "dtype": "int32"},
        "test_output_c": {"size": 48, "dtype": "int32"},
        "test_output_d": {"size": 12, "dtype": "float32"},
    }
@tvm.testing.requires_micro
def test_template_files():
    """Verify the template files are bundled into the generated MLF archive."""
    relay_mod = get_conv2d_relay_module()
    aot_executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    crt_runtime = Runtime("crt")
    target = tvm.target.target.micro("host")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        factory = tvm.relay.build(
            relay_mod, target, runtime=crt_runtime, executor=aot_executor, mod_name="mod"
        )
    temp_dir = utils.tempdir()
    tar_path = temp_dir / "lib.tar"
    micro.export_model_library_format(factory, tar_path)
    extract_dir = temp_dir / "extract"
    os.mkdir(extract_dir)
    tarfile.open(tar_path).extractall(extract_dir)
    # Both project templates must be present in the templates/ directory.
    templates_dir = extract_dir / "templates"
    assert (templates_dir / "crt_config.h.template").is_file()
    assert (templates_dir / "platform.c.template").is_file()
# Allow invoking this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 30,277 | 39.424566 | 209 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_postproc_disallow_dynamic_loop.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm import tir
from tvm.script import tir as T
from tvm.target import Target
def _target() -> Target:
    """CUDA device target with an LLVM host, shared by the tests below."""
    cuda_target = Target("cuda", host="llvm")
    return cuda_target
def _create_context(mod, target) -> ms.TuneContext:
    """Build a TuneContext whose only postprocessor is DisallowDynamicLoop."""
    space_gen = ms.space_generator.PostOrderApply(
        sch_rules=[],
        postprocs=[ms.postproc.DisallowDynamicLoop()],
        mutator_probs={},
    )
    return ms.TuneContext(
        mod=mod,
        target=target,
        space_generator=space_gen,
        task_name="test",
    )
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
# A 1024x1024x1024 matmul whose loop extents are all constant; the
# DisallowDynamicLoop postproc should accept it. (Comments only here:
# TVMScript classes should not carry string docstrings.)
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# Same matmul, but the reduction loop extent depends on the outer loop
# variable `i`, making it dynamic; DisallowDynamicLoop must reject it.
@tvm.script.ir_module
class DynamicLoop:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j in T.grid(1024, 1024):
            # Extent `i` varies per iteration -> dynamic loop.
            for k in T.serial(0, i):
                with T.block("matmul"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    with T.init():
                        C[vi, vj] = 0.0
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def test_postproc_disallow_dynamic_loops():
    """A module whose loops all have constant extents passes the postproc."""
    ctx = _create_context(Matmul, target=_target())
    sch = tir.Schedule(Matmul, debug_mask="all")
    assert ctx.space_generator.postprocs[0].apply(sch)
def test_postproc_disallow_dynamic_loops_fail():
    """A loop whose extent depends on an outer loop variable is rejected."""
    ctx = _create_context(DynamicLoop, target=_target())
    sch = tir.Schedule(DynamicLoop, debug_mask="all")
    assert not ctx.space_generator.postprocs[0].apply(sch)
# Allow invoking this test file directly as a script.
if __name__ == "__main__":
    test_postproc_disallow_dynamic_loops()
    test_postproc_disallow_dynamic_loops_fail()
| 3,487 | 33.196078 | 94 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_mma_m16n8k8_auto_tensorization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for MMA m16n8k8 Auto Tensorization"""
import tempfile
import numpy as np
import tvm
from tvm import te
from tvm import meta_schedule as ms
from tvm._ffi import register_func
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.meta_schedule.builder import LocalBuilder
from tvm.script import ir as I
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
from tvm.tir.schedule import Trace
# get tensor intrin
from tvm.tir.tensor_intrin import cuda # pylint: disable=unused-import
import tvm.testing
# Expected sketch for the 4096x4096x4096 fp16 GEMM after MMA m16n8k8
# auto-tensorization (compared against the design space in
# test_mma_auto_tensorization). Comments only below: TVMScript classes
# should not carry string docstrings, and the body must stay byte-identical
# to what check_sketches expects.
@I.ir_module
class MmaModule:
    @T.prim_func
    def main(
        X: T.Buffer((4096, 4096), "float16"),
        Y: T.Buffer((4096, 4096), "float16"),
        C: T.Buffer((4096, 4096), "float16"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        # with T.block("root"):
        C_reindex_m16n8k8_matrixC = T.alloc_buffer((4096, 4096), "float16", scope="m16n8k8.matrixC")
        X_reindex_shared_dyn = T.alloc_buffer((4096, 4096), "float16", scope="shared.dyn")
        Y_reindex_shared_dyn = T.alloc_buffer((4096, 4096), "float16", scope="shared.dyn")
        X_reindex_shared_dyn_m16n8k8_matrixA = T.alloc_buffer(
            (4096, 4096), "float16", scope="m16n8k8.matrixA"
        )
        Y_reindex_shared_dyn_m16n8k8_matrixB = T.alloc_buffer(
            (4096, 4096), "float16", scope="m16n8k8.matrixB"
        )
        for ax0_0_0_ax1_0_0_fused in T.thread_binding(4, thread="blockIdx.x"):
            for ax0_0_1_ax1_0_1_fused in T.thread_binding(256, thread="blockIdx.y"):
                for ax0_0_2_ax1_0_2_fused in T.thread_binding(4, thread="threadIdx.y"):
                    # Outer reduction loop over k tiles; software-pipeline
                    # annotations interleave the staging copies with compute.
                    for ax2_0_0 in T.serial(
                        128,
                        annotations={
                            "software_pipeline_async_stages": [0],
                            "software_pipeline_order": [0, 1, 3, 2, 4],
                            "software_pipeline_stage": [0, 0, 1, 2, 2],
                        },
                    ):
                        # Stage the X tile into dynamic shared memory.
                        with T.block("X_reindex_shared.dyn"):
                            v0, v1 = T.axis.remap("SS", [ax0_0_1_ax1_0_1_fused, ax2_0_0])
                            T.reads(X[v0 // 8 * 128 : v0 // 8 * 128 + 128, v1 * 32 : v1 * 32 + 32])
                            T.writes(
                                X_reindex_shared_dyn[
                                    v0 // 8 * 128 : v0 // 8 * 128 + 128, v1 * 32 : v1 * 32 + 32
                                ]
                            )
                            T.block_attr(
                                {
                                    "auto_copy": 1,
                                    "buffer_dim_align": [[0, 0, 32, 8]],
                                    "permuted_layout": "g2s_A",
                                    "vector_bytes": 16,
                                }
                            )
                            for ax0, ax1 in T.grid(128, 32):
                                X_reindex_shared_dyn[v0 // 8 * 128 + ax0, v1 * 32 + ax1] = X[
                                    v0 // 8 * 128 + ax0, v1 * 32 + ax1
                                ]
                        # Stage the Y tile into dynamic shared memory.
                        with T.block("Y_reindex_shared.dyn"):
                            v0, v1, v2 = T.axis.remap(
                                "SSS", [ax2_0_0, ax0_0_0_ax1_0_0_fused, ax0_0_1_ax1_0_1_fused]
                            )
                            T.reads(
                                Y[
                                    v0 * 32 : v0 * 32 + 32,
                                    v1 * 1024 + v2 % 8 * 128 : v1 * 1024 + v2 % 8 * 128 + 128,
                                ]
                            )
                            T.writes(
                                Y_reindex_shared_dyn[
                                    v0 * 32 : v0 * 32 + 32,
                                    v1 * 1024 + v2 % 8 * 128 : v1 * 1024 + v2 % 8 * 128 + 128,
                                ]
                            )
                            T.block_attr(
                                {
                                    "auto_copy": 1,
                                    "buffer_dim_align": [[0, 0, 32, 8]],
                                    "permuted_layout": "g2s_B",
                                    "vector_bytes": 16,
                                }
                            )
                            for ax0, ax1 in T.grid(32, 128):
                                Y_reindex_shared_dyn[
                                    v0 * 32 + ax0, v1 * 1024 + v2 % 8 * 128 + ax1
                                ] = Y[v0 * 32 + ax0, v1 * 1024 + v2 % 8 * 128 + ax1]
                        # Inner pipelined loop: load matrixA/matrixB fragments,
                        # then run the tensorized MMA compute.
                        for ax2_0_1 in T.serial(
                            4,
                            annotations={
                                "software_pipeline_order": [0, 1, 2],
                                "software_pipeline_stage": [0, 0, 1],
                            },
                        ):
                            for ax0_0, ax1_0 in T.grid(2, 1):
                                with T.block("X_reindex_shared.dyn_m16n8k8.matrixA_o"):
                                    v0_o = T.axis.spatial(
                                        128,
                                        ax0_0_1_ax1_0_1_fused // 8 * 4
                                        + ax0_0_2_ax1_0_2_fused // 2 * 2
                                        + ax0_0,
                                    )
                                    v1_o = T.axis.spatial(512, ax2_0_0 * 4 + ax2_0_1 + ax1_0)
                                    T.reads(
                                        X_reindex_shared_dyn[
                                            v0_o * 32 : v0_o * 32 + 32, v1_o * 8 : v1_o * 8 + 8
                                        ]
                                    )
                                    T.writes(
                                        X_reindex_shared_dyn_m16n8k8_matrixA[
                                            v0_o * 32 : v0_o * 32 + 32, v1_o * 8 : v1_o * 8 + 8
                                        ]
                                    )
                                    T.block_attr(
                                        {
                                            "meta_schedule.auto_tensorize": "mma_load_m16n8k8_f16_A_shared_dyn",
                                            "permuted_layout": "s2l_A",
                                        }
                                    )
                                    for ax0_1, ax1_1 in T.grid(32, 8):
                                        with T.block("X_reindex_shared.dyn_m16n8k8.matrixA"):
                                            v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
                                            T.reads(
                                                X_reindex_shared_dyn[
                                                    v0_o * 32 + v0_i, v1_o * 8 + v1_i
                                                ]
                                            )
                                            T.writes(
                                                X_reindex_shared_dyn_m16n8k8_matrixA[
                                                    v0_o * 32 + v0_i, v1_o * 8 + v1_i
                                                ]
                                            )
                                            X_reindex_shared_dyn_m16n8k8_matrixA[
                                                v0_o * 32 + v0_i, v1_o * 8 + v1_i
                                            ] = X_reindex_shared_dyn[
                                                v0_o * 32 + v0_i, v1_o * 8 + v1_i
                                            ]
                            for ax0_0, ax1_0 in T.grid(1, 2):
                                with T.block("Y_reindex_shared.dyn_m16n8k8.matrixB_o"):
                                    v0_o = T.axis.spatial(512, ax2_0_0 * 4 + ax2_0_1 + ax0_0)
                                    v1_o = T.axis.spatial(
                                        128,
                                        ax0_0_0_ax1_0_0_fused * 32
                                        + ax0_0_1_ax1_0_1_fused % 8 * 4
                                        + ax0_0_2_ax1_0_2_fused % 2 * 2
                                        + ax1_0,
                                    )
                                    T.reads(
                                        Y_reindex_shared_dyn[
                                            v0_o * 8 : v0_o * 8 + 8, v1_o * 32 : v1_o * 32 + 32
                                        ]
                                    )
                                    T.writes(
                                        Y_reindex_shared_dyn_m16n8k8_matrixB[
                                            v0_o * 8 : v0_o * 8 + 8, v1_o * 32 : v1_o * 32 + 32
                                        ]
                                    )
                                    T.block_attr(
                                        {
                                            "meta_schedule.auto_tensorize": "mma_load_m16n8k8_f16_B_shared_dyn",
                                            "permuted_layout": "s2l_B",
                                        }
                                    )
                                    for ax0_1, ax1_1 in T.grid(8, 32):
                                        with T.block("Y_reindex_shared.dyn_m16n8k8.matrixB"):
                                            v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1])
                                            T.reads(
                                                Y_reindex_shared_dyn[
                                                    v0_o * 8 + v0_i, v1_o * 32 + v1_i
                                                ]
                                            )
                                            T.writes(
                                                Y_reindex_shared_dyn_m16n8k8_matrixB[
                                                    v0_o * 8 + v0_i, v1_o * 32 + v1_i
                                                ]
                                            )
                                            Y_reindex_shared_dyn_m16n8k8_matrixB[
                                                v0_o * 8 + v0_i, v1_o * 32 + v1_i
                                            ] = Y_reindex_shared_dyn[
                                                v0_o * 8 + v0_i, v1_o * 32 + v1_i
                                            ]
                            for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(
                                1, 1, 1, 4, 8
                            ):
                                # Tensorized MMA compute block (init + sync intrinsics).
                                with T.block("C_o"):
                                    v0_o = T.axis.spatial(
                                        256,
                                        ax0_0_1_ax1_0_1_fused // 8 * 8
                                        + ax0_0_2_ax1_0_2_fused // 2 * 4
                                        + ax0_0_3 * 4
                                        + ax0_0_4,
                                    )
                                    v1_o = T.axis.spatial(
                                        512,
                                        ax0_0_0_ax1_0_0_fused * 128
                                        + ax0_0_1_ax1_0_1_fused % 8 * 16
                                        + ax0_0_2_ax1_0_2_fused % 2 * 8
                                        + ax1_0_3 * 8
                                        + ax1_0_4,
                                    )
                                    v2_o = T.axis.reduce(512, ax2_0_0 * 4 + ax2_0_1 + ax2_0_2)
                                    T.reads(
                                        X_reindex_shared_dyn_m16n8k8_matrixA[
                                            v0_o * 16 : v0_o * 16 + 16, v2_o * 8 : v2_o * 8 + 8
                                        ],
                                        Y_reindex_shared_dyn_m16n8k8_matrixB[
                                            v2_o * 8 : v2_o * 8 + 8, v1_o * 8 : v1_o * 8 + 8
                                        ],
                                    )
                                    T.writes(
                                        C_reindex_m16n8k8_matrixC[
                                            v0_o * 16 : v0_o * 16 + 16, v1_o * 8 : v1_o * 8 + 8
                                        ]
                                    )
                                    T.block_attr(
                                        {
                                            "meta_schedule.auto_tensorize": "mma_sync_m16n8k8_f16f16f16",
                                            "meta_schedule.auto_tensorize_init": "mma_init_m16n8k8_f16",
                                            "meta_schedule.thread_extent_high_inclusive": 1024,
                                            "meta_schedule.thread_extent_low_inclusive": 32,
                                            "warp_execution": 1,
                                        }
                                    )
                                    with T.init():
                                        for ax0_1, ax1_1 in T.grid(16, 8):
                                            with T.block("C_init"):
                                                v0_i_init, v1_i_init = T.axis.remap(
                                                    "SS", [ax0_1, ax1_1]
                                                )
                                                T.reads()
                                                T.writes(
                                                    C_reindex_m16n8k8_matrixC[
                                                        v0_o * 16 + v0_i_init, v1_o * 8 + v1_i_init
                                                    ]
                                                )
                                                C_reindex_m16n8k8_matrixC[
                                                    v0_o * 16 + v0_i_init, v1_o * 8 + v1_i_init
                                                ] = T.float16(0)
                                    for ax0_1, ax1_1, ax2_1 in T.grid(16, 8, 8):
                                        with T.block("C"):
                                            v0_i, v1_i, v2_i = T.axis.remap(
                                                "SSR", [ax0_1, ax1_1, ax2_1]
                                            )
                                            T.reads(
                                                C_reindex_m16n8k8_matrixC[
                                                    v0_o * 16 + v0_i, v1_o * 8 + v1_i
                                                ],
                                                X_reindex_shared_dyn_m16n8k8_matrixA[
                                                    v0_o * 16 + v0_i, v2_o * 8 + v2_i
                                                ],
                                                Y_reindex_shared_dyn_m16n8k8_matrixB[
                                                    v2_o * 8 + v2_i, v1_o * 8 + v1_i
                                                ],
                                            )
                                            T.writes(
                                                C_reindex_m16n8k8_matrixC[
                                                    v0_o * 16 + v0_i, v1_o * 8 + v1_i
                                                ]
                                            )
                                            T.block_attr(
                                                {"meta_schedule.tiling_structure": "SSSRRSRS"}
                                            )
                                            C_reindex_m16n8k8_matrixC[
                                                v0_o * 16 + v0_i, v1_o * 8 + v1_i
                                            ] = (
                                                C_reindex_m16n8k8_matrixC[
                                                    v0_o * 16 + v0_i, v1_o * 8 + v1_i
                                                ]
                                                + X_reindex_shared_dyn_m16n8k8_matrixA[
                                                    v0_o * 16 + v0_i, v2_o * 8 + v2_i
                                                ]
                                                * Y_reindex_shared_dyn_m16n8k8_matrixB[
                                                    v2_o * 8 + v2_i, v1_o * 8 + v1_i
                                                ]
                                            )
                    # Write the accumulated matrixC fragments back to global C.
                    with T.block("C_reindex_m16n8k8.matrixC"):
                        v0, v1, v2 = T.axis.remap(
                            "SSS",
                            [ax0_0_1_ax1_0_1_fused, ax0_0_2_ax1_0_2_fused, ax0_0_0_ax1_0_0_fused],
                        )
                        T.reads(
                            C_reindex_m16n8k8_matrixC[
                                v0 // 8 * 128 + v1 // 2 * 64 : v0 // 8 * 128 + v1 // 2 * 64 + 64,
                                v2 * 1024
                                + v0 % 8 * 128
                                + v1 % 2 * 64 : v2 * 1024
                                + v0 % 8 * 128
                                + v1 % 2 * 64
                                + 64,
                            ]
                        )
                        T.writes(
                            C[
                                v0 // 8 * 128 + v1 // 2 * 64 : v0 // 8 * 128 + v1 // 2 * 64 + 64,
                                v2 * 1024
                                + v0 % 8 * 128
                                + v1 % 2 * 64 : v2 * 1024
                                + v0 % 8 * 128
                                + v1 % 2 * 64
                                + 64,
                            ]
                        )
                        T.block_attr({"auto_copy": 1})
                        for ax0, ax1 in T.grid(64, 64):
                            C[
                                v0 // 8 * 128 + v1 // 2 * 64 + ax0,
                                v2 * 1024 + v0 % 8 * 128 + v1 % 2 * 64 + ax1,
                            ] = C_reindex_m16n8k8_matrixC[
                                v0 // 8 * 128 + v1 // 2 * 64 + ax0,
                                v2 * 1024 + v0 % 8 * 128 + v1 % 2 * 64 + ax1,
                            ]
def matmul_fp16(N: int, M: int, K: int, out_dtype: str):
    """Build the TE tensors for C[N, M] = X[N, K] @ Y[K, M] with fp16 inputs.

    The product is accumulated in *out_dtype*; returns (X, Y, C) suitable for
    te.create_prim_func.
    """
    lhs = te.placeholder((N, K), name="X", dtype="float16")
    rhs = te.placeholder((K, M), name="Y", dtype="float16")
    rk = te.reduce_axis((0, K), name="k")
    out = te.compute(  # pylint: disable=invalid-name
        (N, M),
        lambda row, col: te.sum(
            lhs[row, rk].astype(out_dtype) * rhs[rk, col].astype(out_dtype), axis=[rk]
        ),
        name="C",
    )
    return (lhs, rhs, out)
def multi_level_tiling_mma(out_dtype):
    """Create the multi-level-tiling schedule rule using m16n8k8 mma intrinsics.

    *out_dtype* selects between the f16 and f32 accumulator intrinsic variants.
    """
    short = {"float32": "f32", "float16": "f16"}[out_dtype]
    mma_group = {
        "init": f"mma_init_m16n8k8_{short}",
        "load_a": "mma_load_m16n8k8_f16_A_shared_dyn",
        "load_b": "mma_load_m16n8k8_f16_B_shared_dyn",
        "compute": f"mma_sync_m16n8k8_f16f16{short}",
        "store": f"mma_store_m16n8k8_{short}_global",
    }
    return ms.schedule_rule.MultiLevelTilingTensorCore(
        intrin_groups=[mma_group],
        structure="SSSRRSRS",
        tile_binds=["blockIdx.x", "blockIdx.y", "threadIdx.y"],
        max_innermost_factor=4,  # 64 // tensor intrin size
        vector_load_lens=[1, 2, 3, 4, 8, 16],
        reuse_read=ms.schedule_rule.ReuseType(req="must", levels=[4], scope="shared.dyn"),
        reuse_write=ms.schedule_rule.ReuseType(req="no", levels=[2], scope="shared.dyn"),
        use_software_pipeline=True,
    )
def _design_space(mod, out_dtype):
    """Generate the cuda-tensorcore design space for *mod* using only the MMA rule."""
    target = Target("nvidia/geforce-rtx-3080")
    rules = [multi_level_tiling_mma(out_dtype)]
    return generate_design_space(
        kind="cuda-tensorcore",
        mod=mod,
        target=target,
        types=None,
        sch_rules=rules,
    )
# Sampled tiling decisions paired with the expected sketch below: two
# partitioned-tile samples (used by test_mma_auto_tensorization via
# check_sketches' expected_decisions) and one perfect-tile sample.
gemm_decision = [
    ("SamplePartitionedTile", [1, 32, 2, 1, 4]),
    ("SamplePartitionedTile", [4, 8, 2, 1, 8]),
    ("SamplePerfectTile", [128, 4, 1]),
]
def test_mma_auto_tensorization():
    """Check that the MMA rule produces the expected sketch for a 4096^3 fp16 GEMM."""
    workload = matmul_fp16(M=4096, N=4096, K=4096, out_dtype="float16")
    mod = te.create_prim_func(workload)
    space = _design_space(mod, "float16")
    check_sketches(
        mod,
        sketches=space,
        expected_mods=[MmaModule],
        expected_decisions=[gemm_decision],
    )
expected_cuda_script = r"""#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
#include <cuda_fp16.h>
__device__ half max(half a, half b)
{
return __hgt(__half(a), __half(b)) ? a : b;
}
__device__ half min(half a, half b)
{
return __hlt(__half(a), __half(b)) ? a : b;
}
#else
typedef unsigned short uint16_t;
typedef unsigned char uint8_t;
typedef signed char int8_t;
typedef int int32_t;
typedef unsigned long long uint64_t;
typedef unsigned int uint32_t;
#define TVM_FORCE_INLINE inline __attribute__((always_inline))
#define TVM_XINLINE TVM_FORCE_INLINE __device__ __host__
#define TVM_ALIGNED(x) __attribute__ ((aligned(x)))
#define TVM_HALF_OPERATOR(RTYPE, OP) \
TVM_XINLINE RTYPE operator OP (half a, half b) { \
return RTYPE(float(a) OP float(b)); \
} \
template<typename T> \
TVM_XINLINE RTYPE operator OP (half a, T b) { \
return RTYPE(float(a) OP float(b)); \
} \
template<typename T> \
TVM_XINLINE RTYPE operator OP (T a, half b) { \
return RTYPE(float(a) OP float(b)); \
}
#define TVM_HALF_ASSIGNOP(AOP, OP) \
template<typename T> \
TVM_XINLINE half operator AOP (const T& a) { \
return *this = half(float(*this) OP float(a)); \
} \
template<typename T> \
TVM_XINLINE half operator AOP (const volatile T& a) volatile { \
return *this = half(float(*this) OP float(a)); \
}
class TVM_ALIGNED(2) half {
public:
uint16_t half_;
static TVM_XINLINE half Binary(uint16_t value) {
half res;
res.half_ = value;
return res;
}
TVM_XINLINE half() {}
TVM_XINLINE half(const float& value) { constructor(value); }
TVM_XINLINE explicit half(const double& value) { constructor(value); }
TVM_XINLINE explicit half(const int8_t& value) { constructor(value); }
TVM_XINLINE explicit half(const uint8_t& value) { constructor(value); }
TVM_XINLINE explicit half(const int32_t& value) { constructor(value); }
TVM_XINLINE explicit half(const uint32_t& value) { constructor(value); }
TVM_XINLINE explicit half(const long long& value) { constructor(value); }
TVM_XINLINE explicit half(const uint64_t& value) { constructor(value); }
TVM_XINLINE operator float() const { \
return float(half2float(half_)); \
} \
TVM_XINLINE operator float() const volatile { \
return float(half2float(half_)); \
}
TVM_HALF_ASSIGNOP(+=, +)
TVM_HALF_ASSIGNOP(-=, -)
TVM_HALF_ASSIGNOP(*=, *)
TVM_HALF_ASSIGNOP(/=, /)
TVM_XINLINE half operator+() {
return *this;
}
TVM_XINLINE half operator-() {
return half(-float(*this));
}
TVM_XINLINE half operator=(const half& a) {
half_ = a.half_;
return a;
}
template<typename T>
TVM_XINLINE half operator=(const T& a) {
return *this = half(a);
}
TVM_XINLINE half operator=(const half& a) volatile {
half_ = a.half_;
return a;
}
template<typename T>
TVM_XINLINE half operator=(const T& a) volatile {
return *this = half(a);
}
private:
union Bits {
float f;
int32_t si;
uint32_t ui;
};
static int const fp16FractionBits = 10;
static int const fp32FractionBits = 23;
static int32_t const fp32FractionMask = ~(~0u << fp32FractionBits); // == 0x7fffff
static int32_t const fp32HiddenBit = 1 << fp32FractionBits; // == 0x800000
static int const shift = fp32FractionBits - fp16FractionBits; // == 13
static int const shiftSign = 16;
static int32_t const expAdjust = 127 - 15; // exp32-127 = exp16-15, so exp16 = exp32 - (127-15)
static int32_t const infN = 0x7F800000; // flt32 infinity
static int32_t const maxN = 0x477FFFFF; // max flt32 that's a flt16 normal after >> by shift
static int32_t const minN = 0x38800000; // min flt16 normal as a flt32
static int32_t const maxZ = 0x33000000; // max fp32 number that's still rounded to zero in fp16
static int32_t const signN = 0x80000000; // flt32 sign bit
static int32_t const infC = infN >> shift;
static int32_t const nanN = (infC + 1) << shift; // minimum flt16 nan as a flt32
static int32_t const maxC = maxN >> shift;
static int32_t const minC = minN >> shift;
static int32_t const signC = signN >> shiftSign; // flt16 sign bit
static int32_t const mulN = 0x52000000; // (1 << 23) / minN
static int32_t const mulC = 0x33800000; // minN / (1 << (23 - shift))
static int32_t const subC = 0x003FF; // max flt32 subnormal down shifted
static int32_t const norC = 0x00400; // min flt32 normal down shifted
static int32_t const maxD = infC - maxC - 1;
static int32_t const minD = minC - subC - 1;
TVM_XINLINE uint16_t float2half(const float& value) const {
Bits v;
v.f = value;
uint32_t sign = v.si & signN; // grab sign bit
v.si ^= sign; // clear sign bit from v
sign >>= shiftSign; // logical shift sign to fp16 position
if (v.si <= maxZ) {
// Handle eventual zeros here to ensure
// vshift will not exceed 32 below.
v.ui = 0;
} else if (v.si < minN) {
// Handle denorms
uint32_t exp32 = v.ui >> fp32FractionBits;
int32_t exp16 = exp32 - expAdjust;
// If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.
// Smaller (so negative) exp16 values should result in greater right shifts.
uint32_t vshift = 1 - exp16;
uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);
v.ui = significand >> vshift;
v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;
} else if (v.si <= maxN) {
// Handle norms
v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;
v.ui -= expAdjust << fp32FractionBits;
} else if (v.si <= infN) {
v.si = infN;
} else if (v.si < nanN) {
v.si = nanN;
}
v.ui >>= shift;
return sign | (v.ui & 0x7fff);
}
// Same as above routine, except for addition of volatile keyword
TVM_XINLINE uint16_t float2half(
const volatile float& value) const volatile {
Bits v;
v.f = value;
uint32_t sign = v.si & signN; // grab sign bit
v.si ^= sign; // clear sign bit from v
sign >>= shiftSign; // logical shift sign to fp16 position
if (v.si <= maxZ) {
// Handle eventual zeros here to ensure
// vshift will not exceed 32 below.
v.ui = 0;
} else if (v.si < minN) {
// Handle denorms
uint32_t exp32 = v.ui >> fp32FractionBits;
int32_t exp16 = exp32 - expAdjust;
// If exp16 == 0 (just into the denorm range), then significant should be shifted right 1.
// Smaller (so negative) exp16 values should result in greater right shifts.
uint32_t vshift = 1 - exp16;
uint32_t significand = fp32HiddenBit | (v.ui & fp32FractionMask);
v.ui = significand >> vshift;
v.ui += (v.ui & 0x3fff) != 0x1000 || (significand & 0x7ff) ? 0x1000 : 0;
} else if (v.si <= maxN) {
// Handle norms
v.ui += (v.ui & 0x3fff) != 0x1000 ? 0x1000 : 0;
v.ui -= expAdjust << fp32FractionBits;
} else if (v.si <= infN) {
v.si = infN;
} else if (v.si < nanN) {
v.si = nanN;
}
v.ui >>= shift;
return sign | (v.ui & 0x7fff);
}
TVM_XINLINE float half2float(const uint16_t& value) const {
Bits v;
v.ui = value;
int32_t sign = v.si & signC;
v.si ^= sign;
sign <<= shiftSign;
v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);
v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);
Bits s;
s.si = mulC;
s.f *= v.si;
int32_t mask = -(norC > v.si);
v.si <<= shift;
v.si ^= (s.si ^ v.si) & mask;
v.si |= sign;
return v.f;
}
TVM_XINLINE float half2float(
const volatile uint16_t& value) const volatile {
Bits v;
v.ui = value;
int32_t sign = v.si & signC;
v.si ^= sign;
sign <<= shiftSign;
v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);
v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);
Bits s;
s.si = mulC;
s.f *= v.si;
int32_t mask = -(norC > v.si);
v.si <<= shift;
v.si ^= (s.si ^ v.si) & mask;
v.si |= sign;
return v.f;
}
template<typename T>
TVM_XINLINE void constructor(const T& value) {
half_ = float2half(float(value));
}
};
TVM_HALF_OPERATOR(half, +)
TVM_HALF_OPERATOR(half, -)
TVM_HALF_OPERATOR(half, *)
TVM_HALF_OPERATOR(half, /)
TVM_HALF_OPERATOR(bool, >)
TVM_HALF_OPERATOR(bool, <)
TVM_HALF_OPERATOR(bool, >=)
TVM_HALF_OPERATOR(bool, <=)
TVM_XINLINE half __float2half_rn(const float a) {
return half(a);
}
#endif
// Pack two half values.
static inline __device__ __host__ unsigned
__pack_half2(const half x, const half y) {
unsigned v0 = *((unsigned short *)&x);
unsigned v1 = *((unsigned short *)&y);
return (v1 << 16) | v0;
}
// Some fp16 math functions are not supported in cuda_fp16.h,
// so we define them here to make sure the generated CUDA code
// is valid.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 530)
#define CUDA_UNSUPPORTED_HALF_MATH_BINARY(HALF_MATH_NAME, FP32_MATH_NAME) \
static inline __device__ __host__ half HALF_MATH_NAME(half x, half y) { \
float tmp_x = __half2float(x); \
float tmp_y = __half2float(y); \
float result = FP32_MATH_NAME(tmp_x, tmp_y); \
return __float2half(result); \
}
#define CUDA_UNSUPPORTED_HALF_MATH_UNARY(HALF_MATH_NAME, FP32_MATH_NAME) \
static inline __device__ __host__ half HALF_MATH_NAME(half x) { \
float tmp_x = __half2float(x); \
float result = FP32_MATH_NAME(tmp_x); \
return __float2half(result); \
}
CUDA_UNSUPPORTED_HALF_MATH_BINARY(hpow, powf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htanh, tanhf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(htan, tanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(hatan, atanf)
CUDA_UNSUPPORTED_HALF_MATH_UNARY(herf, erf)
#undef CUDA_UNSUPPORTED_HALF_MATH_BINARY
#undef CUDA_UNSUPPORTED_HALF_MATH_UNARY
#endif
#if (((__CUDACC_VER_MAJOR__ == 11) && (__CUDACC_VER_MINOR__ >= 4)) || \
(__CUDACC_VER_MAJOR__ > 11))
#define TVM_ENABLE_L2_PREFETCH 1
#else
#define TVM_ENABLE_L2_PREFETCH 0
#endif
#ifdef _WIN32
using uint = unsigned int;
using uchar = unsigned char;
using ushort = unsigned short;
using int64_t = long long;
using uint64_t = unsigned long long;
#else
#define uint unsigned int
#define uchar unsigned char
#define ushort unsigned short
#define int64_t long long
#define uint64_t unsigned long long
#endif
extern "C" __global__ void __launch_bounds__(128) main_kernel(half* __restrict__ C, half* __restrict__ X, half* __restrict__ Y) {
extern __shared__ uchar buf_dyn_shmem[];
uint1 C_reindex_m16n8k8_matrixC[64];
half X_reindex_shared_dyn_m16n8k8_matrixA[32];
half Y_reindex_shared_dyn_m16n8k8_matrixB[32];
for (int ax0_0_4_init = 0; ax0_0_4_init < 4; ++ax0_0_4_init) {
for (int ax1_0_4_init = 0; ax1_0_4_init < 8; ++ax1_0_4_init) {
for (int b = 0; b < 2; ++b) {
C_reindex_m16n8k8_matrixC[(((ax0_0_4_init * 16) + (ax1_0_4_init * 2)) + b)] = make_uint1(__pack_half2(__float2half_rn(0.000000e+00f), __float2half_rn(0.000000e+00f)));
}
}
}
for (int ax0_ax1_fused_0 = 0; ax0_ax1_fused_0 < 4; ++ax0_ax1_fused_0) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + (((((ax0_ax1_fused_0 * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 2) * 64)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 16)) + 24576)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(X + ((((((((int)blockIdx.y) >> 3) * 524288) + (ax0_ax1_fused_0 * 131072)) + (((int)threadIdx.y) * 32768)) + ((((int)threadIdx.x) >> 2) * 4096)) + ((((int)threadIdx.x) & 3) * 8)))), "n"(16)
);
}
}
for (int ax0_ax1_fused_0_1 = 0; ax0_ax1_fused_0_1 < 4; ++ax0_ax1_fused_0_1) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + ((((ax0_ax1_fused_0_1 * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 3) * 128)) + (((((int)threadIdx.x) & 7) ^ ((((int)threadIdx.y) * 2) + (((int)threadIdx.x) >> 4))) * 16))))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(Y + ((((((ax0_ax1_fused_0_1 * 32768) + (((int)threadIdx.y) * 8192)) + ((((int)threadIdx.x) >> 4) * 4096)) + (((int)blockIdx.x) * 1024)) + ((((int)blockIdx.y) & 7) * 128)) + ((((int)threadIdx.x) & 15) * 8)))), "n"(16)
);
}
}
__asm__ __volatile__("cp.async.commit_group;");
for (int ax0_ax1_fused_0_2 = 0; ax0_ax1_fused_0_2 < 4; ++ax0_ax1_fused_0_2) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + (((((ax0_ax1_fused_0_2 * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 2) * 64)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 16)) + 32768)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(X + (((((((((int)blockIdx.y) >> 3) * 524288) + (ax0_ax1_fused_0_2 * 131072)) + (((int)threadIdx.y) * 32768)) + ((((int)threadIdx.x) >> 2) * 4096)) + ((((int)threadIdx.x) & 3) * 8)) + 32))), "n"(16)
);
}
}
for (int ax0_ax1_fused_0_3 = 0; ax0_ax1_fused_0_3 < 4; ++ax0_ax1_fused_0_3) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + (((((ax0_ax1_fused_0_3 * 2048) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 3) * 128)) + (((((int)threadIdx.x) & 7) ^ ((((int)threadIdx.y) * 2) + (((int)threadIdx.x) >> 4))) * 16)) + 8192)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(Y + (((((((ax0_ax1_fused_0_3 * 32768) + (((int)threadIdx.y) * 8192)) + ((((int)threadIdx.x) >> 4) * 4096)) + (((int)blockIdx.x) * 1024)) + ((((int)blockIdx.y) & 7) * 128)) + ((((int)threadIdx.x) & 15) * 8)) + 131072))), "n"(16)
);
}
}
__asm__ __volatile__("cp.async.commit_group;");
__asm__ __volatile__("cp.async.wait_group 1;");
__syncthreads();
for (int ax0_0 = 0; ax0_0 < 2; ++ax0_0) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[12288])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0 * 1024)) + (((int)threadIdx.x) * 32)) + ((0 ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0 * 8)))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0 * 8)))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0 * 8)))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0 = 0; ax1_0 < 2; ++ax1_0) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[0])) + ((((((int)threadIdx.x) & 7) * 128) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0 * 8)))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0 * 8)))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0 * 8)))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax2_0_0 = 0; ax2_0_0 < 126; ++ax2_0_0) {
__syncthreads();
for (int ax0_ax1_fused_0_4 = 0; ax0_ax1_fused_0_4 < 4; ++ax0_ax1_fused_0_4) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + ((((((((ax2_0_0 + 2) % 3) * 8192) + (ax0_ax1_fused_0_4 * 2048)) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 2) * 64)) + (((((int)threadIdx.x) & 3) ^ (((int)threadIdx.x) >> 3)) * 16)) + 24576)))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(X + ((((((((((int)blockIdx.y) >> 3) * 524288) + (ax0_ax1_fused_0_4 * 131072)) + (((int)threadIdx.y) * 32768)) + ((((int)threadIdx.x) >> 2) * 4096)) + (ax2_0_0 * 32)) + ((((int)threadIdx.x) & 3) * 8)) + 64))), "n"(16)
);
}
}
for (int ax0_ax1_fused_0_5 = 0; ax0_ax1_fused_0_5 < 4; ++ax0_ax1_fused_0_5) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)(buf_dyn_shmem + (((((((ax2_0_0 + 2) % 3) * 8192) + (ax0_ax1_fused_0_5 * 2048)) + (((int)threadIdx.y) * 512)) + ((((int)threadIdx.x) >> 3) * 128)) + (((((int)threadIdx.x) & 7) ^ ((((int)threadIdx.y) * 2) + (((int)threadIdx.x) >> 4))) * 16))))
);
__asm__ __volatile__(
#if TVM_ENABLE_L2_PREFETCH
"cp.async.cg.shared.global.L2::128B [%0], [%1], %2;"
#else
"cp.async.cg.shared.global [%0], [%1], %2;"
#endif
:: "r"(addr), "l"((void*)(Y + ((((((((ax2_0_0 * 131072) + (ax0_ax1_fused_0_5 * 32768)) + (((int)threadIdx.y) * 8192)) + ((((int)threadIdx.x) >> 4) * 4096)) + (((int)blockIdx.x) * 1024)) + ((((int)blockIdx.y) & 7) * 128)) + ((((int)threadIdx.x) & 15) * 8)) + 262144))), "n"(16)
);
}
}
__asm__ __volatile__("cp.async.commit_group;");
__asm__ __volatile__("cp.async.wait_group 1;");
__syncthreads();
for (int ax2_0_1 = 0; ax2_0_1 < 3; ++ax2_0_1) {
for (int ax0_0_1 = 0; ax0_0_1 < 2; ++ax0_0_1) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[(((ax2_0_0 % 3) * 4096) + 12288)])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0_1 * 1024)) + (((int)threadIdx.x) * 32)) + (((ax2_0_1 + 1) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1 + 1) & 1) * 16) + (ax0_0_1 * 8))))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1 + 1) & 1) * 16) + (ax0_0_1 * 8))))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1 + 1) & 1) * 16) + (ax0_0_1 * 8))))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1 + 1) & 1) * 16) + (ax0_0_1 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax1_0_1 = 0; ax1_0_1 < 2; ++ax1_0_1) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[((ax2_0_0 % 3) * 4096)])) + (((((ax2_0_1 * 1024) + ((((int)threadIdx.x) & 7) * 128)) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0_1 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8)) + 1024)))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1 + 1) & 1) * 16) + (ax1_0_1 * 8))))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1 + 1) & 1) * 16) + (ax1_0_1 * 8))))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1 + 1) & 1) * 16) + (ax1_0_1 * 8))))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1 + 1) & 1) * 16) + (ax1_0_1 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax0_0_4 = 0; ax0_0_4 < 4; ++ax0_0_4) {
for (int ax1_0_4 = 0; ax1_0_4 < 8; ++ax1_0_4) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4 * 16) + (ax1_0_4 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4 * 16) + (ax1_0_4 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1 & 1) * 16) + (ax0_0_4 * 4))))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1 & 1) * 16) + (ax0_0_4 * 4))))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (((ax2_0_1 & 1) * 16) + (ax1_0_4 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4 * 16) + (ax1_0_4 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4 * 16) + (ax1_0_4 * 2))))[1]));
}
}
}
}
for (int ax0_0_2 = 0; ax0_0_2 < 2; ++ax0_0_2) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[((((ax2_0_0 + 1) % 3) * 4096) + 12288)])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0_2 * 1024)) + (((int)threadIdx.x) * 32)) + ((0 ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_2 * 8)))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_2 * 8)))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_2 * 8)))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_2 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_2 = 0; ax1_0_2 < 2; ++ax1_0_2) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[(((ax2_0_0 + 1) % 3) * 4096)])) + ((((((int)threadIdx.x) & 7) * 128) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0_2 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_2 * 8)))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_2 * 8)))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_2 * 8)))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_2 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax0_0_4_1 = 0; ax0_0_4_1 < 4; ++ax0_0_4_1) {
for (int ax1_0_4_1 = 0; ax1_0_4_1 < 8; ++ax1_0_4_1) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_1 * 16) + (ax1_0_4_1 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_1 * 16) + (ax1_0_4_1 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_1 * 4) + 16)))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_1 * 4) + 16)))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((ax1_0_4_1 * 2) + 16)))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_1 * 16) + (ax1_0_4_1 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_1 * 16) + (ax1_0_4_1 * 2))))[1]));
}
}
}
}
__asm__ __volatile__("cp.async.wait_group 0;");
__syncthreads();
for (int ax2_0_1_1 = 0; ax2_0_1_1 < 3; ++ax2_0_1_1) {
for (int ax0_0_3 = 0; ax0_0_3 < 2; ++ax0_0_3) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[12288])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0_3 * 1024)) + (((int)threadIdx.x) * 32)) + (((ax2_0_1_1 + 1) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax0_0_3 * 8))))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax0_0_3 * 8))))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax0_0_3 * 8))))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax0_0_3 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax1_0_3 = 0; ax1_0_3 < 2; ++ax1_0_3) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[0])) + (((((ax2_0_1_1 * 1024) + ((((int)threadIdx.x) & 7) * 128)) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0_3 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8)) + 1024)))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax1_0_3 * 8))))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax1_0_3 * 8))))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax1_0_3 * 8))))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_1 + 1) & 1) * 16) + (ax1_0_3 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax0_0_4_2 = 0; ax0_0_4_2 < 4; ++ax0_0_4_2) {
for (int ax1_0_4_2 = 0; ax1_0_4_2 < 8; ++ax1_0_4_2) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_2 * 16) + (ax1_0_4_2 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_2 * 16) + (ax1_0_4_2 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1_1 & 1) * 16) + (ax0_0_4_2 * 4))))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1_1 & 1) * 16) + (ax0_0_4_2 * 4))))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (((ax2_0_1_1 & 1) * 16) + (ax1_0_4_2 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_2 * 16) + (ax1_0_4_2 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_2 * 16) + (ax1_0_4_2 * 2))))[1]));
}
}
}
}
for (int ax0_0_5 = 0; ax0_0_5 < 2; ++ax0_0_5) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[16384])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0_5 * 1024)) + (((int)threadIdx.x) * 32)) + ((0 ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_5 * 8)))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_5 * 8)))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_5 * 8)))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (ax0_0_5 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax1_0_5 = 0; ax1_0_5 < 2; ++ax1_0_5) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[4096])) + ((((((int)threadIdx.x) & 7) * 128) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0_5 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_5 * 8)))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_5 * 8)))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_5 * 8)))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (ax1_0_5 * 8)))[3])
: "r"(addr)
);
}
}
for (int ax0_0_4_3 = 0; ax0_0_4_3 < 4; ++ax0_0_4_3) {
for (int ax1_0_4_3 = 0; ax1_0_4_3 < 8; ++ax1_0_4_3) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_3 * 16) + (ax1_0_4_3 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_3 * 16) + (ax1_0_4_3 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_3 * 4) + 16)))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_3 * 4) + 16)))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((ax1_0_4_3 * 2) + 16)))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_3 * 16) + (ax1_0_4_3 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_3 * 16) + (ax1_0_4_3 * 2))))[1]));
}
}
}
for (int ax2_0_1_2 = 0; ax2_0_1_2 < 3; ++ax2_0_1_2) {
for (int ax0_0_6 = 0; ax0_0_6 < 2; ++ax0_0_6) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[16384])) + (((((((int)threadIdx.y) >> 1) * 2048) + (ax0_0_6 * 1024)) + (((int)threadIdx.x) * 32)) + (((ax2_0_1_2 + 1) ^ ((((int)threadIdx.x) & 7) >> 1)) * 8))))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax0_0_6 * 8))))[0]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax0_0_6 * 8))))[1]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax0_0_6 * 8))))[2]), "=r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax0_0_6 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax1_0_6 = 0; ax1_0_6 < 2; ++ax1_0_6) {
{
unsigned int addr;
__asm__ __volatile__(
"{ .reg .u64 addr; cvta.to.shared.u64 addr, %1; cvt.u32.u64 %0, addr; }\n"
: "=r"(addr)
: "l"((void *)((&(((half*)buf_dyn_shmem)[4096])) + (((((ax2_0_1_2 * 1024) + ((((int)threadIdx.x) & 7) * 128)) + ((((int)threadIdx.y) & 1) * 64)) + ((((ax1_0_6 * 4) + (((int)threadIdx.x) >> 3)) ^ (((int)threadIdx.x) & 7)) * 8)) + 1024)))
);
__asm__ __volatile__(
"ldmatrix.sync.aligned.m8n8.x4.trans.shared.b16"
"{%0, %1, %2, %3}, [%4];\n"
: "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax1_0_6 * 8))))[0]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax1_0_6 * 8))))[1]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax1_0_6 * 8))))[2]), "=r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((((ax2_0_1_2 + 1) & 1) * 16) + (ax1_0_6 * 8))))[3])
: "r"(addr)
);
}
}
for (int ax0_0_4_4 = 0; ax0_0_4_4 < 4; ++ax0_0_4_4) {
for (int ax1_0_4_4 = 0; ax1_0_4_4 < 8; ++ax1_0_4_4) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_4 * 16) + (ax1_0_4_4 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_4 * 16) + (ax1_0_4_4 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1_2 & 1) * 16) + (ax0_0_4_4 * 4))))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + (((ax2_0_1_2 & 1) * 16) + (ax0_0_4_4 * 4))))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + (((ax2_0_1_2 & 1) * 16) + (ax1_0_4_4 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_4 * 16) + (ax1_0_4_4 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_4 * 16) + (ax1_0_4_4 * 2))))[1]));
}
}
}
}
for (int ax0_0_4_5 = 0; ax0_0_4_5 < 4; ++ax0_0_4_5) {
for (int ax1_0_4_5 = 0; ax1_0_4_5 < 8; ++ax1_0_4_5) {
{
__asm__ __volatile__(
"mma.sync.aligned.m16n8k8.row.col.f16.f16.f16.f16"
"{%0, %1}, {%2, %3}, {%4}, {%5, %6};\n"
: "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_5 * 16) + (ax1_0_4_5 * 2))))[0]), "=r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_5 * 16) + (ax1_0_4_5 * 2))))[1])
: "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_5 * 4) + 16)))[0]), "r"(((unsigned *)(X_reindex_shared_dyn_m16n8k8_matrixA + ((ax0_0_4_5 * 4) + 16)))[1]), "r"(((unsigned *)(Y_reindex_shared_dyn_m16n8k8_matrixB + ((ax1_0_4_5 * 2) + 16)))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_5 * 16) + (ax1_0_4_5 * 2))))[0]), "r"(((unsigned *)(C_reindex_m16n8k8_matrixC + ((ax0_0_4_5 * 16) + (ax1_0_4_5 * 2))))[1]));
}
}
}
for (int ax0_0_7 = 0; ax0_0_7 < 8; ++ax0_0_7) {
__syncthreads();
for (int ax1_0_7 = 0; ax1_0_7 < 8; ++ax1_0_7) {
*(uint1*)(((half*)buf_dyn_shmem) + ((((((int)threadIdx.y) * 512) + (ax1_0_7 * 64)) + (((int)threadIdx.x) * 2)) + 12288)) = C_reindex_m16n8k8_matrixC[((((ax0_0_7 >> 1) * 16) + (ax1_0_7 * 2)) + (ax0_0_7 & 1))];
}
__syncthreads();
for (int ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0 = 0; ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0 < 16; ++ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0) {
C[(((((((((((((int)blockIdx.y) >> 3) * 524288) + ((ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0 >> 3) * 262144)) + (ax0_0_7 * 32768)) + ((((int)threadIdx.y) & 1) * 16384)) + ((((int)threadIdx.x) >> 3) * 4096)) + (((int)blockIdx.x) * 1024)) + ((((int)blockIdx.y) & 7) * 128)) + ((ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0 & 7) * 16)) + ((((int)threadIdx.y) >> 1) * 8)) + (((int)threadIdx.x) & 7))] = ((half*)buf_dyn_shmem)[((((ax0_0_2_ax1_0_2_fused_cache_ax1_0_cache_ax0_1_cache_ax1_1_cache_fused_0 * 128) + (((int)threadIdx.y) * 32)) + ((int)threadIdx.x)) + 12288)];
}
}
}
"""
@tvm.testing.requires_tensorcore
def test_mma_script_after_build():
    """Replay fixed sampling decisions on the MMA design space, apply the
    DefaultCUDATensorCore postprocessors, and check that the built CUDA
    kernel source matches ``expected_cuda_script`` exactly."""
    arch = tvm.contrib.nvcc.get_target_compute_version()
    major, _ = tvm.contrib.nvcc.parse_compute_version(arch)
    if major < 8:
        # At least sm80 is required
        return
    mod = te.create_prim_func(matmul_fp16(M=4096, N=4096, K=4096, out_dtype="float16"))
    actual = _design_space(mod, "float16")
    assert len(actual) == 1
    sketch = actual[0]
    # Replace the decision of every Sample* instruction in the sketch trace
    # with the fixed values from ``gemm_decision`` so codegen is deterministic.
    i = 0
    new_decisions = {}
    for inst in sketch.trace.insts:
        if not inst.kind.name.startswith("Sample"):
            continue
        assert i < len(gemm_decision)
        if inst.kind.name == gemm_decision[i][0]:
            new_decisions[inst] = gemm_decision[i][1]
        i += 1
    assert len(new_decisions) == len(gemm_decision)
    sch = Schedule(mod)
    Trace(
        insts=sketch.trace.insts,
        decisions=new_decisions,
    ).apply_to_schedule(sch, remove_postproc=True)
    sch.enter_postproc()
    # DefaultCUDATensorCore
    ms.postproc.DisallowDynamicLoop().apply(sch)
    ms.postproc.RewriteCooperativeFetch().apply(sch)
    # Disable RewriteUnboundBlock here since max_threads_per_block_ is not set
    # ms.postproc.RewriteUnboundBlock(256).apply(sch)
    ms.postproc.RewriteParallelVectorizeUnroll().apply(sch)
    ms.postproc.RewriteReductionBlock().apply(sch)
    ms.postproc.VerifyGPUCode().apply(sch)
    ms.postproc.RewriteTensorize(False).apply(sch)
    with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
        rt_mod = tvm.build(sch.mod, target="cuda")
    # Exact string comparison against the pre-recorded kernel source.
    assert rt_mod.imported_modules[0].get_source() == expected_cuda_script
def initializer():
    """LocalBuilder worker initializer: registers the async build function
    used by ``test_mma_tune`` via ``f_build="meta_schedule.builder.async_build"``."""
    @register_func("meta_schedule.builder.async_build")
    def async_build(mod, target, _params):  # pylint: disable=unused-variable, unused-argument
        # pylint: disable=import-outside-toplevel
        from tvm.driver import build as tvm_build
        from tvm.tir.transform import RemoveWeightLayoutRewriteBlock
        # re-import here for local builder to register index_map_m16n8k8_matrixC
        # pylint: disable=import-outside-toplevel, unused-import
        from tvm.tir.tensor_intrin import cuda
        # Strip layout-rewrite blocks before building with async copy enabled.
        mod = RemoveWeightLayoutRewriteBlock(skip_ndarray_rewrite=True)(mod)
        with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
            rt_mod = tvm_build(mod, target=target)
        return rt_mod
@tvm.testing.requires_tensorcore
@tvm.testing.requires_cublas
def test_mma_tune():
    """Tune the MMA matmul end to end with meta-schedule and compare the
    result numerically against a cuBLAS reference, for both fp16 and fp32
    output dtypes."""
    arch = tvm.contrib.nvcc.get_target_compute_version()
    major, _ = tvm.contrib.nvcc.parse_compute_version(arch)
    if major < 8:
        # At least sm80 is required
        return
    # pylint: disable=import-outside-toplevel
    from tvm.contrib import cublas
    def tune(out_dtype):
        M, N, K = 1024, 1024, 1024
        target = Target("nvidia/geforce-rtx-3080")
        func = te.create_prim_func(matmul_fp16(N=N, M=M, K=K, out_dtype=out_dtype)).with_attr(
            {"global_symbol": "main"}
        )
        mod = tvm.IRModule({"main": func})
        # Tune into a temporary database, then query the best schedule back.
        with tempfile.TemporaryDirectory() as work_dir:
            db = ms.tir_integration.tune_tir(
                mod=mod,
                target=target,
                work_dir=work_dir,
                max_trials_global=8,
                builder=LocalBuilder(
                    f_build="meta_schedule.builder.async_build", initializer=initializer
                ),
                space=ms.space_generator.PostOrderApply(
                    sch_rules=[multi_level_tiling_mma(out_dtype=out_dtype)],
                ),
            )
            sch = db.query_schedule(mod, target=target, workload_name="main")
            with tvm.transform.PassContext(config={"tir.use_async_copy": 1}):
                rt_mod = tvm.build(sch.mod, target=target)
        a_np = np.random.uniform(0, 1, size=(M, K)).astype("float16")
        b_np = np.random.uniform(0, 1, size=(K, N)).astype("float16")
        # Reference result computed with cuBLAS on the same inputs.
        A_cublas = te.placeholder((M, K), name="A", dtype="float16")
        B_cublas = te.placeholder((K, N), name="B", dtype="float16")
        C_cublas = cublas.matmul(A_cublas, B_cublas, dtype=out_dtype)
        s = te.create_schedule(C_cublas.op)
        dev = tvm.cuda(0)
        f_cublas = tvm.build(s, [A_cublas, B_cublas, C_cublas], target)
        a_cublas = tvm.nd.array(a_np.astype("float16"), dev)
        b_cublas = tvm.nd.array(b_np.astype("float16"), dev)
        c_cublas = tvm.nd.array(np.zeros((M, N), dtype=C_cublas.dtype), dev)
        f_cublas(a_cublas, b_cublas, c_cublas)
        a_tvm = tvm.nd.array(a_np, device=tvm.cuda(0))
        b_tvm = tvm.nd.array(b_np, device=tvm.cuda(0))
        c_tvm = tvm.nd.array(np.empty((M, N)).astype(out_dtype), device=tvm.cuda(0))
        rt_mod(a_tvm, b_tvm, c_tvm)
        # fp16 accumulation is noisy, so a loose relative tolerance is used.
        assert np.allclose(c_tvm.numpy(), c_cublas.numpy(), rtol=1e-2)
    tune("float16")
    tune("float32")
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_mma_auto_tensorization()
    test_mma_script_after_build()
    test_mma_tune()
| 63,370 | 47.154255 | 629 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_inject_copy_intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
def test_copy2d():
    """Lower a dense 2-D identity copy tagged with the "memcpy" pragma and
    verify the buffer shapes and strides handed to the copy-intrin callback."""
    rows = te.var("m")
    cols = te.var("l")
    src_t = te.placeholder((rows, cols), name="A")
    dst_t = te.compute((rows, cols), lambda i, j: src_t[i, j], name="B")
    sched = te.create_schedule(dst_t.op)
    sched[dst_t].pragma(dst_t.op.axis[0], "memcpy")
    # Lower step by step instead of going through schedule_to_module.
    bounds = tvm.te.schedule.InferBound(sched)
    body = tvm.te.schedule.ScheduleOps(sched, bounds)
    prim_func = tvm.te.schedule.SchedulePostProcToPrimFunc([src_t, dst_t], body, None)
    mod = tvm.tir.transform.StorageFlatten(64)(tvm.IRModule.from_expr(prim_func))

    def copy_cb(src, dst, pad_before, pad_after, pad_value):
        # Dense copy: row stride equals the number of columns, unit column stride.
        assert dst.strides[0] == cols
        assert dst.strides[1].value == 1
        assert src.strides[0] == cols
        assert tuple(src.shape) == (rows, cols)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", copy_cb)(mod)["main"].body
def test_copy_pad():
    """A copy padded by one row on each side: the callback must observe the
    pad extents and the pad value used in the if_then_else."""
    rows = te.var("m")
    cols = te.var("l")
    src_t = te.placeholder((rows, cols), name="A")
    dst_t = te.compute(
        (rows + 2, cols),
        lambda i, j: tvm.tir.if_then_else(
            tvm.tir.all(i >= 1, i < rows + 1), src_t[i - 1, j], 1.0
        ),
        name="B",
    )
    sched = te.create_schedule(dst_t.op)
    sched[dst_t].pragma(dst_t.op.axis[0], "memcpy")
    mod = tvm.tir.transform.StorageFlatten(64)(schedule_to_module(sched, [src_t, dst_t]))

    def copy_cb(src, dst, pad_before, pad_after, pad_value):
        tvm.testing.assert_prim_expr_equal(src.elem_offset, 0)
        # One padded row before and after; no padding along the columns.
        assert pad_before[0].value == 1
        assert pad_before[1].value == 0
        assert pad_after[0].value == 1
        assert pad_after[1].value == 0
        assert pad_value.value == 1.0
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", copy_cb)(mod)["main"].body
def test_single_point_test():
    """Degenerate single-element copy: zero offsets and unit strides."""
    src_t = te.placeholder((1,), name="A")
    dst_t = te.compute((1,), lambda i: src_t[i], name="B")
    sched = te.create_schedule(dst_t.op)
    sched[dst_t].pragma(dst_t.op.axis[0], "memcpy")
    mod = tvm.tir.transform.StorageFlatten(64)(schedule_to_module(sched, [src_t, dst_t]))

    def copy_cb(src, dst, pad_before, pad_after, pad_value):
        for buf in (src, dst):
            tvm.testing.assert_prim_expr_equal(buf.elem_offset, 0)
            tvm.testing.assert_prim_expr_equal(buf.strides[0], 1)
        return tvm.tir.Evaluate(0)

    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", copy_cb)(mod)["main"].body
def test_copy_pad_split():
    """Split the output loop and compute the padded copy per tile; the
    callback then sees per-tile source offsets and clipped pad amounts."""
    m = 4 * 3
    A = te.placeholder((m,), name="A")
    Apad = te.compute(
        (m + 2,), lambda i: tvm.tir.if_then_else(tvm.tir.all(i >= 1, i <= m), A[i - 1], 0.0), "Apad"
    )
    B = te.compute((m,), lambda i: Apad[i] + Apad[i + 1] + Apad[i + 2])
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=4)
    s[Apad].compute_at(s[B], xo)
    s[Apad].pragma(s[Apad].op.axis[0], "memcpy")
    mod = schedule_to_module(s, [A, B])
    mod = tvm.tir.transform.StorageFlatten(64)(mod._move())
    mod = tvm.tir.transform.Simplify()(mod._move())
    def cb(src, dst, pad_before, pad_after, pad_value):
        assert dst.elem_offset.value == 0
        # Source window starts one element before the tile, clamped at index 0.
        tvm.testing.assert_prim_expr_equal(src.elem_offset, tvm.te.max(xo * 4, 1) - 1)
        # Padding is non-zero only at the first (before) and last (after) tiles.
        rpad_before = tvm.te.max(1 - xo * 4, 0)
        rpad_after = tvm.te.max(xo * 4 - 7, 0)
        tvm.testing.assert_prim_expr_equal(pad_before[0], rpad_before)
        tvm.testing.assert_prim_expr_equal(pad_after[0], rpad_after)
        # Copied extent shrinks by however much padding was applied.
        tvm.testing.assert_prim_expr_equal(src.shape[0], 6 - rpad_before - rpad_after)
        return tvm.tir.Evaluate(0)
    stmt = tvm.tir.transform.InjectCopyIntrin("memcpy", cb)(mod)["main"].body
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_copy2d()
    test_copy_pad()
    test_copy_pad_split()
    test_single_point_test()
| 4,614 | 35.92 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test feature extraction"""
import numpy as np
import tvm
from tvm import te
from tvm.autotvm import feature
def test_iter_feature_gemm():
    """Extract itervar features from a 128x128 GEMM schedule and compare the
    per-loop rows against pre-recorded expected values."""
    N = 128
    k = te.reduce_axis((0, N), "k")
    A = te.placeholder((N, N), name="A")
    B = te.placeholder((N, N), name="B")
    C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
    s = te.create_schedule(C.op)
    feas = feature.get_itervar_feature(s, [A, B, C], take_log=False)
    # One dict per loop level (y, x, k); values recorded from the extractor.
    expected = [
        {
            "_attr_": [128, 1, 128, 2097152, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            "A_0": [128, -1, 16384, 128, 0, 0],
            "B_0": [0, -1, 16384, 128, 0, 0],
            "C_0": [128, -1, 16384, 128, 0, 0],
            "C_1": [128, -1, 16384, 128, 0, 0],
        },
        {
            "_attr_": [128, 2, 16384, 16384, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            "A_0": [0, -1, 128, 128, 0, 0],
            "B_0": [1, -1, 16384, 1, 0, 0],
            "C_0": [1, -1, 128, 128, 0, 0],
            "C_1": [1, -1, 128, 128, 0, 0],
        },
        {
            "_attr_": [128, 3, 2097152, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
            "A_0": [1, -1, 128, 1, 0, 0],
            "B_0": [128, -1, 128, 1, 0, 0],
            "C_1": [0, -1, 1, 128, 0, 0],
            "C_2": [0, -1, 1, 128, 0, 0],
        },
    ]
    # Only compare entries present in the expected dict; skip extras.
    for ans, row in zip(expected, feas):
        for pair in row:
            if pair[0] not in ans:
                continue
            assert ans[pair[0]] == pair[1:], "%s: %s vs %s" % (pair[0], ans[pair[0]], pair[1:])
def test_curve_feature_gemm():
    """Curve features of a 128x128 GEMM flatten to a fixed-size vector."""
    dim = 128
    red = te.reduce_axis((0, dim), "k")
    lhs = te.placeholder((dim, dim), name="A")
    rhs = te.placeholder((dim, dim), name="B")
    out = te.compute(
        lhs.shape, lambda y, x: te.sum(lhs[y, red] * rhs[red, x], axis=red), name="C"
    )
    sched = te.create_schedule(out.op)
    samples = 30
    feas = feature.get_buffer_curve_sample_flatten(sched, [lhs, rhs, out], sample_n=samples)
    # Expected length: sample_n x 3 buffers x 4 curves x 2 numbers per curve.
    assert len(feas) == samples * 3 * 4 * 2
def test_feature_shape():
    """test the dimensions of flatten feature are the same"""
    N = 1024
    n_sample = 100
    def get_gemm_feature(target):
        # Build a tiled GEMM with a randomly permuted loop order so that a
        # different schedule configuration is exercised on every call.
        k = te.reduce_axis((0, N), "k")
        A = te.placeholder((N, N), name="A")
        B = te.placeholder((N, N), name="B")
        C = te.compute(A.shape, lambda y, x: te.sum(A[y, k] * B[k, x], axis=k), name="C")
        s = te.create_schedule(C.op)
        y, x = s[C].op.axis
        axes = list(s[C].tile(y, x, 8, 8)) + [k]
        perm = np.random.permutation(5)
        axes = [axes[x] for x in perm]
        s[C].reorder(*axes)
        if "gpu" in target.keys:
            pick = []
            # filter out reduction axis
            for i in range(len(perm)):
                if perm[i] != 4:
                    pick.append(axes[i])
            # Bind three non-reduction axes so the schedule is GPU-valid.
            s[C].bind(pick[0], te.thread_axis("blockIdx.x"))
            s[C].bind(pick[1], te.thread_axis("vthread"))
            s[C].bind(pick[2], te.thread_axis("threadIdx.y"))
        with target:
            feas = feature.get_itervar_feature(s, [A, B, C])
            feas = feature.flatten_itervar_feature(feas)
        return feas
    targets = [
        tvm.target.cuda(),
        tvm.target.mali(),
        tvm.target.arm_cpu(),
    ]
    # The flattened feature length must be identical across random configs.
    for target in targets:
        dim = len(get_gemm_feature(target))
        for i in range(n_sample):
            assert dim == len(get_gemm_feature(target)), (
                "dimensions of feature do not match" " for different configurations"
            )
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_iter_feature_gemm()
    test_curve_feature_gemm()
    test_feature_shape()
| 4,429 | 31.335766 | 95 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_helpers.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm.script import tir as T, ir as I
import tvm.testing
def test_annotate_entry_func_single_primfunc():
    """AnnotateEntryFunc marks the sole PrimFunc of a module as the entry."""
    @tvm.script.ir_module
    class MockModule:
        @T.prim_func
        def func1(A: T.Buffer((16,), "float32")):
            for i in T.serial(16):
                if i == 5:
                    if i == 5:
                        A[i] = 0.0
    mod = MockModule
    assert mod
    assert mod["func1"].attrs is None
    after = tvm.tir.transform.AnnotateEntryFunc()(mod)
    # The single function now carries a truthy "tir.is_entry_func" attribute.
    assert (
        after["func1"].attrs
        and "tir.is_entry_func" in after["func1"].attrs
        and after["func1"].attrs["tir.is_entry_func"]
    )
# Test module
# Shared two-PrimFunc module used by several tests below. NOTE(review):
# test_filter_primfunc rebinds its functions in place, so tests that assert
# `attrs is None` on this module are order-sensitive.
@tvm.script.ir_module
class MockModule:
    @T.prim_func
    def func1(A: T.Buffer((16,), "float32")):
        for i in T.serial(16):
            if i == 5:
                if i == 5:
                    A[i] = 0.0
    @T.prim_func
    def func2(A: T.Buffer((32,), "float32")):
        for i in T.serial(32):
            if i == 15:
                if i == 15:
                    A[i] = 0.0
@pytest.mark.xfail
def test_annotate_entry_func_multiple_primfunc():
    """AnnotateEntryFunc has no unique candidate when the module holds two
    PrimFuncs, so this transform invocation is expected to fail."""
    module = MockModule
    assert module
    for name in ("func1", "func2"):
        assert module[name].attrs is None
    # Expected to fail: the entry function is ambiguous.
    tvm.tir.transform.AnnotateEntryFunc()(module)
def test_bind_target():
    """BindTarget attaches the given target to every function in the module."""
    module = MockModule
    assert module
    cuda_target = tvm.target.Target("cuda")
    for name in ("func1", "func2"):
        assert module[name].attrs is None
    updated = tvm.tir.transform.BindTarget(cuda_target)(module)
    for name in ("func1", "func2"):
        func_attrs = updated[name].attrs
        assert func_attrs and "target" in func_attrs
        assert func_attrs["target"] == cuda_target
class TestBindTarget(tvm.testing.CompareBeforeAfter):
    """BindTarget adds the "target" attribute"""
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda"))
    def before():
        T.evaluate(0)
    def expected():
        # Same body, now annotated with the cuda target.
        T.func_attr({"target": T.target("cuda")})
        T.evaluate(0)
class TestBindTargetWithHostToExposedFunction(tvm.testing.CompareBeforeAfter):
    """BindTarget adds the host target to externally-exposed functions"""
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda", host="llvm"))
    def before():
        # A global_symbol marks the function as externally exposed.
        T.func_attr({"global_symbol": "main"})
        T.evaluate(0)
    def expected():
        T.func_attr({"global_symbol": "main", "target": T.target("cuda", host="llvm")})
        T.evaluate(0)
class TestBindTargetWithHostToInternalFunction(tvm.testing.CompareBeforeAfter):
    """Internal functions have a target annotation, but without the host
    The host portion of the target annotation provides host
    parameters, and is used to expose a function externally as part of
    `MakePackedAPI` and `MakeUnpackedAPI`. For internal functions, no
    external exposure is required, so the host attribute should not be
    used.
    """
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda", host="llvm"))
    def before():
        T.evaluate(0)
    def expected():
        # No global_symbol on `before`, so the bound target omits the host.
        T.func_attr({"target": T.target("cuda")})
        T.evaluate(0)
class TestBindTargetIgnoresExisting(tvm.testing.CompareBeforeAfter):
    """BindTarget should not replace existing annotations"""
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda"))
    def before():
        # Already annotated with nvptx; BindTarget must leave it untouched.
        T.func_attr({"target": T.target("nvptx")})
        T.evaluate(0)
    expected = before
class TestBindTargetUpdatesHost(tvm.testing.CompareBeforeAfter):
    """BindTarget should update host for existing annotations"""
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda", host="llvm -opt-level=0"))
    def before():
        T.func_attr({"global_symbol": "func", "target": T.target("nvptx")})
        T.evaluate(0)
    def expected():
        # The existing nvptx target is kept; only the host portion is added.
        T.func_attr(
            {
                "global_symbol": "func",
                "target": T.target("nvptx", host="llvm -opt-level=0"),
            }
        )
        T.evaluate(0)
class TestBindTargetMultipleFunctions(tvm.testing.CompareBeforeAfter):
    """BindTarget may apply to multiple functions in a module"""
    transform = tvm.tir.transform.BindTarget(tvm.target.Target("cuda"))
    def before(self):
        @tvm.script.ir_module
        class mod:
            @T.prim_func
            def func1():
                T.evaluate(0)
            @T.prim_func
            def func2():
                T.evaluate(0)
        return mod
    def expected(self):
        # Both functions receive the cuda target annotation.
        @tvm.script.ir_module
        class mod:
            @T.prim_func
            def func1():
                T.func_attr({"target": T.target("cuda")})
                T.evaluate(0)
            @T.prim_func
            def func2():
                T.func_attr({"target": T.target("cuda")})
                T.evaluate(0)
        return mod
def test_filter_primfunc():
    """tir.transform.Filter keeps exactly the PrimFuncs whose predicate holds."""
    mod = MockModule
    assert mod
    # Annotate each function for testing
    # NOTE(review): this rebinds functions on the shared module-level
    # MockModule, so sibling tests asserting `attrs is None` depend on
    # execution order — consider operating on a copy.
    mod["func1"] = mod["func1"].with_attr("temp", "test1")
    mod["func2"] = mod["func2"].with_attr("temp", "test2")
    # Test condition that does not filter out anything
    def checker_filter_out_none(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("temp" in func.attrs)
    after = tvm.tir.transform.Filter(checker_filter_out_none)(mod)
    assert len(after.functions) == 2
    # Filtered functions should satisfy the given condition.
    assert checker_filter_out_none(after["func1"])
    assert checker_filter_out_none(after["func2"])
    # Test condition that selectively filters out primfuncs
    def checker_filter_out_one(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("temp" in func.attrs) and func.attrs["temp"] == "test1"
    after = tvm.tir.transform.Filter(checker_filter_out_one)(mod)
    assert len(after.functions) == 1
    # Filtered functions should satisfy the given condition.
    assert checker_filter_out_one(after["func1"])
    # Test condition that filters out everything
    def checker_filter_out_both(func: tvm.tir.PrimFunc):
        return (func.attrs is not None) and ("invalid_attr" in func.attrs)
    after = tvm.tir.transform.Filter(checker_filter_out_both)(mod)
    assert len(after.functions) == 0
class TestFilterRemovesGlobalVarMap(tvm.testing.CompareBeforeAfter):
    """Filtering out a function should be identical to never adding it
    This test is to guard against hidden state in the IRModule that
    remains after filtering. Previously, this was observed in the
    `IRModuleNode::global_var_map_`, which retained entries of
    filtered-out functions.
    """
    transform = tvm.tir.transform.Filter(lambda prim_func: False)
    def before(self):
        @I.ir_module
        class module:
            @T.prim_func
            def func():
                T.evaluate(0)
        return module
    def expected(self):
        # The filtered result must compare equal to a genuinely empty module.
        @I.ir_module
        class module:
            pass
        return module
if __name__ == "__main__":
    # Delegate test discovery/execution to tvm.testing when run as a script.
    tvm.testing.main()
| 7,922 | 29.011364 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test builder and runner"""
import logging
import multiprocessing
import concurrent
import numpy as np
import tvm
from tvm import te
from tvm.autotvm.measure import executor
from tvm.testing.autotvm import DummyRunner, bad_matmul, get_sample_task
from tvm import autotvm
from tvm.autotvm.measure.measure import MeasureErrorNo, MeasureResult
from tvm.autotvm import measure
from inspect import Signature
def test_task_tuner_without_measurement():
    """Drive every tuner over a sample task with a dummy runner and check
    that each one records a non-trivial best_flops."""
    task, _ = get_sample_task()
    options = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner())
    logging.info("%s", task.config_space)
    tuner_classes = (
        autotvm.tuner.RandomTuner,
        autotvm.tuner.GridSearchTuner,
        autotvm.tuner.GATuner,
        autotvm.tuner.XGBTuner,
    )
    for make_tuner in tuner_classes:
        tuner = make_tuner(task)
        tuner.tune(n_trial=10, measure_option=options)
        assert tuner.best_flops > 1
def task_tuner_spawn():
    """Child-process entry point: verify we run under the "spawn" start
    method, then exercise the tuner test in this process."""
    assert multiprocessing.get_start_method(False) == "spawn"
    test_task_tuner_without_measurement()
def test_task_tuner_without_measurement_spawn():
    """Run the tuner test inside a subprocess using the "spawn" start method.

    Subprocesses inherit the start method of their context, so the child
    asserts it is running under "spawn" before tuning.
    """
    # Subprocesses inherit the spawn method of their parents
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(target=task_tuner_spawn)
    p.start()
    p.join()
    # A failure in the child (assertion error, crash) is only visible through
    # its exit code; without this check the test would silently pass.
    assert p.exitcode == 0
def test_task_runner_with_ref_input():
    """test runner ref_input without measurement"""
    # Fixed reference inputs the runner is expected to forward to its executor.
    refinp = [np.random.rand(128, 128) for i in range(3)]
    runner = measure.LocalRunner()
    runner.ref_input = refinp
    class DummyExecutor(measure.executor.Executor):
        def __init__(self):
            self.ran_dummy_executor = False
        def submit(self, func, *args, **kwargs):
            # Record the call and check ref_input was passed through to func.
            self.ran_dummy_executor = True
            sig = Signature.from_callable(func)
            assert sig.bind(*args, **kwargs).arguments["ref_input"] == refinp
            dummy_future = concurrent.futures.Future()
            dummy_future.set_result(None)
            return dummy_future
    runner.executor = DummyExecutor()
    runner.run([None], [None])
    assert runner.executor.ran_dummy_executor
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    logging.basicConfig(level=logging.INFO)
    test_task_tuner_without_measurement()
    test_task_tuner_without_measurement_spawn()
    test_task_runner_with_ref_input()
| 3,131 | 31.968421 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_format_si_prefix.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from numpy import isclose
import random
from tvm.autotvm import utils
# SI prefixes from yocto up to yotta; index i maps to exponent -24 + 3*i
# (see test below). "\xb5" is the micro sign, " " is the empty (unit) prefix.
SI_PREFIXES = "yzafpn\xb5m kMGTPEZY"
def test_format_si_prefix():
    """format_si_prefix scales a raw value into the requested SI prefix."""
    # Simple float conversion: 1024 with prefix "k" becomes 1.024.
    assert utils.format_si_prefix(1024, "k") == 1.024
    for idx, prefix in enumerate(SI_PREFIXES):
        whole = random.randint(0, 1000)
        frac = random.randint(0, 1000)
        exponent = -24 + 3 * idx  # prefix 0 (yocto) corresponds to 10^-24
        raw = whole * (10**exponent) + frac * (10 ** (exponent - 3))
        assert isclose(utils.format_si_prefix(raw, prefix), whole + frac / 1000)
    # Zero stays zero under any prefix.
    assert utils.format_si_prefix(0, "y") == 0
if __name__ == "__main__":
    # Allow running this test file directly, without pytest.
    test_format_si_prefix()
| 1,477 | 34.190476 | 75 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_opencl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import re
# Target string shared by every test in this file.
target = "opencl"
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_ternary_expression():
    """Codegen smoke test: if_then_else and Select over narrow integer
    dtypes must compile (and launch) for the OpenCL target."""
    def check_if_then_else(dev, n, dtype):
        A = te.placeholder((n,), name="A", dtype=dtype)
        true_value = tvm.tir.const(1, dtype=dtype)
        false_value = tvm.tir.const(3, dtype=dtype)
        max_lhs = tvm.tir.const(2, dtype=dtype)
        max_rhs = tvm.tir.if_then_else(A[0] > 0, true_value, false_value)
        C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
        func = te.create_prim_func([A, C])
        sch = tvm.tir.Schedule(func)
        (x,) = sch.get_loops(sch.get_block("C"))
        sch.bind(x, "threadIdx.x")
        fun = tvm.build(sch.mod, target=target)
        a = tvm.nd.empty((n,), A.dtype, dev)
        c = tvm.nd.empty((n,), A.dtype, dev)
        # Only need to test compiling here
        fun(a, c)
    def check_select(dev, n, dtype):
        # Same structure as above, but using tir.Select instead of if_then_else.
        A = te.placeholder((n,), name="A", dtype=dtype)
        true_value = tvm.tir.const(1, dtype=dtype)
        false_value = tvm.tir.const(3, dtype=dtype)
        max_lhs = tvm.tir.const(2, dtype=dtype)
        max_rhs = tvm.tir.Select(A[0] > 0, true_value, false_value)
        C = te.compute((n,), lambda i: tvm.te.max(max_lhs, max_rhs), name="C")
        func = te.create_prim_func([A, C])
        sch = tvm.tir.Schedule(func)
        (x,) = sch.get_loops(sch.get_block("C"))
        sch.bind(x, "threadIdx.x")
        fun = tvm.build(sch.mod, target=target)
        a = tvm.nd.empty((n,), A.dtype, dev)
        c = tvm.nd.empty((n,), A.dtype, dev)
        # Only need to test compiling here
        fun(a, c)
    dev = tvm.device(target, 0)
    check_if_then_else(dev, 1, "int8")
    check_if_then_else(dev, 1, "uint8")
    check_if_then_else(dev, 1, "int16")
    check_if_then_else(dev, 1, "uint16")
    check_select(dev, 1, "int8")
    check_select(dev, 1, "uint8")
    check_select(dev, 1, "int16")
    check_select(dev, 1, "uint16")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_inf_nan():
    """Codegen smoke test: +/-inf and nan constants must compile for OpenCL."""

    def _compile_const(dev, n, value, dtype):
        inp = te.placeholder((n,), name="A", dtype=dtype)
        const_expr = tvm.tir.const(value, dtype=dtype)
        out = te.compute((n,), lambda i: const_expr, name="C")
        sch = tvm.tir.Schedule(te.create_prim_func([inp, out]))
        (loop,) = sch.get_loops(sch.get_block("C"))
        sch.bind(loop, "threadIdx.x")
        built = tvm.build(sch.mod, target=target)
        # Compilation is the point of the test; run once on empty buffers.
        a = tvm.nd.empty((n,), inp.dtype, dev)
        c = tvm.nd.empty((n,), inp.dtype, dev)
        built(a, c)

    dev = tvm.device(target, 0)
    for value in (-float("inf"), float("inf"), float("nan")):
        for dtype in ("float32", "float64"):
            _compile_const(dev, 1, value, dtype)
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_max():
    """Codegen smoke test: te.max must compile for OpenCL on each dtype."""

    def _compile_max(dev, n, dtype):
        inp = te.placeholder((n,), name="A", dtype=dtype)
        lhs = inp[0] + tvm.tir.const(1, dtype=dtype)
        rhs = tvm.tir.const(0, dtype=dtype)
        out = te.compute((n,), lambda i: tvm.te.max(lhs, rhs), name="C")
        sch = tvm.tir.Schedule(te.create_prim_func([inp, out]))
        (loop,) = sch.get_loops(sch.get_block("C"))
        sch.bind(loop, "threadIdx.x")
        built = tvm.build(sch.mod, target=target)
        # Compilation is the point of the test; run once on empty buffers.
        a = tvm.nd.empty((n,), inp.dtype, dev)
        c = tvm.nd.empty((n,), inp.dtype, dev)
        built(a, c)

    dev = tvm.device(target, 0)
    for dtype in ("int8", "uint8", "int16", "uint16", "float32", "float64"):
        _compile_max(dev, 1, dtype)
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_erf():
    """erf must lower to the OpenCL `erf` call, never the `erff` spelling.

    Every other OpenCL test in this file guards on device availability with
    ``requires_gpu``/``requires_opencl``; without those decorators this test
    would run — and fail at ``tvm.build`` — on hosts whose TVM has no OpenCL
    support, so they are added here for consistency.
    """
    def check_erf(dev, n, dtype):
        A = te.placeholder((n,), name="A", dtype=dtype)
        C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C")
        s = te.create_schedule(C.op)
        s[C].bind(s[C].op.axis[0], te.thread_axis("threadIdx.x"))
        fun = tvm.build(s, [A, C], target)
        source_str = fun.imported_modules[0].get_source()
        # Exactly one occurrence of "erf" and none of "erff" in the source.
        matches = re.findall("erf", source_str)
        error_matches = re.findall("erff", source_str)
        assert len(matches) == 1 and len(error_matches) == 0
    dev = tvm.device(target, 0)
    check_erf(dev, 1, "float32")
    check_erf(dev, 1, "float64")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
def test_opencl_type_casting():
    """Check that a vectorized boolean Select condition is emitted through the
    expected chain of OpenCL convert_int4/convert_uint4 casts."""
    def check_type_casting(ctx, n, dtype):
        block_size = 4
        # Select(1, 0) gated on two int32 comparisons so codegen must cast the
        # vectorized boolean condition.
        C = te.compute(
            (n,),
            lambda i: tvm.tir.Select(
                tvm.tir.all(
                    *[
                        i // block_size == tvm.tir.const(3, "int32"),
                        i % 3 == tvm.tir.const(1, "int32"),
                    ]
                ),
                tvm.tir.const(1, dtype),
                tvm.tir.const(0, dtype),
            ),
            name="C",
        )
        # NOTE: test simple convert pattern
        func = te.create_prim_func([C])
        sch = tvm.tir.Schedule(func)
        (x,) = sch.get_loops(sch.get_block("C"))
        tx, vx = sch.split(x, factors=[None, block_size])
        sch.bind(tx, "threadIdx.x")
        sch.vectorize(vx)
        fun = tvm.build(sch.mod, target=target)
        c = tvm.nd.empty((n,), dtype, ctx)
        assembly = fun.imported_modules[0].get_source()
        # Exact device-source fragments expected for the two vectorized
        # conditions; they must appear joined by `&&`.
        lcond = "convert_int4(((convert_uint4(((uint4)(((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3), ((convert_int(get_local_id(0))) == 3)))))"
        rcond = "(convert_uint4(((((int4)(((convert_int(get_local_id(0))))+(1*0), ((convert_int(get_local_id(0))))+(1*1), ((convert_int(get_local_id(0))))+(1*2), ((convert_int(get_local_id(0))))+(1*3))) % ((int4)(3, 3, 3, 3))) == ((int4)(1, 1, 1, 1))))))))"
        pattern_cond = "({} && {})".format(lcond, rcond)
        assert assembly.count(pattern_cond) != 0
        fun(c)
    dev = tvm.device(target, 0)
    check_type_casting(dev, 32, "float32")
    # fp16 is not yet supported in ci
    # check_type_casting(dev, 16, "float16")
@tvm.testing.requires_gpu
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl", "opencl -device=adreno")
def test_opencl_ceil_log2(target):
    """ceil_log2 should cast through float on Adreno and double elsewhere."""

    def _check(target, n, dtype):
        with tvm.target.Target(target):
            out = te.compute(
                (n,),
                lambda i: tvm.topi.ceil_log2(i),
                name="C",
            )
            sched = tvm.tir.Schedule(te.create_prim_func([out]))
            (loop,) = sched.get_loops(sched.get_block("C"))
            sched.bind(loop, "threadIdx.x")
            built = tvm.build(sched.mod, target=target)
            assembly = built.imported_modules[0].get_source()
            # The adreno variant keeps the cast in fp32; the generic opencl
            # target goes through fp64.
            pattern = "convert_float" if "adreno" in target else "convert_double"
            assert assembly.count(pattern) != 0

    _check(target, 32, "float32")
# Entry point: run every test in this file under pytest via the TVM helper.
if __name__ == "__main__":
    tvm.testing.main()
| 8,121 | 35.585586 | 257 | py |
tvm | tvm-main/tests/python/unittest/test_tir_texture_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm.ir.module import IRModule
from tvm import tir
from tvm.script import tir as T
def test_texture_scope():
    """Build and compile a two-stage pipeline whose buffers live in the
    "global.texture" memory scope, exercising OpenCL texture codegen."""
    @tvm.script.ir_module
    class PlusOneMultTwo:
        @T.prim_func
        def main(a: T.handle, b: T.handle) -> None:
            T.func_attr({"global_symbol": "main", "tir.noalias": True})
            # A -> B -> C, with B an intermediate texture allocation:
            # B = A + 1, then C = B * 2.
            A = T.match_buffer(a, (128, 128, 4), dtype="float32", scope="global.texture")
            B = T.alloc_buffer((128, 128, 4), dtype="float32", scope="global.texture")
            C = T.match_buffer(b, (128, 128, 4), dtype="float32", scope="global.texture")
            for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"):
                for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"):
                    for k in T.serial(4):
                        with T.block("B"):
                            vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k])
                            B[vb, vt, vk] = A[vb, vt, vk] + T.float32(1)
            for block_idx in T.thread_binding(0, 128, thread="blockIdx.x"):
                for thread_idx in T.thread_binding(0, 128, thread="threadIdx.x"):
                    for k in T.serial(4):
                        with T.block("C"):
                            vb, vt, vk = T.axis.remap("SSS", [block_idx, thread_idx, k])
                            C[vb, vt, vk] = B[vb, vt, vk] * T.float32(2)
    sch = tir.Schedule(PlusOneMultTwo, debug_mask="all")
    def schedule_block(block):
        # Vectorize the innermost (length-4) loop of the given block.
        _, _, inner = sch.get_loops(block)
        sch.vectorize(inner)
    schedule_block(sch.get_block("B"))
    schedule_block(sch.get_block("C"))
    target = tvm.target.Target("opencl")
    # Successful compilation is the whole test; nothing is executed.
    mod = tvm.build(sch.mod["main"], target=target)
# Entry point: run every test in this file under pytest via the TVM helper.
if __name__ == "__main__":
    tvm.testing.main()
| 2,607 | 39.75 | 89 | py |
tvm | tvm-main/tests/python/unittest/test_te_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.nn.pooling import pool2d
def test_tensor():
    """Basic te.compute tensor: shape, op identity, hashing and indexing."""
    m, n, l = te.size_var("m"), te.size_var("n"), te.size_var("l")
    lhs = te.placeholder((m, l), name="A")
    rhs = te.placeholder((n, l), name="B")
    prod = te.compute((m, n, l), lambda i, j, k: lhs[i, k] * rhs[j, k])
    print(prod)
    print(prod.op.body)
    assert tuple(prod.shape) == (m, n, l)
    assert isinstance(lhs.op, tvm.te.PlaceholderOp)
    assert lhs == lhs
    # The op's first output is the tensor itself and hashes consistently,
    # so it can serve as a dict key.
    assert prod.op.output(0) == prod
    assert hash(prod.op.output(0)) == hash(prod)
    mapping = {prod.op.output(0): 1}
    assert mapping[prod] == 1
    # Chained scalar indexing followed by a cast keeps the requested dtype.
    assert prod[0][0][0].astype("float16").dtype == "float16"
def test_rank_zero():
    """Reduce into a rank-0 (scalar) tensor using a rank-0 placeholder."""
    m = te.size_var("m")
    vec = te.placeholder((m,), name="A")
    scale = te.placeholder((), name="s")
    axis = te.reduce_axis((0, m), name="k")
    total = te.compute((), lambda: te.sum(vec[axis] * scale(), axis=axis))
    print(total)
    print(total.op.body)
    assert tuple(total.shape) == ()
def test_conv1d():
    """A 1-D three-point stencil expressed through a named compute function."""
    n = te.size_var("n")
    data = te.placeholder((n + 2), name="A")

    def stencil(out_idx):
        # Center the window one element into the padded input.
        center = out_idx + 1
        return data[center - 1] + data[center] + data[center + 1]

    result = te.compute(n, stencil)
def test_tensor_slice():
    """Chained __getitem__ slices of one compute can feed another compute."""
    n = te.size_var("n")
    ones = te.compute((n, n), lambda i, j: 1)
    doubled = te.compute((n,), lambda i: ones[0][i] + ones[0][i])
def test_tensor_reduce_multi_axis():
    """te.sum accepts the reduction axes as either a tuple or a list."""
    m = te.size_var("m")
    n = te.size_var("n")
    data = te.placeholder((m, n), name="A")
    k1 = te.reduce_axis((0, n), "k")
    k2 = te.reduce_axis((0, m), "k")
    for axes in ((k1, k2), [k1, k2]):
        te.compute((1,), lambda _: te.sum(data[k1, k2], axis=axes))
def test_tensor_comm_reducer():
    """Define a custom commutative reducer and use it inside a compute."""
    m = te.size_var("m")
    n = te.size_var("n")
    data = te.placeholder((m, n), name="A")
    axis = te.reduce_axis((0, n), "k")
    my_sum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
    te.compute((m,), lambda i: my_sum(data[i, axis], axis=axis))
def test_tensor_comm_reducer_overload():
    """A comm_reducer is also callable as a plain binary operator on exprs."""
    m = te.size_var("m")
    n = te.size_var("n")
    my_sum = te.comm_reducer(lambda x, y: x + y, lambda t: tvm.tir.const(0, dtype=t))
    binary_result = my_sum(m, n)
def test_tensor_reduce():
    """Reduce over an intermediate compute and round-trip the result via JSON."""
    m = te.size_var("m")
    n = te.size_var("n")
    l = te.size_var("l")
    lhs = te.placeholder((m, l), name="A")
    rhs = te.placeholder((n, l), name="B")
    prod = te.compute((m, n, l), lambda i, j, k: lhs[i, k] * rhs[j, k])
    axis = te.reduce_axis((0, lhs.shape[1]), "k")
    reduced = te.compute((m, n), lambda i, j: te.sum(prod(i, j, axis + 1), axis=axis))
    # json load save
    reloaded = tvm.ir.load_json(tvm.ir.save_json(reduced))
    assert isinstance(reloaded, te.tensor.Tensor)
    assert str(reloaded) == str(reduced)
def test_tensor_reduce_multiout_with_cond():
    """Multi-output comm_reducer combined with a `where` predicate."""

    def fcombine(x, y):
        # Combine the two value streams component-wise.
        return x[0] + y[0], x[1] + y[1]

    def fidentity(t0, t1):
        # Identity element per output dtype.
        return tvm.tir.const(0, t0), tvm.tir.const(1, t1)

    mysum = te.comm_reducer(fcombine, fidentity, name="mysum")
    m = te.var("m")
    n = te.var("n")
    idx = te.placeholder((m, n), name="idx", dtype="int32")
    val = te.placeholder((m, n), name="val", dtype="int32")
    k = te.reduce_axis((0, n), "k")
    # Only reduce over even positions of k.
    even = te.floormod(k, 2) == 0
    te.compute((m,), lambda i: mysum((idx[i, k], val[i, k]), axis=k, where=even), name="T")
def test_tensor_compute1():
    """Tensorize an elementwise add with a custom `vadd` tensor intrinsic and
    check the whole body lowers to a single extern call."""
    m = 1024
    factor = 16
    dtype = "float32"
    def intrin_vadd(n):
        # Compute pattern matched by the intrinsic: elementwise add of two
        # length-n vectors.
        x = te.placeholder((n,))
        y = te.placeholder((n,))
        z = te.compute(x.shape, lambda i: x[i] + y[i])
        def intrin_func(ins, outs):
            # Lower the pattern to one extern "vadd" call on raw access pointers.
            ib = tvm.tir.ir_builder.create()
            ib.emit(
                tvm.tir.call_extern(
                    outs[0].dtype,
                    "vadd",
                    ins[0].access_ptr("r"),
                    ins[1].access_ptr("r"),
                    outs[0].access_ptr("wr"),
                )
            )
            return ib.get()
        return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
    vadd = intrin_vadd(factor)
    A = te.placeholder((m // factor, factor), name="A", dtype=dtype)
    B = te.placeholder((m // factor, factor), name="B", dtype=dtype)
    C = te.compute((m // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]))
    s = te.create_schedule(C.op)
    # check lowering with the CSE pass disabled as otherwise it would do some commoning
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        stmt = tvm.lower(s, [A, B, C])["main"].body
    # The tensorized loop body should be a single Evaluate (the vadd call).
    assert isinstance(stmt.body, tvm.tir.Evaluate)
def test_tensor_compute2():
    """Tensorize a blocked matmul with a custom `gemv` intrinsic that supplies
    separate body/reset/update lowerings for the reduction."""
    M = 2048
    N = 1024
    L = 1024
    factor = 16
    factor1 = 32
    factor2 = 32
    dtype = "float32"
    def intrin_gemm(m, n, l):
        # Compute pattern: (m, n) result of x (m, l) times y (n, l) reduced over l.
        k = te.reduce_axis((0, l))
        x = te.placeholder((m, l))
        y = te.placeholder((n, l))
        # in theory, no relation
        z = te.compute((m, n), lambda i, j: te.sum(x[i][k] * y[j][k], axis=k))
        def intrin_func(ins, outs):
            x_ptr = ins[0].access_ptr("r")
            y_ptr = ins[1].access_ptr("r")
            z_ptr = outs[0].access_ptr("w")
            # Reduction intrinsics may return (full body, init/reset, update).
            body = tvm.tir.call_packed("gemv", x_ptr, y_ptr, z_ptr, m, n, l)
            reset = tvm.tir.call_packed("fill_zero", z_ptr, m, n)
            update = tvm.tir.call_packed("gemv_add", x_ptr, y_ptr, z_ptr, m, n, l)
            return body, reset, update
        return te.decl_tensor_intrin(z.op, intrin_func, default_buffer_params={"offset_factor": n})
    vgemm = intrin_gemm(factor1, factor2, factor)
    A = te.placeholder((M // factor1, L // factor, factor1, factor), name="A", dtype=dtype)
    B = te.placeholder((N // factor2, L // factor, factor2, factor), name="B", dtype=dtype)
    k = te.reduce_axis((0, L // factor), name="k")
    C = te.compute(
        (M // factor1, N // factor2, factor1, factor2),
        lambda i, j: vgemm(
            A[i, k, 0:factor1, 0:factor], B[j, k, 0:factor2, 0:factor], reduce_axis=k
        ),
    )
    s = te.create_schedule(C.op)
    # check lowering with the CSE pass disabled as otherwise it would do some commoning
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        stmt = tvm.lower(s, [A, B, C])["main"].body
    # Expect the reset call followed by the update call in the lowered body.
    assert isinstance(stmt.body.body[0], tvm.tir.Evaluate)
    assert isinstance(stmt.body.body[1].body, tvm.tir.Evaluate)
def test_tensor_scan():
    """A cumulative scan over the time axis keeps the full (m, n) shape."""
    m = te.size_var("m")
    n = te.size_var("n")
    x = te.placeholder((m, n))
    s = te.placeholder((m, n))
    init = te.compute((1, n), lambda _, i: x[0, i])
    update = te.compute((m, n), lambda t, i: s[t - 1, i] + x[t, i])
    res = tvm.te.scan(init, update, s)
    assert tuple(res.shape) == (m, n)
def test_scan_multi_out():
    """A scan with two coupled states; round-trip the op through JSON."""
    m = te.size_var("m")
    n = te.size_var("n")
    x1 = te.placeholder((m, n))
    s1 = te.placeholder((m, n))
    x2 = te.placeholder((m, n))
    s2 = te.placeholder((m, n))
    inits = [
        te.compute((1, n), lambda _, i: x1[0, i]),
        te.compute((1, n), lambda _, i: x2[0, i]),
    ]
    updates = [
        # State 1 depends on both previous states; state 2 only on itself.
        te.compute((m, n), lambda t, i: s1[t - 1, i] + s2[t - 1, i] + x1[t, i]),
        te.compute((m, n), lambda t, i: x2[t, i] + s2[t - 1, i]),
    ]
    r0, r1 = tvm.te.scan(inits, updates, [s1, s2])
    assert r0.value_index == 0
    assert r1.value_index == 1
    # The scan op must survive a save/load round trip.
    reloaded = tvm.ir.load_json(tvm.ir.save_json(r0.op))
    assert isinstance(reloaded, tvm.te.ScanOp)
def test_extern():
    """te.extern wraps a packed-function call; inputs arrive as Buffers."""
    m = te.size_var("m")
    data = te.placeholder((m,), name="A")

    def extern_func(ins, outs):
        assert isinstance(ins[0], tvm.te.schedule.Buffer)
        return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, m)

    out = te.extern((m,), [data], extern_func)
    assert tuple(out.shape) == (m,)
def test_extern_multi_out():
    """te.extern can declare several outputs; each result records its index."""
    m = te.size_var("m")
    data = te.placeholder((m,), name="A")
    scaled = te.compute((m,), lambda i: data[i] * 10)

    def extern_func(ins, outs):
        assert isinstance(ins[0], tvm.te.schedule.Buffer)
        return tvm.tir.call_packed("myadd", ins[0].data, outs[0].data, outs[1].data, m)

    outputs = te.extern([data.shape, data.shape], [data, scaled], extern_func)
    assert len(outputs) == 2
    assert outputs[1].value_index == 1
def test_tuple_inputs():
    """A tuple-valued compute yields sibling tensors sharing a single op."""
    m = te.size_var("m")
    n = te.size_var("n")
    in0 = te.placeholder((m, n), name="A0")
    in1 = te.placeholder((m, n), name="A1")
    out0, out1 = te.compute((m, n), lambda i, j: (in0[i, j] * 2, in1[i, j] * 3), name="T")
    te.create_schedule(out0.op)
    # Both outputs share the same shape and op; value_index distinguishes them.
    for dim0, dim1 in zip(out0.shape, out1.shape):
        assert dim0 == dim1
    assert out0.op == out1.op
    assert out0.value_index == 0
    assert out1.value_index == 1
def test_tuple_with_different_deps():
    """Schedule a multi-output compute where only one output (B0) is consumed
    and check the sibling output (B1) is still realized exactly once."""
    m = te.size_var("m")
    n = te.size_var("n")
    A0 = te.placeholder((m, n), name="A1")
    A1 = te.placeholder((m, n), name="A2")
    B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name="B")
    C = te.compute((m, n), lambda i, j: B0[i, j] + 4, name="C")
    s = te.create_schedule(C.op)
    xo, xi = s[C].split(C.op.axis[0], factor=10)
    s[B0.op].compute_at(s[C], xo)
    sch = s.normalize()
    bounds = tvm.te.schedule.InferBound(sch)
    stmt = tvm.te.schedule.ScheduleOps(sch, bounds)
    def get_B1_realize(x):
        # Collect every ProducerRealize node that materializes output 1 of B.
        if (
            isinstance(x, tvm.tir.ProducerRealize)
            and x.producer.op == B1.op
            and x.producer.value_index == 1
        ):
            ret.append(x)
    # `ret` is captured by the visitor closure above.
    ret = []
    tvm.tir.stmt_functor.post_order_visit(stmt, get_B1_realize)
    # The outermost realize belongs to C, and B1 is realized exactly once.
    assert stmt.producer == C and len(ret) == 1
def test_tensor_inputs():
    """input_tensors of a compute op lists its placeholder dependencies once."""
    src = te.placeholder((1,), name="x")
    out = te.compute(src.shape, lambda i: src[i] + src[i])
    assert tuple(out.op.input_tensors) == (src,)
def test_tensor_pool():
    """Tensorize the output-height axis of a 3x3 max pool with a custom intrinsic."""
    def intrin_pool():
        # Compute pattern matched by the intrinsic: 3x3 max pooling of a
        # (64, 16, 16) input producing (64, 14, 14).
        A = te.placeholder((64, 16, 16), name="A")
        kh = te.reduce_axis((0, 3), name="kh")
        kw = te.reduce_axis((0, 3), name="kw")
        P = te.compute(
            (64, 14, 14),
            lambda c, oh, ow: tvm.te.max(A[c, oh + kh, ow + kw], axis=[kh, kw]),
            name="p",
        )
        def intrin_func(ins, outs):
            # Lower the matched region to a single packed "op" call.
            dinp = ins[0]
            dout = outs[0]
            return tvm.tir.call_packed("op", dinp, dout)
        return te.decl_tensor_intrin(P.op, intrin_func, default_buffer_params={"offset_factor": 1})
    A = te.placeholder((1, 64, 16, 16), name="A")
    P = pool2d(
        data=A, kernel=(3, 3), stride=(1, 1), dilation=(1, 1), padding=(0, 0, 0, 0), pool_type="max"
    )
    s = te.create_schedule(P.op)
    _, oh, _, _ = P.op.axis
    intrin = intrin_pool()
    s[P].tensorize(oh, intrin)
    # Lowering must succeed with the tensorized schedule.
    tvm.lower(s, [A, P])
def test_tensor_scalar_mixed():
    """End-to-end te.extern call mixing a rank-1 tensor and a rank-0 scalar input."""
    # test te with tensor and scalar
    a = np.array(np.random.uniform(size=(10,)), "float32")
    b = np.array(np.random.uniform(size=(1))[0], "float32")
    c = np.array(np.random.uniform(size=(10,)), "float32")
    @tvm.register_func("tvm.test_tensor_scalar_scale")
    def my_scale(tensor, scalar, out):
        # Packed-function callee: out = tensor * scalar, computed in numpy.
        out_np = tensor.numpy() * scalar.numpy()
        tvm.nd.array(out_np).copyto(out)
    A = te.placeholder(a.shape, name="A")
    B = te.placeholder(b.shape, name="B")
    C = te.extern(
        a.shape,
        [A, B],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.test_tensor_scalar_scale", ins[0], ins[1], outs[0]
        ),
        name="C",
    )
    s = te.create_schedule(C.op)
    f = tvm.build(s, [A, B, C], "llvm")
    ta = tvm.nd.array(a)
    tb = tvm.nd.array(b)
    tc = tvm.nd.array(c)
    f(ta, tb, tc)
    # The registered callee must have produced a * b in tc.
    tvm.testing.assert_allclose(a * b, tc.numpy())
def test_tensor_scalar():
    """Rank-0 tensors flow through te.extern and a registered packed copy."""
    # test te with scalar shape
    src_np = np.array(np.random.uniform(size=(1))[0], "float32")
    dst_np = np.array(0.0, "float32")

    @tvm.register_func("tvm.test_tensor_scalar_copy")
    def mycopy(x, y):
        x.copyto(y)

    src = te.placeholder(src_np.shape, name="A")
    dst = te.extern(
        src_np.shape,
        [src],
        lambda ins, outs: tvm.tir.call_packed("tvm.test_tensor_scalar_copy", ins[0], outs[0]),
        name="B",
    )
    sched = te.create_schedule(dst.op)
    built = tvm.build(sched, [src, dst], "llvm")
    src_nd = tvm.nd.array(src_np)
    dst_nd = tvm.nd.array(dst_np)
    built(src_nd, dst_nd)
    tvm.testing.assert_allclose(src_nd.numpy(), dst_nd.numpy())
if __name__ == "__main__":
    # Use the standard TVM test entry point instead of a hand-maintained call
    # list: pytest discovery runs every test_* function in this file, so a
    # newly added test cannot be silently skipped. This also matches the
    # other test files in this suite, which already use tvm.testing.main().
    tvm.testing.main()
| 13,688 | 30.6875 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_sketch_generation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test sketch generation. """
import sys
import tvm
import tvm.testing
import pytest
from tvm import te, auto_scheduler
from tvm.auto_scheduler import _ffi_api
from tvm.auto_scheduler.loop_state import Stage
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
double_matmul_auto_scheduler_test,
conv2d_nchw_bn_relu_auto_scheduler_test,
max_pool2d_auto_scheduler_test,
min_nm_auto_scheduler_test,
softmax_nm_auto_scheduler_test,
softmax_abcd_auto_scheduler_test,
conv2d_winograd_nhwc_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
def generate_sketches(
    workload_func, args, target, print_for_debug=False, init_search_callbacks=None
):
    """Create a SearchTask for `workload_func(*args)` on `target` and return
    the sketches produced by a fresh SketchPolicy.

    `init_search_callbacks` can preload custom sketch rules (see
    test_cpu_custom_sketch).  The hardware params pin num_cores to 4.
    """
    # NOTE: test_cpu_matmul_sketch and test_cpu_max_pool2d_sketch assume 4 cores to trigger all
    # possible sketch generations.
    task = auto_scheduler.SearchTask(
        func=workload_func,
        args=args,
        target=target,
        hardware_params=auto_scheduler.HardwareParams(num_cores=4, target=target),
    )
    policy = auto_scheduler.SketchPolicy(
        task, verbose=0, init_search_callbacks=init_search_callbacks
    )
    return policy.generate_sketches(print_for_debug)
def assert_compute_at_condition(stage, condition):
    """Check that `stage` has the compute_at state named by `condition`."""
    expected = Stage.COMPUTE_AT_TRANS_TABLE[condition]
    assert stage.compute_at == expected


def assert_is_tiled(stage):
    """Check that `stage` has been tiled by the sketch policy."""
    tiled = _ffi_api.SearchPolicyUtilsIsTiled(stage)
    assert tiled


def assert_is_not_tiled(stage):
    """Check that `stage` has not been tiled."""
    tiled = _ffi_api.SearchPolicyUtilsIsTiled(stage)
    assert not tiled


def assert_has_cache_write(state, stage_id):
    """Check that `state` contains a cache-write stage for `stage_id`."""
    found = _ffi_api.SearchPolicyUtilsHasCacheWriteStage(state, stage_id)
    assert found


def assert_has_cache_read(state, stage_id):
    """Check that `state` contains a cache-read stage for `stage_id`."""
    found = _ffi_api.SearchPolicyUtilsHasCacheReadStage(state, stage_id)
    assert found


def assert_has_rfactor(state, stage_id):
    """Check that `state` contains an rfactor stage for `stage_id`."""
    found = _ffi_api.SearchPolicyUtilsHasRfactorStage(state, stage_id)
    assert found


def assert_has_cross_thread_reduction(state, stage_id):
    """Check that `state` uses cross-thread reduction for `stage_id`."""
    found = _ffi_api.SearchPolicyUtilsHasCrossThreadReduction(state, stage_id)
    assert found
def test_cpu_matmul_sketch():
    """Sketch counts and structures for CPU matmul at several shapes; the
    inline triple-quoted notes describe what each generated sketch contains."""
    sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "llvm")
    """ 3 multi-level tiling sketches
        No.0 : Multi-level tiling
        No.1 : Multi-level tiling with cache write on position 0
        No.2 : Multi-level tiling with cache write on position 1
    """
    assert len(sketches) == 3
    # Sketch 0
    assert_is_tiled(sketches[0].stages[2])
    # Sketch 1
    assert_is_tiled(sketches[1].stages[2])
    assert_has_cache_write(sketches[1], 2)
    assert_compute_at_condition(sketches[1].stages[2], "iter")
    # Sketch 2
    assert_is_tiled(sketches[2].stages[2])
    assert_has_cache_write(sketches[2], 2)
    assert_compute_at_condition(sketches[2].stages[2], "iter")
    assert sketches[1] != sketches[2]
    # A small reduction-heavy shape additionally triggers the rfactor rules.
    sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 512), "llvm")
    """ 2 rfactor sketches + 3 multi-level tiling sketches
        No.0 : Rfactor with factor position 0
        No.1 : Rfactor with factor position 1
        No.2 : Multi-level tiling
        No.3 : Multi-level tiling with cache write on position 0
        No.4 : Multi-level tiling with cache write on position 1
    """
    assert len(sketches) == 5
    # Sketch 0
    assert_has_rfactor(sketches[0], 2)
    # Sketch 1
    assert_has_rfactor(sketches[1], 2)
    assert sketches[0] != sketches[1]
    # Sketch 2
    assert_is_tiled(sketches[2].stages[2])
    # Sketch 3
    assert_is_tiled(sketches[3].stages[2])
    assert_has_cache_write(sketches[3], 2)
    assert_compute_at_condition(sketches[3].stages[2], "iter")
    # Sketch 4
    assert_is_tiled(sketches[4].stages[2])
    assert_has_cache_write(sketches[4], 2)
    assert_compute_at_condition(sketches[4].stages[2], "iter")
    assert sketches[3] != sketches[4]
    sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "llvm")
    """ 3 multi-level tiling sketches for one matmul, so 3 * 3 = 9 sketches in total """
    assert len(sketches) == 9
    assert_is_tiled(sketches[8].stages[5])
def test_cpu_conv2d_bn_relu_sketch():
    """Conv2d+bn+relu on CPU: fusion/tiling decisions of the three sketches."""
    sketches = generate_sketches(
        conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "llvm"
    )
    """ 3 multi-level tiling sketches
        No.0 : Conv2d multi-level tiling with fusion on position 0
        No.1 : Conv2d multi-level tiling with fusion on position 1
        No.2 : Conv2d multi-level tiling without fusion
    """
    assert len(sketches) == 3
    # Sketch 0
    assert_is_not_tiled(sketches[0].stages[1])
    assert_is_tiled(sketches[0].stages[3])
    assert_compute_at_condition(sketches[0].stages[3], "iter")
    assert_compute_at_condition(sketches[0].stages[5], "inlined")
    assert_compute_at_condition(sketches[0].stages[7], "inlined")
    assert_compute_at_condition(sketches[0].stages[9], "inlined")
    assert_is_tiled(sketches[0].stages[10])
    # Sketch 1
    assert_is_not_tiled(sketches[1].stages[1])
    assert_is_tiled(sketches[1].stages[3])
    assert_compute_at_condition(sketches[1].stages[3], "iter")
    assert_compute_at_condition(sketches[1].stages[5], "inlined")
    assert_compute_at_condition(sketches[1].stages[7], "inlined")
    assert_compute_at_condition(sketches[1].stages[9], "inlined")
    assert_is_tiled(sketches[1].stages[10])
    # Sketch 2 (no fusion: the conv stage stays at root, output untiled)
    assert_is_not_tiled(sketches[2].stages[1])
    assert_is_tiled(sketches[2].stages[3])
    assert_compute_at_condition(sketches[2].stages[3], "root")
    assert_compute_at_condition(sketches[2].stages[5], "inlined")
    assert_compute_at_condition(sketches[2].stages[7], "inlined")
    assert_compute_at_condition(sketches[2].stages[9], "inlined")
    assert_is_not_tiled(sketches[2].stages[10])
def test_cpu_max_pool2d_sketch():
    """Pooling triggers no sketch rules on CPU: only the default sketch."""
    generated = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 1), "llvm")
    # Exactly one sketch, carrying no transform steps at all.
    assert len(generated) == 1
    assert len(generated[0].transform_steps) == 0
def test_cpu_min_sketch():
    """Min-reduction on CPU: rfactor at two positions plus the default sketch."""
    generated = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "llvm")
    assert len(generated) == 3
    # Sketches 0 and 1: rfactor applied at the two possible factor positions.
    for idx in (0, 1):
        assert_has_rfactor(generated[idx], 1)
    assert generated[0] != generated[1]
    # Sketch 2: the default sketch has no transform steps.
    assert len(generated[2].transform_steps) == 0
def test_cpu_softmax_sketch():
    """Softmax on CPU: each of its two reductions yields (2 rfactor + 1
    default) variants, combined row-major into 3 * 3 = 9 sketches."""
    sketches = generate_sketches(softmax_nm_auto_scheduler_test, (1, 1024), "llvm")
    """ (2 rfactor sketches + 1 default sketch) * (2 rfactor sketches + 1 default sketch) """
    assert len(sketches) == (3 * 3)
    for i in range(0, 3):
        for j in range(0, 3):
            # Sketches are laid out row-major: index = i * 3 + j, where
            # variants 0 and 1 of each loop are the rfactor variants.
            sketch = sketches[i * 3 + j]
            if j in [0, 1]:
                assert_has_rfactor(sketch, 1)
            if i in [0, 1]:
                assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
    # Last sketch combines the two defaults: no transforms at all.
    assert len(sketches[8].transform_steps) == 0
    sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "llvm")
    """ (2 rfactor sketches + 1 default sketch) * (2 rfactor sketches + 1 default sketch) """
    assert len(sketches) == (3 * 3)
    for i in range(0, 3):
        for j in range(0, 3):
            sketch = sketches[i * 3 + j]
            if j in [0, 1]:
                assert_has_rfactor(sketch, 1)
            if i in [0, 1]:
                assert_has_rfactor(sketch, 4 if j in [0, 1] else 3)
    assert len(sketches[8].transform_steps) == 0
def test_cpu_conv2d_winograd_sketch():
    """Winograd conv2d on CPU: the batched-gemm stage drives the tiling/cache
    decisions of the three generated sketches."""
    sketches = generate_sketches(
        conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "llvm"
    )
    """ 3 multi-level tiling sketches
        No.0 : Bgemm multi-level tiling
        No.1 : Bgemm multi-level tiling with cache write on position 0
        No.2 : Bgemm multi-level tiling with cache write on position 1
    """
    assert len(sketches) == 3
    # Sketch 0
    assert_is_not_tiled(sketches[0].stages[1])
    assert_is_not_tiled(sketches[0].stages[2])
    assert_compute_at_condition(sketches[0].stages[3], "inlined")
    assert_is_tiled(sketches[0].stages[4])
    assert_is_tiled(sketches[0].stages[6])
    assert_compute_at_condition(sketches[0].stages[7], "inlined")
    assert_is_tiled(sketches[0].stages[8])
    assert_is_not_tiled(sketches[0].stages[9])
    # Sketch 1
    assert_is_not_tiled(sketches[1].stages[1])
    assert_is_not_tiled(sketches[1].stages[2])
    assert_compute_at_condition(sketches[1].stages[3], "inlined")
    assert_is_tiled(sketches[1].stages[4])
    assert_is_tiled(sketches[1].stages[6])
    assert_has_cache_write(sketches[1], 6)
    assert_compute_at_condition(sketches[1].stages[6], "iter")
    assert_compute_at_condition(sketches[1].stages[8], "inlined")
    assert_is_tiled(sketches[1].stages[9])
    assert_is_not_tiled(sketches[1].stages[10])
    # Sketch 2
    assert_is_not_tiled(sketches[2].stages[1])
    assert_is_not_tiled(sketches[2].stages[2])
    assert_compute_at_condition(sketches[2].stages[3], "inlined")
    assert_is_tiled(sketches[2].stages[4])
    assert_is_tiled(sketches[2].stages[6])
    assert_has_cache_write(sketches[2], 6)
    assert_compute_at_condition(sketches[2].stages[6], "iter")
    assert_compute_at_condition(sketches[2].stages[8], "inlined")
    assert_is_tiled(sketches[2].stages[9])
    assert_is_not_tiled(sketches[2].stages[10])
    assert sketches[1] != sketches[2]
def test_cpu_zero_rank_sketch():
    """Zero-rank reduction on CPU: 2 rfactor + 1 multi-level tiling sketch."""
    generated = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "llvm")
    assert len(generated) == 3
def test_cpu_custom_sketch():
    """Preload a custom sketch rule; both candidate states it returns must
    survive: the untouched one and one with C's first loop split by [8, 2]."""
    def meet_condition_func(search_policy, state, stage_id):
        # Apply the custom rule and skip all built-in sketch rules.
        return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
    def apply_func(search_policy, state, stage_id):
        ret = []
        state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
        C = state.stage_ops[2]
        # Candidate 1: the state unchanged.
        ret.append([state.state_object, -1])
        s1 = state.copy()
        i, _, _ = s1[C].iters
        # Candidate 2: split C's first loop into extents 32 x 8 x 2.
        s1.split(C, i, [8, 2])
        ret.append([s1.state_object, -1])
        return ret
    sketches = generate_sketches(
        matmul_auto_scheduler_test,
        (512, 512, 512),
        "llvm",
        init_search_callbacks=[
            auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
        ],
    )
    assert len(sketches) == 2
    # Sketch 0 keeps the original 512x512x512 loop nest.
    assert sketches[0].stages[2].iters[0].range.extent == 512
    assert sketches[0].stages[2].iters[1].range.extent == 512
    assert sketches[0].stages[2].iters[2].range.extent == 512
    # Sketch 1 carries the [8, 2] split of the first loop (512 -> 32*8*2).
    assert sketches[1].stages[2].iters[0].range.extent == 32
    assert sketches[1].stages[2].iters[1].range.extent == 8
    assert sketches[1].stages[2].iters[2].range.extent == 2
    assert sketches[1].stages[2].iters[3].range.extent == 512
    assert sketches[1].stages[2].iters[4].range.extent == 512
@tvm.testing.requires_cuda
def test_cuda_matmul_sketch():
    """Matmul on CUDA: large shapes get one multi-level tiling sketch with
    shared-memory cache reads; small reduction-heavy shapes add a
    cross-thread-reduction sketch."""
    sketches = generate_sketches(matmul_auto_scheduler_test, (512, 512, 512), "cuda")
    """ 1 multi-level tiling sketch """
    assert len(sketches) == 1
    assert_has_cache_read(sketches[0], 0)
    assert_compute_at_condition(sketches[0].stages[1], "iter")
    assert_has_cache_read(sketches[0], 2)
    assert_compute_at_condition(sketches[0].stages[3], "iter")
    assert_has_cache_write(sketches[0], 4)
    assert_is_tiled(sketches[0].stages[4])
    assert_compute_at_condition(sketches[0].stages[4], "iter")
    assert_is_tiled(sketches[0].stages[5])
    sketches = generate_sketches(matmul_auto_scheduler_test, (8, 8, 1024), "cuda")
    """ 1 cross thread reuction sketch + 1 multi-level tiling sketch """
    assert len(sketches) == 2
    # Sketch 0
    assert_has_cross_thread_reduction(sketches[0], 2)
    # Sketch 1
    assert_has_cache_read(sketches[1], 0)
    assert_compute_at_condition(sketches[1].stages[1], "iter")
    assert_has_cache_read(sketches[1], 2)
    assert_compute_at_condition(sketches[1].stages[3], "iter")
    assert_has_cache_write(sketches[1], 4)
    assert_is_tiled(sketches[1].stages[4])
    assert_compute_at_condition(sketches[1].stages[4], "iter")
    assert_is_tiled(sketches[1].stages[5])
    sketches = generate_sketches(double_matmul_auto_scheduler_test, (512,), "cuda")
    """ 1 multi-level tiling sketch for one matmul, so 1 x 1 = 1 sketch in total """
    assert len(sketches) == 1
    assert_compute_at_condition(sketches[0].stages[5], "root")
    assert_compute_at_condition(sketches[0].stages[6], "iter")
@tvm.testing.requires_cuda
def test_cuda_conv2d_bn_relu_sketch():
    """Conv2d+bn+relu on CUDA: one multi-level tiling sketch with cache reads
    for the inputs and all elementwise stages inlined."""
    sketches = generate_sketches(
        conv2d_nchw_bn_relu_auto_scheduler_test, (1, 56, 56, 512, 512, 3, 1, 1), "cuda"
    )
    """ 1 multi-level tiling sketch """
    assert len(sketches) == 1
    assert_has_cache_read(sketches[0], 1)
    assert_compute_at_condition(sketches[0].stages[1], "inlined")
    assert_compute_at_condition(sketches[0].stages[2], "iter")
    assert_has_cache_read(sketches[0], 3)
    assert_compute_at_condition(sketches[0].stages[4], "iter")
    assert_is_tiled(sketches[0].stages[5])
    assert_compute_at_condition(sketches[0].stages[5], "iter")
    assert_compute_at_condition(sketches[0].stages[7], "inlined")
    assert_compute_at_condition(sketches[0].stages[9], "inlined")
    assert_compute_at_condition(sketches[0].stages[11], "inlined")
    assert_is_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_max_pool2d_sketch():
    """Pooling triggers no sketch rules on CUDA: only the default sketch."""
    generated = generate_sketches(max_pool2d_auto_scheduler_test, (1, 56, 56, 512, 0), "cuda")
    # Exactly one sketch, carrying no transform steps at all.
    assert len(generated) == 1
    assert len(generated[0].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_min_sketch():
    """Min-reduction on CUDA: cross-thread-reduction sketch plus default sketch."""
    generated = generate_sketches(min_nm_auto_scheduler_test, (10, 1024), "cuda")
    assert len(generated) == 2
    # Sketch 0 uses cross-thread reduction; sketch 1 is left untouched.
    assert_has_cross_thread_reduction(generated[0], 1)
    assert len(generated[1].transform_steps) == 0
@tvm.testing.requires_cuda
def test_cuda_softmax_sketch():
    """Softmax on CUDA: each reduction yields (cross-thread reduction +
    default) variants, so 2 * 2 = 4 sketches per workload."""
    sketches = generate_sketches(softmax_nm_auto_scheduler_test, (2, 1024), "cuda")
    """ (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
    assert len(sketches) == (2 * 2)
    # NOTE(review): several checks below inspect sketches[3] even under the
    # "Sketch 0/1/2" comments — this looks like a copy-paste; confirm whether
    # sketches[0..2] were intended.
    # Sketch 0
    assert_has_cross_thread_reduction(sketches[0], 1)
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    assert_has_cross_thread_reduction(sketches[0], 3)
    # Sketch 1
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    assert_has_cross_thread_reduction(sketches[1], 3)
    # Sketch 2
    assert_has_cross_thread_reduction(sketches[2], 1)
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    # Sketch 3
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    sketches = generate_sketches(softmax_abcd_auto_scheduler_test, (1, 12, 128, 128), "cuda")
    """ (1 cross thread reuction sketch + 1 default sketch) * (1 cross thread reuction sketch + 1 default sketch) """
    assert len(sketches) == (2 * 2)
    # Sketch 0
    assert_has_cross_thread_reduction(sketches[0], 1)
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    assert_has_cross_thread_reduction(sketches[0], 3)
    # Sketch 1
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    assert_has_cross_thread_reduction(sketches[1], 3)
    # Sketch 2
    assert_has_cross_thread_reduction(sketches[2], 1)
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
    # Sketch 3
    assert_compute_at_condition(sketches[3].stages[2], "inlined")
@tvm.testing.requires_cuda
def test_cuda_conv2d_winograd_sketch():
    """Winograd conv2d on CUDA: one multi-level tiling sketch centred on the
    batched-gemm stage with cache reads/write around it."""
    sketches = generate_sketches(
        conv2d_winograd_nhwc_auto_scheduler_test, (1, 28, 28, 128, 128, 3, 1, 1), "cuda"
    )
    """ 1 multi-level tiling sketch """
    assert len(sketches) == 1
    assert_compute_at_condition(sketches[0].stages[1], "inlined")
    assert_compute_at_condition(sketches[0].stages[2], "iter")
    assert_compute_at_condition(sketches[0].stages[3], "inlined")
    assert_is_tiled(sketches[0].stages[4])
    assert_has_cache_read(sketches[0], 4)
    assert_compute_at_condition(sketches[0].stages[5], "iter")
    assert_has_cache_read(sketches[0], 6)
    assert_compute_at_condition(sketches[0].stages[7], "iter")
    assert_is_tiled(sketches[0].stages[8])
    assert_compute_at_condition(sketches[0].stages[8], "iter")
    assert_has_cache_write(sketches[0], 8)
    assert_compute_at_condition(sketches[0].stages[9], "root")
    assert_is_tiled(sketches[0].stages[11])
    assert_is_not_tiled(sketches[0].stages[12])
@tvm.testing.requires_cuda
def test_cuda_zero_rank_sketch():
    """A zero-rank reduction on CUDA yields two sketches.

    1 cross-thread reduction sketch + 1 multi-level tiling sketch.
    """
    generated = generate_sketches(zero_rank_reduce_auto_scheduler_test, (128,), "cuda")
    assert len(generated) == 2
if __name__ == "__main__":
    # Allow running this test file directly via `python <file>`.
    tvm.testing.main()
| 17,926 | 38.4 | 117 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_error.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # TVMScript workload shared by the schedule-error tests below:
    # a 128x128 matmul split into an "init" block and an "update" block.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                # Note: B is indexed [vj, vk], so this computes A @ B^T.
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# pylint: enable=no-member,invalid-name,unused-variable
def test_tir_schedule_error_detail():
    """With error_render_level="detail", the error message names the missing block."""
    sch = tir.Schedule(matmul, debug_mask="all", error_render_level="detail")
    with pytest.raises(tir.ScheduleError) as err:
        sch.get_block("wrong_name")
    # Exactly one message argument is expected on the exception.
    (message,) = err.value.args
    assert "Cannot find a block with the name: wrong_name" in message
def test_tir_schedule_error_fast():
    """With error_render_level="fast", a generic (un-detailed) message is produced."""
    sch = tir.Schedule(matmul, debug_mask="all", error_render_level="fast")
    with pytest.raises(tir.ScheduleError) as err:
        sch.get_block("wrong_name")
    (message,) = err.value.args
    assert "Cannot find a block with the specified name" in message
def test_tir_schedule_error_none():
    """With error_render_level="none", the message is a "(not rendered)" placeholder."""
    sch = tir.Schedule(matmul, debug_mask="all", error_render_level="none")
    with pytest.raises(tir.ScheduleError) as err:
        sch.get_block("wrong_name")
    (message,) = err.value.args
    assert "(not rendered)" in message
def test_tir_schedule_attribute_error():
    """Accessing an undefined attribute on a Schedule raises AttributeError."""
    schedule = tir.Schedule(matmul)
    with pytest.raises(AttributeError):
        schedule.non_existent_field()
if __name__ == "__main__":
    # Allow running this test file directly via `python <file>`.
    tvm.testing.main()
| 2,679 | 32.924051 | 77 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
from tvm.tir import Schedule
from tvm.tir.schedule.transform import tile_with_tensor_intrin
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN, AVX512_DOT_16x4_INTRIN
# Untiled dense workload: u8 activations x pre-packed i8 weights -> i32 output.
# The weight layout (64, 256, 16, 4) matches the 16x4 dot-product intrinsics
# (VNNI / AVX512) used by the tests below.
@tvm.script.ir_module
class DenseTIRModule:
    @T.prim_func
    def main(
        placeholder: T.Buffer((1024, 1024), "uint8"),
        placeholder_1: T.Buffer((64, 256, 16, 4), "int8"),
        compute: T.Buffer((1024, 1024), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            for i0, i1, i2 in T.grid(1024, 1024, 1024):
                with T.block("compute"):
                    i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                    T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
                    T.writes(compute[i, j])
                    with T.init():
                        compute[i, j] = 0
                    compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                        placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
                    )
# Expected result of tiling DenseTIRModule with a 16x4 dot intrinsic:
# the j loop is split into (64, 16) and the k loop into (256, 4).
@tvm.script.ir_module
class DenseTIRModuleTiled:
    @T.prim_func
    def main(
        placeholder: T.Buffer((1024, 1024), "uint8"),
        placeholder_1: T.Buffer((64, 256, 16, 4), "int8"),
        compute: T.Buffer((1024, 1024), "int32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1_0, i2_0, i1_1, i2_1 in T.grid(1024, 64, 256, 16, 4):
            with T.block("compute"):
                i = T.axis.spatial(1024, i0)
                j = T.axis.spatial(1024, i1_0 * 16 + i1_1)
                k = T.axis.reduce(1024, i2_0 * 4 + i2_1)
                T.reads(placeholder[i, k], placeholder_1[j // 16, k // 4, j % 16, k % 4])
                T.writes(compute[i, j])
                with T.init():
                    compute[i, j] = 0
                compute[i, j] = compute[i, j] + T.cast(placeholder[i, k], "int32") * T.cast(
                    placeholder_1[j // 16, k // 4, j % 16, k % 4], "int32"
                )
# Untiled NCHWc int8 conv2d workload (1x1 kernel) used by the conv2d tiling test.
@tvm.script.ir_module
class Conv2dNCHWcTIRModule:
    @T.prim_func
    def main(
        placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
        placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
        conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for i0, i1, i2, i3, i4, i5, i6, i7, i8, i9 in T.grid(1, 16, 56, 56, 16, 1, 1, 4, 4, 4):
            with T.block("conv2d_NCHWc_int8"):
                (
                    n,
                    oc_chunk,
                    oh,
                    ow,
                    oc_block,
                    kh,
                    kw,
                    ic_outer,
                    ic_f_inner,
                    ic_s_inner,
                ) = T.axis.remap("SSSSSRRRRR", [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9])
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    "int32",
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )
# Expected result of tiling Conv2dNCHWcTIRModule with a 16x4 dot intrinsic:
# oc_block is split into (1, 16) and ic_s_inner into (1, 4), giving 12 loops.
@tvm.script.ir_module
class Conv2dNCHWcTIRModuleTiled:
    @T.prim_func
    def main(
        placeholder: T.Buffer((1, 4, 56, 56, 16), "uint8"),
        placeholder_1: T.Buffer((16, 4, 1, 1, 4, 16, 4), "int8"),
        conv2d_NCHWc_int8: T.Buffer((1, 16, 56, 56, 16), "int32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0, i1, i2, i3, i4_0, i5, i6, i7, i8, i9_0, i4_1, i9_1 in T.grid(
            1, 16, 56, 56, 1, 1, 1, 4, 4, 1, 16, 4
        ):
            with T.block("conv2d_NCHWc_int8"):
                n, oc_chunk, oh, ow = T.axis.remap("SSSS", [i0, i1, i2, i3])
                oc_block = T.axis.spatial(16, i4_0 * 16 + i4_1)
                kh, kw, ic_outer, ic_f_inner = T.axis.remap("RRRR", [i5, i6, i7, i8])
                ic_s_inner = T.axis.reduce(4, i9_0 * 4 + i9_1)
                T.reads(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                )
                T.writes(conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block])
                with T.init():
                    conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = 0
                conv2d_NCHWc_int8[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc_int8[
                    n, oc_chunk, oh, ow, oc_block
                ] + T.cast(
                    placeholder[n, ic_outer, oh + kh, ow + kw, ic_f_inner * 4 + ic_s_inner],
                    "int32",
                ) * T.cast(
                    placeholder_1[oc_chunk, ic_outer, kh, kw, ic_f_inner, oc_block, ic_s_inner],
                    "int32",
                )
def test_tile_with_tensor_intrin_dense(intrin=VNNI_DOT_16x4_INTRIN):
    """Tiling the dense workload by a 16x4 dot intrinsic yields DenseTIRModuleTiled."""
    sch = Schedule(DenseTIRModule)
    compute_block = sch.get_block("compute")
    tiled_loop = tile_with_tensor_intrin(sch, compute_block, intrin)
    # The returned loop must be the inner spatial loop (4th of the 5 loops).
    loops = sch.get_loops(compute_block)
    assert sch.get(tiled_loop) == sch.get(loops[3])
    tvm.ir.assert_structural_equal(sch.mod, DenseTIRModuleTiled)
def test_tile_with_tensor_intrin_conv2d_nchwc(intrin=VNNI_DOT_16x4_INTRIN):
    """Tiling NCHWc int8 conv2d by a 16x4 dot intrinsic yields Conv2dNCHWcTIRModuleTiled."""
    sch = Schedule(Conv2dNCHWcTIRModule)
    conv_block = sch.get_block("conv2d_NCHWc_int8")
    tiled_loop = tile_with_tensor_intrin(sch, conv_block, intrin)
    loops = sch.get_loops(conv_block)
    # Splitting oc_block and ic_s_inner adds two loops: 10 -> 12 in total,
    # and the returned loop is the second-to-last one.
    assert len(loops) == 12
    assert sch.get(tiled_loop) == sch.get(loops[-2])
    tvm.ir.assert_structural_equal(sch.mod, Conv2dNCHWcTIRModuleTiled)
if __name__ == "__main__":
    # Run each tiling test for both the VNNI and AVX512 16x4 dot intrinsics.
    test_tile_with_tensor_intrin_dense()
    test_tile_with_tensor_intrin_dense(AVX512_DOT_16x4_INTRIN)
    test_tile_with_tensor_intrin_conv2d_nchwc()
    test_tile_with_tensor_intrin_conv2d_nchwc(AVX512_DOT_16x4_INTRIN)
| 7,588 | 41.161111 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_merge_dynamic_shared_memory_allocations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.driver.build_module import schedule_to_module
from tvm.topi.math import cast
from tvm.script import tir as T
def run_passes(sch, args):
    """Lower a TE schedule and run the lowering pipeline that ends with
    MergeDynamicSharedMemoryAllocations, returning the transformed IRModule."""
    module = schedule_to_module(sch, args)
    pipeline = [
        tvm.tir.transform.StorageFlatten(64),
        tvm.tir.transform.Simplify(),
        tvm.tir.transform.VectorizeLoop(),
        tvm.tir.transform.StorageRewrite(),
        tvm.tir.transform.MergeDynamicSharedMemoryAllocations(),
    ]
    return tvm.transform.Sequential(pipeline)(module)
def verify_single_allocation(stmt, alloc_size=None):
    """Assert that `stmt` contains exactly one "shared.dyn" Allocate node.

    If `alloc_size` is given, the allocation's first extent must equal it.
    """
    dyn_shared_extents = []

    def collect(node):
        is_dyn_shared_alloc = (
            isinstance(node, tvm.tir.Allocate)
            and node.buffer_var.type_annotation.storage_scope == "shared.dyn"
        )
        if is_dyn_shared_alloc:
            dyn_shared_extents.append(node.extents[0])

    tvm.tir.stmt_functor.post_order_visit(stmt, collect)
    assert len(dyn_shared_extents) == 1
    if alloc_size:
        assert dyn_shared_extents[0] == alloc_size
@tvm.testing.requires_gpu
def test_matmul_dyn_shared():
    """Merge the fp16/fp16/fp32 "shared.dyn" buffers of a blocked matmul.

    Builds a 1024x1024 blocked matmul with IRBuilder, checks that the pass
    leaves a single dynamic shared allocation of the expected size, then
    (when a device is enabled) runs the kernel and compares against numpy.
    """
    n = 1024
    block = 16
    A = te.placeholder((n, n), name="A", dtype="float16")
    B = te.placeholder((n, n), name="B", dtype="float16")

    def syncthread():
        # Barrier across the thread block ("__syncthreads" equivalent).
        return tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"]))

    def test_matmul_ir(A, B, C):
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ty = te.thread_axis("threadIdx.y")
        bx = te.thread_axis("blockIdx.x")
        by = te.thread_axis("blockIdx.y")
        ib.scope_attr(tx, "thread_extent", block)
        ib.scope_attr(ty, "thread_extent", block)
        ib.scope_attr(bx, "thread_extent", n // block)
        ib.scope_attr(by, "thread_extent", n // block)
        A_sh = ib.allocate(A.dtype, (block, block), scope="shared.dyn", name="A_sh")  # fp16
        B_sh = ib.allocate(B.dtype, (block, block), scope="shared.dyn", name="B_sh")  # fp16
        # Create a dynamic shared memory buffer for the accumulation.
        # This tests merging dynamic shared memory allocations with different data types.
        # In practice, there is no need to allocate a shared memory for C.
        C_local = ib.allocate(C.dtype, (1,), scope="local", name="C_local")
        C_sh = ib.allocate(C.dtype, (block, block), scope="shared.dyn", name="C_sh")  # fp32
        A_ptr = ib.buffer_ptr(A)
        B_ptr = ib.buffer_ptr(B)
        C_ptr = ib.buffer_ptr(C)
        C_local[0] = 0.0
        with ib.for_range(0, n // block, name="i") as i:
            A_sh[ty, tx] = A_ptr[by * block + ty, i * block + tx]
            B_sh[ty, tx] = B_ptr[i * block + ty, bx * block + tx]
            ib.emit(syncthread())
            with ib.for_range(0, block, name="k") as k:
                C_local[0] += cast(A_sh[ty, k] * B_sh[k, tx], "float32")
            ib.emit(syncthread())
            C_sh[ty, tx] = C_local[0]
            C_ptr[by * block + ty, bx * block + tx] = C_sh[ty, tx]
        return ib.get()

    C = te.extern(
        A.shape,
        [A, B],
        lambda ins, outs: test_matmul_ir(ins[0], ins[1], outs[0]),
        name="matmul",
        dtype="float32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    # C can be allocated at the start of A, so we only need to allocate 2 block * block memory with dtype = float16
    expected_alloc_size = block * block * 4
    verify_single_allocation(mod["main"].body, expected_alloc_size)

    def check_target(target):
        # Only exercise targets enabled in this build/runtime.
        if not tvm.testing.device_enabled(target):
            return
        fmatmul = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        size = (n, n)
        a_np = np.random.uniform(size=size).astype(A.dtype)
        b_np = np.random.uniform(size=size).astype(B.dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(size, dtype=C.dtype), dev)
        fmatmul(a, b, c)
        np_ref = np.dot(a_np.astype("float32"), b_np.astype("float32"))
        tvm.testing.assert_allclose(c.numpy(), np_ref, 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_vectorized_store():
    """Test vectorized stores into merged dynamic shared memory (fp16 + fp32)."""
    n = te.size_var("n")
    A = te.placeholder((n,), name="A", dtype="float16")
    B = te.placeholder((n,), name="B", dtype="float32")

    def test_device_ir(A, B, C):
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        values_per_thread = 4
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", tvm.tir.indexdiv(n, values_per_thread))
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")  # fp16
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")  # fp32
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        # Each thread copies `values_per_thread` elements with a vectorized loop.
        with ib.for_range(0, values_per_thread, kind="vectorize") as i:
            A_sh[tx * values_per_thread + i] = Aptr[tx * values_per_thread + i]
            B_sh[tx * values_per_thread + i] = Bptr[tx * values_per_thread + i]
        with ib.for_range(0, values_per_thread) as i:
            Cptr[tx * values_per_thread + i] = (
                cast(A_sh[tx * values_per_thread + i], "float32") + B_sh[tx * values_per_thread + i]
            )
        return ib.get()

    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vadd",
        dtype="float32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    verify_single_allocation(mod["main"].body)

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        # Run with two concrete sizes since `n` is symbolic.
        for n in [512, 1024]:
            a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
            b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
            c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
            fadd(a, b, c)
            tvm.testing.assert_allclose(
                c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4
            )

    for target in ["cuda", "nvptx"]:
        check_target(target)
@tvm.testing.requires_gpu
def test_dyn_shared_reuse_and_merge():
    """Static "shared.dyn" buffers may be reused/merged; a dynamically sized one cannot."""
    n = 64
    A = te.placeholder((n,), name="A", dtype="float32")
    B = te.placeholder((n,), name="B", dtype="float32")
    # C has a symbolic length, so its shared buffer cannot reuse A/B's space.
    C = te.placeholder((te.size_var("n_dyn"),), name="C", dtype="float32")

    def test_device_ir(A, B, C, D):
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn", name="A_sh")
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn", name="B_sh")
        C_sh = ib.allocate(C.dtype, (C.shape[0],), scope="shared.dyn", name="C_sh")
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        Dptr = ib.buffer_ptr(D)
        A_sh[tx] = Aptr[tx]
        Dptr[tx] = A_sh[tx]
        B_sh[tx] = Bptr[tx]
        Dptr[tx] += B_sh[tx]
        C_sh[tx] = Cptr[tx]  # C cannot reuse other buffers since its size is dynamic
        Dptr[tx] += C_sh[tx]
        return ib.get()

    D = te.extern(
        (n,),
        [A, B, C],
        lambda ins, outs: test_device_ir(ins[0], ins[1], ins[2], outs[0]),
        name="vadd",
        dtype="float32",
    )
    s = te.create_schedule(D.op)
    mod = run_passes(s, [A, B, C, D])
    # merged allocation
    # allocate(buf_dyn_shmem: Pointer(shared.dyn uint8), uint8, [((n_dyn*4) + 256)]);
    verify_single_allocation(mod["main"].body)

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C, D], target)
        dev = tvm.device(target, 0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.random.uniform(size=n).astype(C.dtype), dev)
        d = tvm.nd.array(np.zeros((n,), dtype=D.dtype), dev)
        fadd(a, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), a.numpy() + b.numpy() + c.numpy(), 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
def test_dyn_shared_more_dtype():
    """Merge "shared.dyn" allocations of int8, int16 and int32 into one buffer."""
    n = 512
    A = te.placeholder((n,), name="A", dtype="int8")
    B = te.placeholder((n,), name="B", dtype="int16")

    def test_device_ir(A, B, C):
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
        A_sh = ib.allocate(A.dtype, (n,), scope="shared.dyn")  # i8
        B_sh = ib.allocate(B.dtype, (n,), scope="shared.dyn")  # i16
        C_sh = ib.allocate(C.dtype, (n,), scope="shared.dyn")  # i32
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        A_sh[tx] = Aptr[tx]
        B_sh[tx] = Bptr[tx]
        C_sh[tx] = cast(A_sh[tx], "int32") + cast(B_sh[tx], "int32")
        Cptr[tx] = C_sh[tx]
        return ib.get()

    C = te.extern(
        (n,),
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vadd",
        dtype="int32",
    )
    s = te.create_schedule(C.op)
    mod = run_passes(s, [A, B, C])
    # The merged single allocation is expected to have a first extent of n * 4.
    verify_single_allocation(mod["main"].body, n * 4)

    def check_target(target):
        # NOTE(review): unlike the tests above, this one has no
        # @tvm.testing.requires_gpu decorator; device availability is only
        # guarded here — confirm this is intentional.
        if not tvm.testing.device_enabled(target):
            return
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((n,), dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy().astype("float32") + b.numpy(), 1e-4, 1e-4)

    for target in ["cuda", "nvptx"]:
        check_target(target)
class TestMatmul(tvm.testing.CompareBeforeAfter):
    """Shared allocations should be merged, preserving DeclBuffer if present

    This test uses a matmul PrimFunc adapted from
    test_matmul_dyn_shared, using either `T.Buffer` (Allocate without
    DeclBuffer) or `T.decl_buffer` (Allocate followed by DeclBuffer)
    for the replaced allocations.
    """

    # The pass under test.
    transform = tvm.tir.transform.MergeDynamicSharedMemoryAllocations()

    # Parametrize over the two ways a shared buffer can be declared.
    use_decl_buffer = tvm.testing.parameter(by_dict={"t_buffer": False, "decl_buffer": True})

    @tvm.testing.fixture
    def buffer_func(self, use_decl_buffer):
        # Select the buffer-declaration helper used by both fixtures below.
        if use_decl_buffer:
            return T.decl_buffer
        else:
            return T.Buffer

    @tvm.testing.fixture
    def before(self, buffer_func):
        # Input: three separate 256-element "shared.dyn" allocations
        # (A_sh/B_sh fp16, C_sh fp32).
        @T.prim_func
        def func(
            A: T.Buffer((1024, 1024), "float16"),
            B: T.Buffer((1024, 1024), "float16"),
            matmul: T.Buffer((1024, 1024), "float32"),
        ):
            A_flat = T.Buffer(1048576, "float16", data=A.data)
            B_flat = T.Buffer(1048576, "float16", data=B.data)
            matmul_flat = T.Buffer(1048576, data=matmul.data)
            threadIdx_x = T.launch_thread("threadIdx.x", 16)
            C_local_data = T.allocate([1], "float32", "local")
            C_local = T.Buffer(1, data=C_local_data, scope="local")
            A_sh_data = T.allocate([256], "float16", "shared.dyn")
            A_sh = buffer_func(256, "float16", data=A_sh_data, scope="shared.dyn")
            B_sh_data = T.allocate([256], "float16", "shared.dyn")
            B_sh = buffer_func(256, "float16", data=B_sh_data, scope="shared.dyn")
            C_sh_data = T.allocate([256], "float32", "shared.dyn")
            C_sh = buffer_func(256, "float32", data=C_sh_data, scope="shared.dyn")
            threadIdx_y = T.launch_thread("threadIdx.y", 16)
            blockIdx_x = T.launch_thread("blockIdx.x", 64)
            blockIdx_y = T.launch_thread("blockIdx.y", 64)
            C_local[0] = T.float32(0)
            for i in range(64):
                A_sh[threadIdx_y * 16 + threadIdx_x] = A_flat[
                    blockIdx_y * 16384 + threadIdx_y * 1024 + i * 16 + threadIdx_x
                ]
                B_sh[threadIdx_y * 16 + threadIdx_x] = B_flat[
                    i * 16384 + threadIdx_y * 1024 + blockIdx_x * 16 + threadIdx_x
                ]
                T.tvm_storage_sync("shared")
                for k in range(16):
                    C_local[0] = C_local[0] + T.Cast(
                        "float32",
                        A_sh[threadIdx_y * 16 + k] * B_sh[k * 16 + threadIdx_x],
                    )
                T.tvm_storage_sync("shared")
                C_sh[threadIdx_y * 16 + threadIdx_x] = C_local[0]
                T.tvm_storage_sync("shared.dyn")
                matmul_flat[
                    blockIdx_y * 16384 + threadIdx_y * 1024 + blockIdx_x * 16 + threadIdx_x
                ] = C_sh[threadIdx_y * 16 + threadIdx_x]

        return func

    @tvm.testing.fixture
    def expected(self, buffer_func):
        # Expected output: a single 1024-byte uint8 "shared.dyn" allocation,
        # with all three buffers aliasing it (A_sh offset by 256 elements).
        @T.prim_func
        def func(
            A: T.Buffer((1024, 1024), "float16"),
            B: T.Buffer((1024, 1024), "float16"),
            matmul: T.Buffer((1024, 1024), "float32"),
        ):
            A_flat = T.Buffer(1048576, "float16", data=A.data)
            B_flat = T.Buffer(1048576, "float16", data=B.data)
            matmul_flat = T.Buffer(1048576, data=matmul.data)
            threadIdx_x = T.launch_thread("threadIdx.x", 16)
            buf_dyn_shmem = T.allocate([1024], "uint8", "shared.dyn")
            C_local_data = T.allocate([1], "float32", "local")
            C_local = T.Buffer(1, data=C_local_data, scope="local")
            A_sh = buffer_func(256, "float16", data=buf_dyn_shmem, scope="shared.dyn")
            B_sh = buffer_func(256, "float16", data=buf_dyn_shmem, scope="shared.dyn")
            C_sh = buffer_func(256, "float32", data=buf_dyn_shmem, scope="shared.dyn")
            threadIdx_y = T.launch_thread("threadIdx.y", 16)
            blockIdx_x = T.launch_thread("blockIdx.x", 64)
            blockIdx_y = T.launch_thread("blockIdx.y", 64)
            C_local[0] = T.float32(0)
            for i in range(64):
                A_sh[threadIdx_y * 16 + threadIdx_x + 256] = A_flat[
                    blockIdx_y * 16384 + threadIdx_y * 1024 + i * 16 + threadIdx_x
                ]
                B_sh[threadIdx_y * 16 + threadIdx_x] = B_flat[
                    i * 16384 + threadIdx_y * 1024 + blockIdx_x * 16 + threadIdx_x
                ]
                T.tvm_storage_sync("shared")
                for k in range(16):
                    C_local[0] = C_local[0] + T.Cast(
                        "float32",
                        A_sh[threadIdx_y * 16 + k + 256] * B_sh[k * 16 + threadIdx_x],
                    )
                T.tvm_storage_sync("shared")
                C_sh[threadIdx_y * 16 + threadIdx_x] = C_local[0]
                T.tvm_storage_sync("shared.dyn")
                matmul_flat[
                    blockIdx_y * 16384 + threadIdx_y * 1024 + blockIdx_x * 16 + threadIdx_x
                ] = C_sh[threadIdx_y * 16 + threadIdx_x]

        return func
if __name__ == "__main__":
    # Allow running this test file directly via `python <file>`.
    tvm.testing.main()
| 16,314 | 34.544662 | 115 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_module_based_interface.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm.contrib import graph_executor
from tvm.contrib.debugger import debug_executor
from tvm.contrib.cuda_graph import cuda_graph_executor
import tvm.testing
def input_shape(mod):
    """Return the shape of the first argument of `mod`'s "main" function as ints."""
    first_arg_type = mod["main"].checked_type.arg_types[0]
    return [int(dim) for dim in first_arg_type.shape]
def verify(data):
    """Run the synthetic workload on LLVM/CPU for `data` and return its output.

    Serves as the reference result for the other tests in this file.
    Returns None (after printing a notice) if the llvm backend is unavailable.
    """
    if not tvm.runtime.enabled("llvm"):
        print("Skip because llvm is not enabled")
        return
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)

    executor = graph_executor.create(graph, lib, tvm.cpu())
    executor.set_input("data", data)
    executor.set_input(**graph_params)
    executor.run()
    return executor.get_output(0).numpy()
@tvm.testing.requires_llvm
def test_legacy_compatibility():
    """The legacy (graph, lib, params) build outputs still work with graph_executor.create."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        graph, lib, graph_params = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")

    executor = graph_executor.create(graph, lib, tvm.cpu())
    executor.set_input("data", data)
    executor.set_input(**graph_params)
    executor.run()
    result = executor.get_output(0).numpy()
    tvm.testing.assert_allclose(result, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu():
    """Run a module-based build on CPU through both the raw API and the wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    expected = verify(data)
    dev = tvm.cpu()

    # Raw packed-function API.
    gmod = complied_graph_lib["default"](dev)
    gmod["set_input"]("data", tvm.nd.array(data))
    gmod["run"]()
    tvm.testing.assert_allclose(gmod["get_output"](0).numpy(), expected, atol=1e-5)

    # High-level GraphModule wrapper.
    wrapper = graph_executor.GraphModule(complied_graph_lib["default"](dev))
    wrapper.set_input("data", data)
    wrapper.run()
    tvm.testing.assert_allclose(wrapper.get_output(0).numpy(), expected, atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_json():
    """An exported module's "get_graph_json" packed function returns the graph JSON.

    Fixes: the local was named `json` (shadowing the stdlib module), the
    assertion compared `isinstance(...) == True`, and substring presence was
    tested with `.find(...) > -1` instead of the `in` operator.
    """
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils

    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    complied_graph_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)

    graph_json = loaded_lib["get_graph_json"]()
    assert isinstance(graph_json, str)
    # The graph must contain the fused op generated for this workload.
    assert "tvmgen_default_fused_nn_softmax_add" in graph_json
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_run():
    """Parameters recovered via "get_graph_params" can be fed back into a run."""
    mod, params = relay.testing.synthetic.get_workload()
    with tvm.transform.PassContext(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cpu()

    from tvm.contrib import utils

    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    complied_graph_lib.export_library(path_lib)

    loaded_lib = tvm.runtime.load_module(path_lib)
    loaded_params = loaded_lib["get_graph_params"]()

    executor = graph_executor.GraphModule(loaded_lib["default"](dev))
    executor.set_input(key="data", value=data, **loaded_params)
    executor.run()
    tvm.testing.assert_allclose(executor.get_output(0).numpy(), verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_compare():
    """Weights returned by "get_graph_params" match the weights used at build time."""
    # Build a small sample conv net.
    from tvm.relay.testing.init import create_workload, Constant

    inp_shape = (1, 3, 24, 12)
    dtype = "float32"
    data = relay.var("data", shape=inp_shape, dtype=dtype)
    conv_shape = [inp_shape[1], inp_shape[1], 3, 3]
    conv = relay.nn.conv2d(
        data,
        relay.var("conv_weight", shape=conv_shape, dtype=dtype),
        padding=1,
        kernel_size=3,
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    mod, params = create_workload(func, initializer=Constant())

    with tvm.transform.PassContext(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)

    from tvm.contrib import utils

    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    complied_graph_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    loaded_params = loaded_lib["get_graph_params"]()
    # The compiled weight is stored under the generated name "p0"; the [0][0]
    # indexing presumably undoes a layout transform applied during build —
    # verify against the compiled graph if this test ever breaks.
    tvm.testing.assert_allclose(
        params["conv_weight"].numpy(), loaded_params["p0"].numpy()[0][0], atol=1e-5
    )
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_gpu():
    """Run a module-based build on CUDA through both the raw API and the wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    expected = verify(data)
    dev = tvm.cuda()

    # Raw packed-function API.
    gmod = complied_graph_lib["default"](dev)
    gmod["set_input"]("data", tvm.nd.array(data))
    gmod["run"]()
    tvm.testing.assert_allclose(gmod["get_output"](0).numpy(), expected, atol=1e-5)

    # High-level GraphModule wrapper.
    wrapper = graph_executor.GraphModule(complied_graph_lib["default"](dev))
    wrapper.set_input("data", data)
    wrapper.run()
    tvm.testing.assert_allclose(wrapper.get_output(0).numpy(), expected, atol=1e-5)
@tvm.testing.uses_gpu
def test_mod_export():
def verify_cpu_export(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
# run the setup in a separate function, so the load_lib
# can get destructed right away
# test the robustness wrt to parent module destruction
def setup_gmod():
loaded_lib = tvm.runtime.load_module(path_lib)
dev = tvm.cpu(0)
return loaded_lib["default"](dev)
gmod = setup_gmod()
# raw api
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph executor wrapper
gmod = graph_executor.GraphModule(setup_gmod())
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_export(obj_format):
if not tvm.testing.device_enabled("cuda"):
print("Skip because cuda is not enabled")
return
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
# run the setup in a separate function, so the load_lib
# can get destructed right away
# test the robustness wrt to parent module destruction
def setup_gmod():
loaded_lib = tvm.runtime.load_module(path_lib)
dev = tvm.cuda()
return loaded_lib["default"](dev)
gmod = setup_gmod()
# raw api
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph executor wrapper
gmod = graph_executor.GraphModule(setup_gmod())
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def verify_rpc_cpu_export(obj_format):
mod, params = relay.testing.synthetic.get_workload()
with relay.build_config(opt_level=3):
complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
from tvm.contrib import utils
temp = utils.tempdir()
if obj_format == ".so":
file_name = "deploy_lib.so"
else:
assert obj_format == ".tar"
file_name = "deploy_lib.tar"
path_lib = temp.relpath(file_name)
complied_graph_lib.export_library(path_lib)
from tvm import rpc
remote = rpc.LocalSession()
remote.upload(path_lib)
loaded_lib = remote.load_module(path_lib)
data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
dev = remote.cpu()
# raw api
gmod = loaded_lib["default"](dev)
set_input = gmod["set_input"]
run = gmod["run"]
get_output = gmod["get_output"]
set_input("data", tvm.nd.array(data, device=dev))
run()
out = get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
# graph executor wrapper
gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
gmod.set_input("data", data)
gmod.run()
out = gmod.get_output(0).numpy()
tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_export(obj_format):
    """Same as verify_rpc_cpu_export but targets CUDA and runs through a
    real local RPC server instead of a LocalSession."""
    if not tvm.testing.device_enabled("cuda"):
        print("Skip because cuda is not enabled")
        return
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    from tvm.contrib import utils
    temp = utils.tempdir()
    # Pick the exported file name matching the requested object format.
    if obj_format == ".so":
        file_name = "deploy_lib.so"
    else:
        assert obj_format == ".tar"
        file_name = "deploy_lib.tar"
    path_lib = temp.relpath(file_name)
    complied_graph_lib.export_library(path_lib)
    from tvm import rpc

    def check_remote(server):
        # Upload and run the exported library on the RPC server's CUDA device.
        remote = rpc.connect(server.host, server.port)
        remote.upload(path_lib)
        loaded_lib = remote.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = remote.cuda()
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        set_input("data", tvm.nd.array(data, device=dev))
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        gmod.set_input("data", data)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    check_remote(rpc.Server("127.0.0.1"))
for obj_format in [".so", ".tar"]:
verify_cpu_export(obj_format)
verify_gpu_export(obj_format)
verify_rpc_cpu_export(obj_format)
verify_rpc_gpu_export(obj_format)
@tvm.testing.requires_llvm
@tvm.testing.uses_gpu
def test_remove_package_params():
def verify_cpu_remove_package_params(obj_format):
    """Export the library with parameters stripped via "remove_params",
    save the parameters to a separate file, then load the library, restore
    the params with "load_params", and check outputs on CPU through both
    the raw module API and the GraphModule wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils
    temp = utils.tempdir()
    if obj_format == ".so":
        file_name = "deploy_lib.so"
    else:
        assert obj_format == ".tar"
        file_name = "deploy_lib.tar"
    path_lib = temp.relpath(file_name)
    # Strip the parameters from the module before exporting it; they are
    # serialized separately below.
    complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
    complied_graph_lib_no_params.export_library(path_lib)
    with open(temp.relpath("deploy_param.params"), "wb") as fo:
        fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
    loaded_lib = tvm.runtime.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cpu(0)
    # raw api
    gmod = loaded_lib["default"](dev)
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    load_params = gmod["load_params"]
    loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
    set_input("data", tvm.nd.array(data))
    load_params(loaded_params)
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # graph executor wrapper
    gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
    loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
    gmod.set_input("data", data)
    gmod.load_params(loaded_params)
    gmod.run()
    out = gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_gpu_remove_package_params(obj_format):
    """Same as verify_cpu_remove_package_params but built for and run on
    the local CUDA device."""
    if not tvm.testing.device_enabled("cuda"):
        print("Skip because cuda is not enabled")
        return
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    from tvm.contrib import utils
    temp = utils.tempdir()
    if obj_format == ".so":
        file_name = "deploy_lib.so"
    else:
        assert obj_format == ".tar"
        file_name = "deploy_lib.tar"
    path_lib = temp.relpath(file_name)
    # Strip the parameters before export; they are saved separately below.
    complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
    complied_graph_lib_no_params.export_library(path_lib)
    with open(temp.relpath("deploy_param.params"), "wb") as fo:
        fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
    loaded_lib = tvm.runtime.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cuda(0)
    # raw api
    gmod = loaded_lib["default"](dev)
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    load_params = gmod["load_params"]
    loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
    set_input("data", tvm.nd.array(data))
    load_params(loaded_params)
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # graph executor wrapper
    gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
    loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
    gmod.set_input("data", data)
    gmod.load_params(loaded_params)
    gmod.run()
    out = gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def verify_rpc_cpu_remove_package_params(obj_format):
    """Parameter-stripped export, loaded over a local RPC session on CPU;
    params are restored via "load_params" before checking outputs."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils
    temp = utils.tempdir()
    if obj_format == ".so":
        file_name = "deploy_lib.so"
    else:
        assert obj_format == ".tar"
        file_name = "deploy_lib.tar"
    path_lib = temp.relpath(file_name)
    # Strip the parameters before export; they are saved separately below.
    complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
    complied_graph_lib_no_params.export_library(path_lib)
    path_params = temp.relpath("deploy_param.params")
    with open(path_params, "wb") as fo:
        fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
    from tvm import rpc
    remote = rpc.LocalSession()
    remote.upload(path_lib)
    loaded_lib = remote.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = remote.cpu()
    # raw api
    gmod = loaded_lib["default"](dev)
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    load_params = gmod["load_params"]
    loaded_params = bytearray(open(path_params, "rb").read())
    set_input("data", tvm.nd.array(data, device=dev))
    load_params(loaded_params)
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # graph executor wrapper
    gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
    loaded_params = bytearray(open(path_params, "rb").read())
    gmod.set_input("data", data)
    gmod.load_params(loaded_params)
    gmod.run()
    out = gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def verify_rpc_gpu_remove_package_params(obj_format):
    """Parameter-stripped export, loaded over a local RPC session on the
    remote CUDA device; params restored via "load_params"."""
    if not tvm.testing.device_enabled("cuda"):
        print("Skip because cuda is not enabled")
        return
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    from tvm.contrib import utils
    temp = utils.tempdir()
    if obj_format == ".so":
        file_name = "deploy_lib.so"
    else:
        assert obj_format == ".tar"
        file_name = "deploy_lib.tar"
    path_lib = temp.relpath(file_name)
    # Strip the parameters before export; they are saved separately below.
    complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
    complied_graph_lib_no_params.export_library(path_lib)
    path_params = temp.relpath("deploy_param.params")
    with open(path_params, "wb") as fo:
        fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
    from tvm import rpc
    remote = rpc.LocalSession()
    remote.upload(path_lib)
    loaded_lib = remote.load_module(path_lib)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = remote.cuda()
    # raw api
    gmod = loaded_lib["default"](dev)
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    load_params = gmod["load_params"]
    loaded_params = bytearray(open(path_params, "rb").read())
    set_input("data", tvm.nd.array(data, device=dev))
    load_params(loaded_params)
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # graph executor wrapper
    gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
    loaded_params = bytearray(open(path_params, "rb").read())
    gmod.set_input("data", data)
    gmod.load_params(loaded_params)
    gmod.run()
    out = gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
for obj_format in [".so", ".tar"]:
verify_cpu_remove_package_params(obj_format)
verify_gpu_remove_package_params(obj_format)
verify_rpc_cpu_remove_package_params(obj_format)
verify_rpc_gpu_remove_package_params(obj_format)
@tvm.testing.requires_llvm
def test_debug_graph_executor():
    """Run the compiled synthetic workload through the debug graph
    executor, via both the raw "debug_create" API and the
    GraphModuleDebug wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    # raw api
    dev = tvm.cpu()
    try:
        gmod = complied_graph_lib["debug_create"]("default", dev)
    except:
        # The debug executor is an optional build component; skip if absent.
        print("Skip because debug graph_executor not enabled")
        return
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    set_input("data", tvm.nd.array(data))
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # debug graph executor wrapper
    debug_g_mod = debug_executor.GraphModuleDebug(
        complied_graph_lib["debug_create"]("default", dev),
        [dev],
        complied_graph_lib.get_graph_json(),
        None,
    )
    debug_g_mod.set_input("data", data)
    debug_g_mod.run()
    out = debug_g_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_cudagraph
def test_cuda_graph_executor():
    """Run the compiled synthetic workload through the CUDA-graph
    executor, via both the raw "cuda_graph_create" API and the
    GraphModuleCudaGraph wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with tvm.transform.PassContext(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cuda()
    try:
        gmod = complied_graph_lib["cuda_graph_create"](dev)
    except:
        # CUDA-graph support is an optional build component; skip if absent.
        print("Skip because cuda_graph not enabled")
        return
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    set_input("data", tvm.nd.array(data))
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # cuda graph executor wrapper
    cu_gmod = cuda_graph_executor.GraphModuleCudaGraph(gmod)
    cu_gmod.set_input("data", data)
    cu_gmod.run()
    out = cu_gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def test_multiple_imported_modules():
    """Check that functions in imported modules are reachable through
    Module.get_function(..., query_imports=True)."""

    def make_func(symbol):
        # A trivial PrimFunc (A[i + 1] = A[i] + 1) carrying `symbol` as
        # its global symbol.
        n = tvm.te.size_var("n")
        Ab = tvm.tir.decl_buffer((n,), dtype="float32")
        i = tvm.te.var("i")
        stmt = tvm.tir.For(
            i,
            0,
            n - 1,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1]),
        )
        return tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", symbol)

    def make_module(mod):
        # Build the given dict of PrimFuncs into a runtime module for llvm.
        mod = tvm.IRModule(mod)
        mod = tvm.driver.build(mod, target="llvm")
        return mod

    module_main = make_module({"main": make_func("main")})
    module_a = make_module({"func_a": make_func("func_a")})
    module_b = make_module({"func_b": make_func("func_b")})
    module_main.import_module(module_a)
    module_main.import_module(module_b)
    # Both imported symbols must be resolvable from the main module.
    module_main.get_function("func_a", query_imports=True)
    module_main.get_function("func_b", query_imports=True)
def test_num_threads():
    """Check tvm.runtime.num_threads against the environment / hardware.

    TVM_NUM_THREADS takes precedence, then OMP_NUM_THREADS; otherwise the
    count must match the logical CPU count (or half of it, for runtimes
    that use one thread per physical core).
    """
    reported = tvm.runtime.num_threads()
    env_threads = os.getenv("TVM_NUM_THREADS")
    omp_env_threads = os.getenv("OMP_NUM_THREADS")
    if env_threads is not None:
        # os.getenv returns a string; convert before comparing with the
        # integer thread count (int == str is always False in Python).
        assert reported == int(env_threads)
    elif omp_env_threads is not None:
        assert reported == int(omp_env_threads)
    else:
        hardware_threads = os.cpu_count()
        assert reported == hardware_threads or reported == hardware_threads // 2
@tvm.testing.requires_llvm
@tvm.testing.requires_package("torch")
def test_graph_module_zero_copy():
    """Exercise set_input_zero_copy / set_output_zero_copy: the executor
    reads from and writes into externally-owned (torch) buffers without
    copying."""
    mod = tvm.IRModule()
    params = {}
    dev = tvm.cpu()
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1, 10))
    z = relay.add(x, y)
    mod["main"] = relay.Function([x, y], z)
    # need torch to do the from_dlpack trick
    import torch

    compiled_graph_lib = relay.build(mod, target="llvm", params=params)
    gm = graph_executor.GraphModule(compiled_graph_lib["default"](dev))
    x_data = torch.rand((1, 10))
    y_data = torch.rand((1, 10))
    z_data = torch.rand((1, 10))
    z_torch = x_data + y_data
    # zero copy run
    # Sanity check: the output buffer does not already hold the answer.
    assert not np.allclose(z_data.numpy(), z_torch.numpy())
    gm.set_input_zero_copy("x", tvm.nd.from_dlpack(x_data))
    gm.set_input_zero_copy("y", tvm.nd.from_dlpack(y_data))
    gm.set_output_zero_copy(0, tvm.nd.from_dlpack(z_data))
    gm.run()
    # run() must have written the result directly into z_data.
    tvm.testing.assert_allclose(z_data.numpy(), z_torch.numpy())
    # zero input copy with params
    gm = graph_executor.GraphModule(compiled_graph_lib["default"](dev))
    gm.set_input_zero_copy(x=tvm.nd.from_dlpack(x_data), y=tvm.nd.from_dlpack(y_data))
    gm.run()
    tvm.testing.assert_allclose(gm.get_output(0).numpy(), z_torch.numpy())
if __name__ == "__main__":
test_legacy_compatibility()
test_cpu()
test_gpu()
test_mod_export()
test_remove_package_params()
test_debug_graph_executor()
test_multiple_imported_modules()
test_cpu_get_graph_json()
test_cpu_get_graph_params_run()
test_cpu_get_graph_params_compare()
test_graph_module_zero_copy()
| 26,827 | 35.205128 | 89 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test feature extraction"""
import math
import tempfile
import tvm
from tvm import te, auto_scheduler, relay
from tvm.script import tir as T
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
def fequal(a, b):
    """Return True when *a* and *b* differ by less than 1e-6."""
    return abs(a - b) < 1e-6
def test_cpu_matmul():
    """Extract per-store features from a hand-scheduled 512^3 matmul on
    CPU and check memory-access and annotation features."""
    dag = auto_scheduler.ComputeDAG(matmul_auto_scheduler_test(512, 512, 512))
    s = dag.get_init_state()
    C = s.stage_ops[2]
    i, j, k = s[C].iters
    io, ii = s.split(C, i, [16])
    jo, ji = s.split(C, j, [8])
    s.reorder(C, [io, jo, k, ji, ii])
    s.vectorize(C, ji)
    s.parallel(C, io)
    s.parallel(C, jo)
    s.unroll(C, k)
    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
    names = auto_scheduler.feature.get_per_store_feature_names()
    fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
    stage_0 = fea[0]
    assert len(stage_0) == len(names), "%d vs %d" % (len(stage_0), len(names))
    fea_dict = {}
    for name, value in zip(names, stage_0):
        fea_dict[name] = value
    # Identify which feature buffer (B0/B1/B2) corresponds to C
    # (read-write), B (read, stride 0) and A (read, non-zero stride).
    for name in ["B0", "B1", "B2"]:
        if fequal(fea_dict[name + ".acc_type.kReadWrite"], 1.0):
            c_name = name
        if fequal(fea_dict[name + ".acc_type.kRead"], 1.0):
            if fequal(fea_dict[name + ".stride"], 0.0):
                b_name = name
            else:
                a_name = name
    """
    lowered IR:
    Placeholder: A, B
    parallel i.0 (0,32)
    parallel j.0 (0,64)
    unroll k (0,512)
    vectorize j.1 (0,8)
    for i.1 (0,16)
    C...] = A[...] * B[...]
    """
    # check touched memory in bytes, touched unique memory in bytes, reuse distance, etc.
    assert fequal(fea_dict[c_name + ".bytes"], math.log2(512**3 * 4 + 1))
    assert fequal(fea_dict[b_name + ".unique_bytes"], math.log2(512**2 * 4 + 1))
    assert fequal(fea_dict[c_name + ".reuse_dis_iter"], math.log2(8 * 16 + 1))
    assert fequal(fea_dict[c_name + ".reuse_dis_bytes"], math.log2((8 * 16 + 8 + 16) * 4 + 1))
    assert fequal(fea_dict[c_name + ".reuse_ct"], math.log2(512 + 1))
    # check annotations
    assert fequal(fea_dict["unroll_num"], math.log2(1 + 1))
    # assert fequal(fea_dict["unroll_type.kPosInnerReduce"], 1.0)
    assert fequal(fea_dict["vec_num"], math.log2(1 + 1))
    assert fequal(fea_dict["parallel_num"], math.log2(2 + 1))
    assert fequal(fea_dict["parallel_prod"], math.log2((512 * 512 / 16 / 8) + 1))
def test_cpu_fusion():
    """Check reuse-distance features after compute_at fusion of two
    elementwise stages."""

    def fusion_test(N, M):
        # Two chained elementwise copies: A -> B -> C.
        A = te.placeholder((N, M), name="A")
        B = te.compute((N, M), lambda i, j: A[i][j], name="B")
        C = te.compute((N, M), lambda i, j: B[i][j], name="C")
        return [A, B, C]

    dag = auto_scheduler.ComputeDAG(fusion_test(64, 32))
    s = dag.get_init_state()
    # Fuse stage B into stage C at C's second loop.
    s.compute_at(1, 2, s.stages[2].iters[1])
    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)
    names = auto_scheduler.feature.get_per_store_feature_names()
    fea = auto_scheduler.feature.get_per_store_features_from_states([s], task)[0]
    """
    lowered IR:
    Placeholder: A
    for i (0,64)
    for j (0,32)
    for ii (1)
    for jj (1)
    B[...] = A[...]
    C[...] = B[...]
    """
    # check reuse distance and reuse type after fusion
    found = False
    for stage_fea in fea:
        for i, (name, value) in enumerate(zip(names, stage_fea)):
            if "reuse_type.kSerialMultipleReadWrite" in name and value > 0.5:
                # reuse distance in #iter
                assert fequal(stage_fea[i + 2], 1.0)
                # reuse distance in bytes
                assert fequal(stage_fea[i + 3], math.log2(16 + 1))
                found = True
    assert found
def test_gpu_feature():
    """Replay a recorded CUDA matmul schedule and check the GPU-specific
    features (block/vthread/thread extents, is_gpu flag)."""
    # Use records to build a complicated GPU program
    json_records = "\n".join(
        (
            """{"i": [["[\\"matmul_auto_scheduler_test\\", 512, 512, 512]", "cuda"], [[], [["CHW", 2, "local"], ["SP", 2, 0, 512, [1, 16, 32, 1], 1], ["SP", 2, 5, 512, [4, 1, 1, 16], 1], ["SP", 2, 10, 512, [1, 2], 1], ["RE", 2, [0, 5, 1, 6, 2, 7, 10, 11, 3, 8, 12, 4, 9]], ["FSP", 3, 0, 1, 3], ["FSP", 3, 4, 2, 3], ["RE", 3, [0, 4, 1, 5, 2, 6, 3, 7]], ["FU", 2, [0, 1]], ["FU", 3, [0, 1]], ["FU", 2, [1, 2]], ["FU", 3, [1, 2]], ["FU", 2, [2, 3]], ["FU", 3, [2, 3]], ["CA", 2, 3, 2], ["CHR", 1, "shared", [2]], ["CA", 2, 3, 3], ["FU", 2, [0, 1]], ["FFSP", 2, 0, [1, 2], 1, 1], ["AN", 2, 1, 6], ["CHR", 0, "shared", [3]], ["CA", 1, 4, 3], ["FU", 1, [0, 1]], ["FFSP", 1, 0, [1, 2], 1, 1], ["AN", 1, 1, 6], ["AN", 5, 0, 5], ["AN", 5, 1, 4], ["AN", 5, 2, 6], ["PR", 4, 0, "auto_unroll_max_step$1024"]]]], "r": [[0.00536798], 0, 2.49277, 1585564852], "v": "v0.1"}""",
        )
    )
    # load states
    with tempfile.NamedTemporaryFile(mode="w") as f:
        f.write(json_records)
        f.flush()
        inputs, _ = auto_scheduler.RecordReader(f.name).read_lines()
        inp = inputs[0]
        task = auto_scheduler.SearchTask(
            workload_key=inp.task.workload_key,
            target=inp.task.target,
            hardware_params=auto_scheduler.HardwareParams(
                100000, 16, 64, 1 << 30, 1 << 30, 1 << 30, 1 << 30, 1 << 30
            ),
        )
        state = task.compute_dag.infer_bound_from_state(inputs[0].state)
        fea = auto_scheduler.feature.get_per_store_features_from_states([state], task)[0]
        names = auto_scheduler.feature.get_per_store_feature_names()
        # build feature dict
        fea_dicts = []
        for i in range(len(fea)):
            tmp_dict = {}
            for j in range(len(names)):
                tmp_dict[names[j]] = fea[i][j]
            fea_dicts.append(tmp_dict)
        """
        lowered IR:
        Placeholder: A, B
        blockIdx.x i.0@j.0@ (0,8)
        vthread i.1@j.1@ (0,4)
        threadIdx.x i.2@j.2@ (0,16)
        C.local auto_unroll: 1024
        for k.0 (0,256)
        for ax0@ax1@.0 (0,8)
        threadIdx.x ax0@ax1@.1 (0,16)
        B.shared = ...
        for ax0@ax1@.0 (0,64)
        threadIdx.x ax0@ax1@.1 (0,16)
        A.shared = ...
        for i_c.3 (0,32)
        for k.2 (0,2)
        for j_c.4 (0,16)
        C.local = ...
        for i.3 (0,32)
        for j.3 (0,16)
        C = ...
        """
        # check gpu-related features
        assert fequal(fea_dicts[0]["blockIdx_x_len"], math.log2(8 + 1))
        assert fequal(fea_dicts[0]["vthread_len"], math.log2(4 + 1))
        assert fequal(fea_dicts[1]["threadIdx_x_len"], math.log2(16 + 1))
        assert fequal(fea_dicts[0]["threadIdx_y_len"], math.log2(1 + 1))
        assert fequal(fea_dicts[2]["blockIdx_z_len"], math.log2(1 + 1))
        assert fequal(fea_dicts[0]["is_gpu"], 1.0)
# Fixture for the feature-extraction tests below: a 128x128x128 matmul
# written against flat 1-D aliases of the 2-D buffers.
@T.prim_func
def tir_matmul(
    A: T.Buffer((256, 256), "float32"),
    B: T.Buffer((256, 256), "float32"),
    C: T.Buffer((256, 256), "float32"),
) -> None:
    # function attr dict
    T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
    # Flat 1-D views aliasing the parameter buffers' data.
    A_flat = T.Buffer([16384], dtype="float32", data=A.data)
    B_flat = T.Buffer([16384], dtype="float32", data=B.data)
    C_flat = T.Buffer([16384], dtype="float32", data=C.data)
    # body
    for x, y in T.grid(128, 128):
        C_flat[x * 128 + y] = T.float32(0)
        for k in T.serial(128):
            C_flat[x * 128 + y] = C_flat[x * 128 + y] + A_flat[x * 128 + k] * B_flat[y * 128 + k]
def test_primfunc_without_lowering():
    """Feature extraction straight from the un-lowered TIR matmul."""
    features = auto_scheduler.feature.named_features_from_primfunc(tir_matmul)
    assert features["float_mad"].shape == (1,)
    # featurization does not handle multiple-add right now, so they are split out
    expected_flops = 128 * 128 * 128
    assert abs(features["float_addsub"][0] - expected_flops) < 10
    assert abs(features["float_mul"][0] - expected_flops) < 10
    expected_bytes = 128 * 128 * 4  # 4 bytes per float32
    for buf_idx in range(3):
        assert abs(features[f"B{buf_idx}.unique_bytes"][0] - expected_bytes) < 10
def test_primfunc_lowered():
    """Feature extraction from the TIR matmul after full lowering."""
    # Lower the function so all passes get applied first.
    lowered = tvm.lower(tir_matmul)
    features = auto_scheduler.feature.named_features_from_primfunc(lowered["main"])
    assert features["float_mad"].shape == (1,)
    # featurization does not handle multiple-add right now, so they are split out
    expected_flops = 128 * 128 * 128
    assert abs(features["float_addsub"][0] - expected_flops) < 10
    assert abs(features["float_mul"][0] - expected_flops) < 10
    expected_bytes = 128 * 128 * 4  # 4 bytes per float32
    for buf_idx in range(3):
        assert abs(features[f"B{buf_idx}.unique_bytes"][0] - expected_bytes) < 10
def test_dense_lowered():
    """Feature extraction from a relay dense op lowered by the VM compiler."""
    a = relay.var("a", relay.TensorType((128, 128), "float32"))
    b = relay.var("b", relay.TensorType((128, 128), "float32"))
    c = relay.nn.dense(a, b)
    mod = tvm.IRModule.from_expr(relay.Function([a, b], c))
    target = "llvm"
    comp = relay.vm.VMCompiler()
    mod, params = comp.optimize(mod, params={}, target=target)
    # Pick the first lowered (non-"main") function from the module;
    # `func` keeps its value after the break.
    for name, func in mod.functions.items():
        if name.name_hint != "main":
            break
    features = auto_scheduler.feature.named_features_from_primfunc(func)
    # featurization does not handle multiple-add right now, so they are split out
    assert features["float_addsub"].sum() >= 128 * 128 * 128
    assert features["float_mul"].sum() >= 128 * 128 * 128
    total_bytes_loaded = 0
    for i in range(0, 4):
        total_bytes_loaded += features[f"B{i}.unique_bytes"].sum()
    assert total_bytes_loaded > 2 * 128 * 128 * 4  # 4 bytes per float32
# A loop with negative extent: the body never executes, so no bytes of A
# are ever touched.
@T.prim_func
def negative_extent(A: T.Buffer((1,), "float32")):
    for j in range(0, -1):
        A[j] = A[j] + 1.0
def test_negative_extent():
    """A zero-trip loop must report zero unique bytes for its buffer."""
    extracted = auto_scheduler.feature.named_features_from_primfunc(negative_extent)
    assert extracted["B0.unique_bytes"] == 0
# Broadcast of a rank-0 (scalar) buffer into a 768-element buffer; used to
# check that feature extraction handles 0-dim inputs.
@T.prim_func
def zero_dim(
    p2: T.Buffer((), "float32"),
    T_cast: T.Buffer((T.int64(1), T.int64(768)), "int8"),
):
    # function attr dict
    T.func_attr(
        {
            "tir.noalias": True,
            "Primitive": 1,
        }
    )
    # buffer definition
    T_cast_1 = T.buffer_decl([T.int64(768)], dtype="int8", data=T_cast.data)
    p2_1 = T.buffer_decl([1], dtype="float32", data=p2.data)
    # body
    for i0_i1_fused in T.serial(768):
        T_cast_1[i0_i1_fused] = p2_1[0]
def test_zero_dim():
    """Stride features must be well-defined for a 0-dim buffer access."""
    extracted = auto_scheduler.feature.named_features_from_primfunc(zero_dim)
    assert extracted["B0.stride"] == 1
    assert extracted["B1.stride"] == 1
if __name__ == "__main__":
test_cpu_matmul()
test_cpu_fusion()
test_gpu_feature()
| 11,515 | 36.633987 | 861 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_database.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
"""Test Meta Schedule Database"""
import os.path as osp
import tempfile
from typing import Callable, List, Optional
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm import relay, tir
from tvm.ir.module import IRModule
from tvm.meta_schedule.database import TuningRecord, Workload
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
# A 1024^3 fp32 matmul workload used as the primary fixture below.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# A 16x16 matmul followed by relu; used as a second, structurally distinct
# workload for the "missing workload" tests below.
@tvm.script.ir_module
class MatmulRelu:
    @T.prim_func
    def main(a: T.handle, b: T.handle, d: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        D = T.match_buffer(d, (16, 16), "float32")
        C = T.alloc_buffer((16, 16), "float32")
        for i, j, k in T.grid(16, 16, 16):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
        for i, j in T.grid(16, 16):
            with T.block("relu"):
                vi, vj = T.axis.remap("SS", [i, j])
                D[vi, vj] = T.max(C[vi, vj], 0.0)
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def _schedule_matmul(sch: Schedule):
    """Apply a fixed split/reorder tiling to the "matmul" block of *sch*."""
    matmul_block = sch.get_block("matmul")
    loop_i, loop_j, loop_k = sch.get_loops(block=matmul_block)
    i0, i1, i2, i3 = sch.split(loop=loop_i, factors=[1, 1, 2, 512])
    j0, j1, j2, j3 = sch.split(loop=loop_j, factors=[1, 512, 1, 2])
    k0, k1 = sch.split(loop=loop_k, factors=[256, 4])
    sch.reorder(i0, j0, i1, j1, k0, i2, j2, k1, i3, j3)
def _create_schedule(mod: IRModule, sch_fn: Callable[[Schedule], None]) -> Schedule:
    """Build a Schedule for *mod* with all debug checks on and run *sch_fn* on it."""
    schedule = tir.Schedule(mod=mod, debug_mask="all")
    sch_fn(schedule)
    return schedule
def _create_tmp_database(tmpdir: str, mod_eq: str = "structural") -> ms.database.JSONDatabase:
    """Create a JSONDatabase whose workload/record files live under *tmpdir*."""
    return ms.database.JSONDatabase(
        osp.join(tmpdir, "workloads.json"),
        osp.join(tmpdir, "tuning_records.json"),
        module_equality=mod_eq,
    )
def _equal_record(a: ms.database.TuningRecord, b: ms.database.TuningRecord):
    """Assert that two tuning records are equivalent field by field.

    Comparisons go through str() because the underlying objects do not
    define structural equality directly.
    """
    assert str(a.trace) == str(b.trace)
    assert str(a.run_secs) == str(b.run_secs)
    # AWAIT(@zxybazh): change to export after fixing "(bool)0"
    assert str(a.target) == str(b.target)
    assert tvm.ir.structural_equal(a.workload.mod, b.workload.mod)
    for arg0, arg1 in zip(a.args_info, b.args_info):
        assert str(arg0.as_json()) == str(arg1.as_json())
@ms.utils.derived_object
class PyMemoryDatabaseDefault(ms.database.PyDatabase):
    """In-memory tuning database relying on PyDatabase's default
    ``query_*`` implementations."""

    def __init__(self):
        super().__init__()
        self.tuning_records_: List[TuningRecord] = []
        self.workloads_: List[Workload] = []

    def has_workload(self, mod: IRModule) -> bool:
        """Return whether a structurally equal workload was committed."""
        for workload in self.workloads_:
            if tvm.ir.structural_equal(mod, workload.mod):
                return True
        # Fix: previously fell off the end and implicitly returned None,
        # despite the declared bool return type.
        return False

    def commit_workload(self, mod: IRModule) -> ms.database.Workload:
        """Return the workload for `mod`, creating and storing it if absent."""
        if self.has_workload(mod):
            for workload in self.workloads_:
                if tvm.ir.structural_equal(mod, workload.mod):
                    return workload
        else:
            workload = ms.database.Workload(mod)
            self.workloads_.append(workload)
            return workload

    def commit_tuning_record(self, record: TuningRecord) -> None:
        """Append a tuning record to the database."""
        self.tuning_records_.append(record)

    def get_all_tuning_records(self) -> List[TuningRecord]:
        """Return every committed tuning record."""
        return self.tuning_records_

    def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
        """Return up to `top_k` records for `workload`, best (lowest mean
        run time) first; records without timings sort last (1e9)."""
        return sorted(
            list(
                filter(
                    lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
                    self.tuning_records_,
                )
            ),
            key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
        )[:top_k]

    def __len__(self) -> int:
        return len(self.tuning_records_)
@ms.utils.derived_object
class PyMemoryDatabaseOverride(ms.database.PyDatabase):
    """In-memory tuning database that overrides the ``query_*`` methods to
    deliberately return the 2nd-best record when two exist, so tests can
    tell the override apart from the default behavior."""

    def __init__(self):
        super().__init__()
        self.tuning_records_: List[TuningRecord] = []
        self.workloads_: List[Workload] = []

    def has_workload(self, mod: IRModule) -> bool:
        """Return whether a structurally equal workload was committed."""
        for workload in self.workloads_:
            if tvm.ir.structural_equal(mod, workload.mod):
                return True
        # Fix: previously fell off the end and implicitly returned None,
        # despite the declared bool return type.
        return False

    def commit_workload(self, mod: IRModule) -> ms.database.Workload:
        """Return the workload for `mod`, creating and storing it if absent."""
        if self.has_workload(mod):
            for workload in self.workloads_:
                if tvm.ir.structural_equal(mod, workload.mod):
                    return workload
        else:
            workload = ms.database.Workload(mod)
            self.workloads_.append(workload)
            return workload

    def commit_tuning_record(self, record: TuningRecord) -> None:
        """Append a tuning record to the database."""
        self.tuning_records_.append(record)

    def get_all_tuning_records(self) -> List[TuningRecord]:
        """Return every committed tuning record."""
        return self.tuning_records_

    def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
        """Return up to `top_k` records for `workload`, best (lowest mean
        run time) first; records without timings sort last (1e9)."""
        return sorted(
            list(
                filter(
                    lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
                    self.tuning_records_,
                )
            ),
            key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
        )[:top_k]

    def __len__(self) -> int:
        return len(self.tuning_records_)

    def query_tuning_record(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[TuningRecord]:
        """Return the best record, or the 2nd best when two exist."""
        if self.has_workload(mod):
            records = self.get_top_k(self.commit_workload(mod), 2)
            if len(records) == 1:
                return records[0]
            elif len(records) == 2:
                return records[1]  # return the 2nd best if there are two records
        return None

    def query_schedule(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[Schedule]:
        """Replay the queried record's trace into a fresh Schedule."""
        record = self.query_tuning_record(mod, target, workload_name)
        if record is not None:
            sch = Schedule(record.workload.mod)
            record.trace.apply_to_schedule(sch, remove_postproc=False)
            return sch
        return None

    def query_ir_module(
        self, mod: IRModule, target: Target, workload_name: Optional[str] = None
    ) -> Optional[IRModule]:
        """Replay the queried record's trace and return the resulting module."""
        record = self.query_tuning_record(mod, target, workload_name)
        if record is not None:
            sch = Schedule(record.workload.mod)
            record.trace.apply_to_schedule(sch, remove_postproc=False)
            return sch.mod
        return None
def test_meta_schedule_tuning_record_round_trip():
    """A TuningRecord serialized with as_json and re-parsed with
    from_json must equal the original."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        new_record = ms.database.TuningRecord.from_json(record.as_json(), workload)
        _equal_record(record, new_record)
def test_meta_schedule_database_create():
    """Creating a JSONDatabase must materialize both backing files."""
    with tempfile.TemporaryDirectory() as tmpdir:
        db = _create_tmp_database(tmpdir)
        for path in (db.path_workload, db.path_tuning_record):
            assert osp.exists(path)
def test_meta_schedule_database_has_workload():
    """has_workload must be True only for the committed module."""
    mod: IRModule = Matmul
    missing_mod: IRModule = MatmulRelu
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        assert len(database) == 1
        assert database.has_workload(mod)
        # A structurally different module must not be reported as present.
        assert not database.has_workload(missing_mod)
def test_meta_schedule_database_add_entry():
    """A committed record must be retrievable via get_top_k and equal the
    original."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        workload = database.commit_workload(mod)
        record = ms.database.TuningRecord(
            _create_schedule(mod, _schedule_matmul).trace,
            workload,
            [1.5, 2.5, 1.8],
            tvm.target.Target("llvm"),
            ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
        )
        database.commit_tuning_record(record)
        assert len(database) == 1
        (ret,) = database.get_top_k(workload, 3)
        _equal_record(ret, record)
def test_meta_schedule_database_missing():
    """Querying a workload that has no committed records yields an empty list."""
    tuned_mod: IRModule = Matmul
    untuned_mod: IRModule = MatmulRelu
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        tuned_workload = database.commit_workload(tuned_mod)
        untuned_workload = database.commit_workload(untuned_mod)
        database.commit_tuning_record(
            ms.database.TuningRecord(
                trace=_create_schedule(tuned_mod, _schedule_matmul).trace,
                workload=tuned_workload,
                run_secs=[1.5, 2.5, 1.8],
                target=tvm.target.Target("llvm"),
                args_info=ms.arg_info.ArgInfo.from_prim_func(func=tuned_mod["main"]),
            )
        )
        assert len(database.get_top_k(untuned_workload, 3)) == 0
def test_meta_schedule_database_sorting():
    """get_top_k must return the two records with the best timings."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        token = database.commit_workload(mod)
        trace = _create_schedule(mod, _schedule_matmul).trace
        target = tvm.target.Target("llvm")
        args_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
        all_run_secs = [
            [7.0, 8.0, 9.0],
            [1.0, 2.0, 3.0],
            [4.0, 5.0, 6.0],
            [1.1, 1.2, 600.0],
            [1.0, 100.0, 6.0],
            [4.0, 9.0, 8.0],
        ]
        records = [
            ms.database.TuningRecord(trace, token, run_secs, target, args_info)
            for run_secs in all_run_secs
        ]
        for record in records:
            database.commit_tuning_record(record)
        ret = database.get_top_k(token, 2)
        assert len(ret) == 2
        # The two best records may come back in either order.
        try:
            _equal_record(ret[0], records[2])
            _equal_record(ret[1], records[1])
        except AssertionError:
            _equal_record(ret[0], records[1])
            _equal_record(ret[1], records[2])
def test_meta_schedule_database_reload():
    """Records persisted by one JSONDatabase must be visible to a fresh instance."""
    mod: IRModule = Matmul
    with tempfile.TemporaryDirectory() as tmpdir:
        database = _create_tmp_database(tmpdir)
        token = database.commit_workload(mod)
        trace = _create_schedule(mod, _schedule_matmul).trace
        target = tvm.target.Target("llvm")
        args_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
        records = [
            ms.database.TuningRecord(trace, token, run_secs, target, args_info)
            for run_secs in ([7.0, 8.0, 9.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0])
        ]
        for record in records:
            database.commit_tuning_record(record)
        # Re-open the same on-disk files through a brand-new database object.
        new_database = ms.database.JSONDatabase(
            path_workload=database.path_workload,
            path_tuning_record=database.path_tuning_record,
        )
        token = new_database.commit_workload(mod)
        ret = new_database.get_top_k(token, 2)
        assert len(ret) == 2
        # The two best records may come back in either order.
        try:
            _equal_record(ret[0], records[2])
            _equal_record(ret[1], records[1])
        except AssertionError:
            _equal_record(ret[0], records[1])
            _equal_record(ret[1], records[2])
def test_meta_schedule_database_union():
    """UnionDatabase returns the globally best record; OrderedUnionDatabase
    returns the first database's hit."""
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db_1 = ms.database.MemoryDatabase()
    db_2 = ms.database.MemoryDatabase()
    trace = _create_schedule(mod, _schedule_matmul).trace

    def query(db):  # pylint: disable=invalid-name
        return db.query_tuning_record(mod=mod, target=target, workload_name="main").run_secs

    def commit_record(db, run_sec):  # pylint: disable=invalid-name
        record = ms.database.TuningRecord(
            trace,
            workload=db.commit_workload(mod),
            run_secs=[run_sec],
            target=target,
            args_info=arg_info,
        )
        db.commit_tuning_record(record)

    commit_record(db_1, 1.0)
    (run_sec,) = query(db_1)
    assert run_sec.value == 1.0
    commit_record(db_2, 0.5)
    (run_sec,) = query(db_2)
    assert run_sec.value == 0.5
    (run_secs,) = query(ms.database.UnionDatabase(db_1, db_2))
    assert run_secs.value == 0.5  # best result across both databases
    (run_secs,) = query(ms.database.OrderedUnionDatabase(db_1, db_2))
    assert run_secs.value == 1.0  # db_1 is consulted first
def test_meta_schedule_pydatabase_default_query():
    """The default query() of a PyDatabase serves records, schedules and IRModules."""
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db = PyMemoryDatabaseDefault()  # pylint: disable=invalid-name
    sch = _create_schedule(mod, _schedule_matmul)

    def query(db, mod, target, kind):  # pylint: disable=invalid-name
        return db.query(mod=mod, target=target, workload_name="main", kind=kind)

    def commit_record(trace, db, run_sec):  # pylint: disable=invalid-name
        db.commit_tuning_record(
            ms.database.TuningRecord(
                trace,
                workload=db.commit_workload(mod),
                run_secs=[run_sec],
                target=target,
                args_info=arg_info,
            )
        )

    commit_record(sch.trace, db, 1.0)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.0
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
    # A faster record with an empty trace should now win every query kind.
    commit_record(Schedule(mod).trace, db, 0.2)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 0.2
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, mod)
def test_meta_schedule_pydatabase_override_query():
    """An overridden query() may deviate from best-record semantics; here it
    keeps serving the first (slower) record after a faster one is committed."""
    mod: IRModule = Matmul
    target = tvm.target.Target("llvm")
    arg_info = ms.arg_info.ArgInfo.from_prim_func(func=mod["main"])
    db = PyMemoryDatabaseOverride()  # pylint: disable=invalid-name
    sch = _create_schedule(mod, _schedule_matmul)

    def query(db, mod, target, kind):  # pylint: disable=invalid-name
        return db.query(mod=mod, target=target, workload_name="main", kind=kind)

    def commit_record(trace, db, run_sec):  # pylint: disable=invalid-name
        db.commit_tuning_record(
            ms.database.TuningRecord(
                trace,
                workload=db.commit_workload(mod),
                run_secs=[run_sec],
                target=target,
                args_info=arg_info,
            )
        )

    commit_record(sch.trace, db, 1.14)
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.14
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
    commit_record(Schedule(mod).trace, db, 0.514)  # Empty Trace
    record = query(db, mod, target, "record")
    assert record is not None and record.run_secs[0].value == 1.14  # Override to 2nd best
    sch_res = query(db, mod, target, "schedule")
    assert sch_res is not None and tvm.ir.structural_equal(sch_res.mod, sch.mod)
    mod_res = query(db, mod, target, "ir_module")
    assert mod_res is not None and tvm.ir.structural_equal(mod_res, sch.mod)
def test_meta_schedule_pydatabase_current():
    """Entering a database's context must make it Database.current()."""
    db = PyMemoryDatabaseDefault()  # pylint: disable=invalid-name
    with db:  # pylint: disable=not-context-manager
        assert ms.database.Database.current() == db
def call_get_top_k(run_secs_list, database, k):
    """Commit one tuning record per entry of `run_secs_list` against a fresh
    Matmul workload, then return the top-k timings as plain Python floats."""
    mod: IRModule = Matmul
    workload = database.commit_workload(mod)
    for run_secs in run_secs_list:
        database.commit_tuning_record(
            ms.database.TuningRecord(
                _create_schedule(mod, _schedule_matmul).trace,
                workload,
                run_secs,
                tvm.target.Target("llvm"),
                ms.arg_info.ArgInfo.from_prim_func(func=mod["main"]),
            )
        )
    top_k = database.get_top_k(workload, k)
    return [[v.value for v in record.run_secs] for record in top_k]
@pytest.mark.parametrize(
    "k,expected",
    [
        (0, []),
        (1, [[0.0, 2.0]]),
        (4, [[0.0, 2.0], [2.0], [1.5, 4.5], [3.0, 1e10]]),
        (5, [[0.0, 2.0], [2.0], [1.5, 4.5], [3.0, 1e10]]),
    ],
)
def test_memory_database_get_top_k(k, expected):
    """Top-k results are sorted best-first; records with empty, None, or
    purely-1e10 timings are excluded from the ranking."""
    run_secs_list = [[1.5, 4.5], [], [0.0, 2.0], None, [2.0], [3.0, 1e10], [1e10]]
    assert call_get_top_k(run_secs_list, ms.database.MemoryDatabase(), k) == expected
@pytest.mark.parametrize(
    "k,expected",
    [
        (0, []),
        (4, [[0.0, 2.0], [2.0], [1.5, 4.5], [3.0, 1e10]]),
        (5, [[0.0, 2.0], [2.0], [1.5, 4.5], [3.0, 1e10]]),
    ],
)
def test_json_database_get_top_k(k, expected):
    """Same top-k semantics as the in-memory database, on the JSON backend."""
    run_secs_list = [[1.5, 4.5], [], [0.0, 2.0], None, [2.0], [3.0, 1e10], [1e10]]
    with tempfile.TemporaryDirectory() as tmpdir:
        assert call_get_top_k(run_secs_list, _create_tmp_database(tmpdir), k) == expected
def MatmulFunc() -> IRModule:
    """Build a 1024x1024 fp32 Relay matmul wrapped in an IRModule."""
    ttype = relay.TensorType((1024, 1024), "float32")
    lhs = relay.var("a", ttype)
    rhs = relay.var("b", ttype)
    return tvm.IRModule.from_expr(relay.Function([lhs, rhs], relay.nn.matmul(lhs, rhs)))
def MatmulPrimFunc() -> IRModule:
    # Factory matching MatmulFunc's shape: returns the module-level TIR Matmul.
    return Matmul
@pytest.mark.parametrize("f_mod", [MatmulPrimFunc, MatmulFunc])
@pytest.mark.parametrize("mod_eq", ["structural", "ignore-ndarray", "anchor-block"])
def test_json_database_commit_workload(f_mod, mod_eq):
    """commit_workload must accept every supported module-equality mode."""
    mod: IRModule = f_mod()
    with tempfile.TemporaryDirectory() as tmpdir:
        _create_tmp_database(tmpdir, mod_eq).commit_workload(mod)
@pytest.mark.parametrize("f_mod", [MatmulPrimFunc, MatmulFunc])
@pytest.mark.parametrize("mod_eq", ["structural", "ignore-ndarray", "anchor-block"])
def test_memory_database_commit_workload(f_mod, mod_eq):
    """MemoryDatabase must accept every supported module-equality mode."""
    mod: IRModule = f_mod()
    ms.database.MemoryDatabase(module_equality=mod_eq).commit_workload(mod)
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    tvm.testing.main()
| 22,746 | 35.867099 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_lower_build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm.ir.module import IRModule
from tvm.script import tir as T
import tvm.testing
def _check_module_with_numpy(mod, shape=(128, 128, 128)):
    """Run the built `mod` on random inputs and check C == A @ B^T vs numpy."""
    m, n, k = shape
    a_np = np.random.rand(m, k).astype("float32")
    b_np = np.random.rand(n, k).astype("float32")
    a = tvm.nd.array(a_np)
    b = tvm.nd.array(b_np)
    c = tvm.nd.array(np.zeros((m, n), dtype="float32"))
    mod(a, b, c)
    tvm.testing.assert_allclose(c.numpy(), np.dot(a_np, b_np.transpose()), rtol=1e-5)
# pylint: disable=no-self-argument, missing-class-docstring, missing-function-docstring
# TIR matmul computing C = A @ B^T with separate init and update blocks.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Reference output of lowering the TE schedule (flattened buffers, legacy-TE attr).
# Kept byte-exact: tests compare against it with assert_structural_equal.
@tvm.script.ir_module
class LoweredModule:
    @T.prim_func
    def main(
        A: T.Buffer((128, 128), "float32"),
        B: T.Buffer((128, 128), "float32"),
        C: T.Buffer((128, 128), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "from_legacy_te_schedule": True, "tir.noalias": True})
        A_flat = T.Buffer([16384], data=A.data)
        B_flat = T.Buffer([16384], data=B.data)
        C_flat = T.Buffer([16384], data=C.data)
        # body
        for x, y in T.grid(128, 128):
            C_flat[x * 128 + y] = 0.0
            for k in T.serial(0, 128):
                C_flat[x * 128 + y] = (
                    C_flat[x * 128 + y] + A_flat[x * 128 + k] * B_flat[y * 128 + k]
                )
# Reference output of lowering the TIR matmul (same body as LoweredModule but
# without the "from_legacy_te_schedule" attribute). Kept byte-exact.
@tvm.script.ir_module
class LoweredTIRModule:
    @T.prim_func
    def main(
        A: T.Buffer((128, 128), "float32"),
        B: T.Buffer((128, 128), "float32"),
        C: T.Buffer((128, 128), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A_flat = T.Buffer([16384], data=A.data)
        B_flat = T.Buffer([16384], data=B.data)
        C_flat = T.Buffer([16384], data=C.data)
        # body
        for x, y in T.grid(128, 128):
            C_flat[x * 128 + y] = 0.0
            for k in T.serial(0, 128):
                C_flat[x * 128 + y] = (
                    C_flat[x * 128 + y] + A_flat[x * 128 + k] * B_flat[y * 128 + k]
                )
def test_lower_build_te_schedule():
    """Lower a TE matmul schedule, compare with LoweredModule, then build and run."""
    size = 128
    reduce_k = te.reduce_axis((0, size), "k")
    A = te.placeholder((size, size), name="A")
    B = te.placeholder((size, size), name="B")
    C = te.compute(
        (size, size),
        lambda x, y: te.sum(A[x, reduce_k] * B[y, reduce_k], axis=reduce_k),
        name="C",
    )
    sched = te.create_schedule(C.op)
    # CSE is disabled, otherwise commoning would make the TIR diverge from the reference.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        lowered = tvm.lower(sched, [A, B, C])
    tvm.ir.assert_structural_equal(lowered, LoweredModule)
    # check building
    _check_module_with_numpy(tvm.build(sched, [A, B, C], target="llvm"))
def test_lower_build_tir_func():
    """Lower a single TIR PrimFunc and build it straight to runnable code."""
    # CSE is disabled so the lowered output matches the reference module.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        lowered = tvm.lower(matmul)
    tvm.ir.assert_structural_equal(lowered, LoweredTIRModule)
    # check building
    _check_module_with_numpy(tvm.build(matmul, target="llvm"))
def test_lower_build_tir_module():
    """Wrap the matmul PrimFunc into an IRModule, then lower and build it."""
    func = matmul.with_attr("global_symbol", "main").with_attr("tir.noalias", True)
    ir_mod = IRModule({"main": func})
    # CSE is disabled so the lowered output matches the reference module.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        lowered = tvm.lower(ir_mod)
    tvm.ir.assert_structural_equal(lowered, LoweredTIRModule)
    # check building
    _check_module_with_numpy(tvm.build(ir_mod, target="llvm"))
def test_lower_build_lowered_module():
    """Lowering an already-lowered module must be a structural no-op."""
    # CSE is disabled so the lowered output matches the reference module.
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        relowered = tvm.lower(LoweredTIRModule)
    tvm.ir.assert_structural_equal(relowered, LoweredTIRModule)
    # check building
    _check_module_with_numpy(tvm.build(relowered, target="llvm"))
if __name__ == "__main__":
    # Run each test directly when executed as a script.
    test_lower_build_te_schedule()
    test_lower_build_tir_func()
    test_lower_build_tir_module()
    test_lower_build_lowered_module()
| 5,699 | 36.748344 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_fp8_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
import tvm.testing
from tvm.script import tir as T
# pylint: disable=no-member,invalid-name,unused-variable
def get_before(dtype: str):
    """Build the pre-legalization module: elementwise add then exp on FP8 buffers."""
    @tvm.script.ir_module
    class Before:
        @T.prim_func
        def main(Aptr: T.handle(dtype), Bptr: T.handle(dtype), Dptr: T.handle(dtype)):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), dtype, data=Aptr)
            B = T.decl_buffer((100,), dtype, data=Bptr)
            D = T.decl_buffer((100,), dtype, data=Dptr)
            # C is a local intermediate buffer (not bound to a parameter).
            C = T.decl_buffer((100,), dtype)
            for i in T.grid(100):
                C[i] = A[i] + B[i]
                D[i] = T.exp(C[i])
    return Before
def promote_f8(f8_dtype: str, promote_dtype: str, v):
    """Reinterpret an FP8 value as its uint8 bits, then widen to `promote_dtype`."""
    bits = T.reinterpret("uint8", v)
    return promote_uint8(f8_dtype, promote_dtype, bits)
def cast_to_f8(f8_dtype: str, promote_dtype: str, v):
    """Narrow `v` down to an FP8 bit pattern, then reinterpret it as `f8_dtype`."""
    bits = cast_to_uint8(f8_dtype, promote_dtype, v)
    return T.reinterpret(f8_dtype, bits)
def get_after_compute_legalize(dtype: str, promote_dtype: str):
    """Expected module after FP8ComputeLegalize: arithmetic happens in
    `promote_dtype`, while parameter buffers stay in the FP8 `dtype`."""
    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(Aptr: T.handle(dtype), Bptr: T.handle(dtype), Dptr: T.handle(dtype)):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), dtype, data=Aptr)
            B = T.decl_buffer((100,), dtype, data=Bptr)
            D = T.decl_buffer((100,), dtype, data=Dptr)
            # The intermediate is promoted to the wider compute dtype.
            C = T.decl_buffer((100,), promote_dtype)
            for i in T.grid(100):
                C[i] = promote_f8(dtype, promote_dtype, A[i]) + promote_f8(
                    dtype, promote_dtype, B[i]
                )
                D[i] = cast_to_f8(dtype, promote_dtype, T.exp(C[i]))
    return After
def promote_uint8(f8_dtype: str, promote_dtype: str, v):
    """Build the TIR expression that widens FP8 bits `v` (as uint8) into
    `promote_dtype` by reassembling sign / exponent / mantissa fields.

    The exponent bias deltas (+8, +120, +112) are presumably the differences
    between the target format's bias and the FP8 format's bias — TODO confirm
    against the FP8 spec.
    """
    if f8_dtype == "e4m3_float8":
        if promote_dtype == "float16":
            mantissa = T.bitwise_and(
                T.shift_left(T.Cast("uint16", v), T.uint16(7)), T.uint16(0x3FF)
            )
            exponent = T.shift_left(
                T.Cast(
                    "uint16",
                    T.shift_right(T.shift_left(v, T.uint8(1)), T.uint8(4)) + T.uint8(8),
                ),
                T.uint16(10),
            )
            sign = T.shift_left(T.Cast("uint16", T.shift_right(v, T.uint8(7))), T.uint16(15))
            return T.reinterpret("float16", T.bitwise_or(T.bitwise_or(mantissa, exponent), sign))
        else:  # promote_dtype == "float32"
            mantissa = T.bitwise_and(
                T.shift_left(T.Cast("uint32", v), T.uint32(20)), T.uint32(0x7FFFFF)
            )
            exponent = T.shift_left(
                T.Cast(
                    "uint32",
                    T.shift_right(T.shift_left(v, T.uint8(1)), T.uint8(4)) + T.uint8(120),
                ),
                T.uint32(23),
            )
            sign = T.shift_left(T.Cast("uint32", T.shift_right(v, T.uint8(7))), T.uint32(31))
            return T.reinterpret("float32", T.bitwise_or(T.bitwise_or(mantissa, exponent), sign))
    else:  # f8_dtype == "e5m2_float8"
        if promote_dtype == "float16":
            # e5m2 -> fp16 is a pure left shift: the field layouts line up.
            return T.reinterpret("float16", T.shift_left(T.Cast("uint16", v), T.uint16(8)))
        else:  # promote_dtype == "float32"
            mantissa = T.bitwise_and(
                T.shift_left(T.Cast("uint32", v), T.uint32(21)), T.uint32(0x7FFFFF)
            )
            exponent = T.shift_left(
                T.Cast(
                    "uint32",
                    T.shift_right(T.shift_left(v, T.uint8(1)), T.uint8(3)) + T.uint8(112),
                ),
                T.uint32(23),
            )
            sign = T.shift_left(T.Cast("uint32", T.shift_right(v, T.uint8(7))), T.uint32(31))
            return T.reinterpret("float32", T.bitwise_or(T.bitwise_or(mantissa, exponent), sign))
def cast_to_uint8(f8_dtype: str, promote_dtype: str, v):
    """Build the TIR expression that narrows a `promote_dtype` value `v` down
    to the uint8 bit pattern of `f8_dtype`, with round-to-nearest-even style
    rounding bias and flush-to-zero when the exponent underflows the FP8 range.
    """
    if f8_dtype == "e4m3_float8":
        if promote_dtype == "float16":
            uint16_v = T.reinterpret("uint16", v)
            # Round-to-nearest: add half-ULP plus the LSB of the kept mantissa.
            rounding_bias = T.bitwise_and(
                T.shift_right(uint16_v, T.uint16(7)),
                T.uint16(1),
            ) + T.uint16(0x3F)
            uint16_v = uint16_v + rounding_bias
            mantissa = T.bitwise_and(
                T.Cast("uint8", T.shift_right(uint16_v, T.uint8(7))), T.uint8(0x7)
            )
            exponent_before_delta = T.shift_right(T.shift_left(uint16_v, T.uint16(1)), T.uint16(11))
            # Exponents below the re-bias threshold flush to zero.
            round_to_zero = exponent_before_delta < T.uint16(8)
            exponent = T.shift_left(
                T.Cast("uint8", exponent_before_delta - T.uint16(8)),
                T.uint8(3),
            )
            sign = T.shift_left(T.Cast("uint8", T.shift_right(uint16_v, T.uint16(15))), T.uint8(7))
            return T.if_then_else(
                round_to_zero, T.uint8(0), T.bitwise_or(T.bitwise_or(mantissa, exponent), sign)
            )
        else:  # promote_dtype == "float32"
            uint32_v = T.reinterpret("uint32", v)
            rounding_bias = T.bitwise_and(
                T.shift_right(uint32_v, T.uint32(20)), T.uint32(1)
            ) + T.uint32(0x7FFFF)
            uint32_v = uint32_v + rounding_bias
            mantissa = T.bitwise_and(
                T.Cast("uint8", T.shift_right(uint32_v, T.uint8(20))), T.uint8(0x7)
            )
            exponent_before_delta = T.shift_right(T.shift_left(uint32_v, T.uint32(1)), T.uint32(24))
            round_to_zero = exponent_before_delta < T.uint32(120)
            exponent = T.shift_left(
                T.Cast("uint8", exponent_before_delta - T.uint32(120)), T.uint8(3)
            )
            sign = T.shift_left(T.Cast("uint8", T.shift_right(uint32_v, T.uint32(31))), T.uint8(7))
            return T.if_then_else(
                round_to_zero, T.uint8(0), T.bitwise_or(T.bitwise_or(mantissa, exponent), sign)
            )
    else:  # f8_dtype == "e5m2_float8"
        if promote_dtype == "float16":
            # fp16 -> e5m2 keeps the field layout: round, then take the top byte.
            uint16_v = T.reinterpret("uint16", v)
            rounding_bias = T.bitwise_and(
                T.shift_right(uint16_v, T.uint16(8)), T.uint16(1)
            ) + T.uint16(0x7F)
            uint16_v = uint16_v + rounding_bias
            return T.Cast("uint8", T.shift_right(uint16_v, T.uint16(8)))
        else:  # promote_dtype == "float32"
            uint32_v = T.reinterpret("uint32", v)
            rounding_bias = T.bitwise_and(
                T.shift_right(uint32_v, T.uint32(21)), T.uint32(1)
            ) + T.uint32(0xFFFFF)
            uint32_v = uint32_v + rounding_bias
            mantissa = T.bitwise_and(
                T.Cast("uint8", T.shift_right(uint32_v, T.uint8(21))), T.uint8(0x3)
            )
            exponent_before_delta = T.shift_right(T.shift_left(uint32_v, T.uint32(1)), T.uint32(24))
            round_to_zero = exponent_before_delta < T.uint32(112)
            exponent = T.shift_left(
                T.Cast("uint8", exponent_before_delta - T.uint32(112)), T.uint8(2)
            )
            sign = T.shift_left(T.Cast("uint8", T.shift_right(uint32_v, T.uint32(31))), T.uint8(7))
            return T.if_then_else(
                round_to_zero, T.uint8(0), T.bitwise_or(T.bitwise_or(mantissa, exponent), sign)
            )
def get_after_storage_legalize(dtype: str, promote_dtype: str):
    """Expected module after FP8StorageLegalize: FP8 parameter buffers become
    plain uint8 storage, with explicit promote/narrow expressions around them."""
    @tvm.script.ir_module
    class After:
        @T.prim_func
        def main(Aptr: T.handle("uint8"), Bptr: T.handle("uint8"), Dptr: T.handle("uint8")):
            T.func_attr({"global_symbol": "main"})
            A = T.decl_buffer((100,), "uint8", data=Aptr)
            B = T.decl_buffer((100,), "uint8", data=Bptr)
            D = T.decl_buffer((100,), "uint8", data=Dptr)
            C = T.decl_buffer((100,), promote_dtype)
            for i in T.grid(100):
                C[i] = promote_uint8(dtype, promote_dtype, A[i]) + promote_uint8(
                    dtype, promote_dtype, B[i]
                )
                D[i] = cast_to_uint8(dtype, promote_dtype, T.exp(C[i]))
    return After
# Cross-product fixtures: both FP8 flavors against both promotion dtypes.
dtype = tvm.testing.parameter("e4m3_float8", "e5m2_float8")
promote_dtype = tvm.testing.parameter("float16", "float32")
def test_fp8_compute_legalize(dtype, promote_dtype):
    """FP8ComputeLegalize rewrites FP8 arithmetic into `promote_dtype`.
    The pass is applied twice to check that it is also idempotent."""
    expected = get_after_compute_legalize(dtype, promote_dtype)
    after = get_before(dtype)
    for _ in range(2):
        after = tvm.tir.transform.FP8ComputeLegalize(promote_dtype)(after)
    tvm.ir.assert_structural_equal(after, expected)
def test_fp8_storage_legalize(dtype, promote_dtype):
    """FP8StorageLegalize rewrites FP8 buffers into plain uint8 storage."""
    legalized = tvm.tir.transform.FP8StorageLegalize()(
        get_after_compute_legalize(dtype, promote_dtype)
    )
    tvm.ir.assert_structural_equal(legalized, get_after_storage_legalize(dtype, promote_dtype))
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    tvm.testing.main()
| 9,704 | 42.133333 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_inject_rolling_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import tvm.script
from tvm import te, topi
from tvm.driver.build_module import get_binds
from tvm.script import tir as T
def _tile_nd(s, tensor, tile):
    """Split every axis of `tensor` by the matching tile factor and reorder so
    all outer loops precede all inner loops. Returns (outer_axes, inner_axes)."""
    outer_axes = []
    inner_axes = []
    for i, factor in enumerate(tile):
        outer, inner = s[tensor].split(tensor.op.axis[i], factor)
        outer_axes.append(outer)
        inner_axes.append(inner)
    s[tensor].reorder(*outer_axes, *inner_axes)
    return outer_axes, inner_axes
@tvm.tir.transform.prim_func_pass(opt_level=0)
def remove_rolling_buffer_attr(func, mod, ctx):
    """Pass that strips every "rolling_buffer_scope" AttrStmt, keeping its body."""

    def drop_marker(node):
        is_marker = isinstance(node, tvm.tir.AttrStmt) and node.attr_key == "rolling_buffer_scope"
        return node.body if is_marker else node

    new_body = tvm.tir.stmt_functor.ir_transform(
        func.body, None, postorder=drop_marker, only_enable=["tir.AttrStmt"]
    )
    return func.with_body(new_body)
@tvm.tir.transform.prim_func_pass(opt_level=0)
def verify_no_rolling_buffer_attr(func, mod, ctx):
    """Pass that asserts no "rolling_buffer_scope" AttrStmt survived lowering."""

    def check(node):
        if isinstance(node, tvm.tir.AttrStmt):
            assert node.attr_key != "rolling_buffer_scope", "Failed to lower rolling buffers"

    tvm.tir.stmt_functor.post_order_visit(func.body, check)
    return func
def _verify_schedule(sch, inputs, output):
    """Build `sch` twice -- once with rolling-buffer attrs stripped, once with
    InjectRollingBuffer applied -- then run both on random int8 data and check
    the two builds produce identical output."""
    pass_configs = [
        [(0, remove_rolling_buffer_attr), (0, verify_no_rolling_buffer_attr)],
        [(0, tvm.tir.transform.InjectRollingBuffer()), (0, verify_no_rolling_buffer_attr)],
    ]
    built_funcs = []
    for add_lower_pass in pass_configs:
        with tvm.transform.PassContext(config={"tir.add_lower_pass": add_lower_pass}):
            built_funcs.append(tvm.build(sch, inputs + [output]))
    ctx = tvm.cpu(0)
    input_data = [
        tvm.nd.array(
            np.random.randint(
                low=-100, high=100, size=[i.value for i in tensor.shape]
            ).astype("int8"),
            ctx,
        )
        for tensor in inputs
    ]
    # Both builds write into the same output buffer; numpy() snapshots a copy.
    out = tvm.nd.array(np.zeros([i.value for i in output.shape], dtype="int8"), ctx)
    outputs = []
    for func in built_funcs:
        func(*input_data, out)
        outputs.append(out.numpy())
    np.testing.assert_equal(outputs[0], outputs[1])
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_tile_shapes(tile_shape):
    """Two chained max-pools with the inner pool turned into a rolling buffer."""
    data = te.placeholder((1, 12, 14, 16), name="A", dtype="int8")
    inner_pool = topi.nn.pool2d(data, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    outer_pool = topi.nn.pool2d(
        inner_pool, (3, 5), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC"
    )
    sch = tvm.te.create_schedule([outer_pool.op])
    outer_axes, _ = _tile_nd(sch, outer_pool, tile_shape)
    sch[inner_pool].compute_at(sch[outer_pool], outer_axes[-1])
    sch[inner_pool].rolling_buffer()
    _verify_schedule(sch, [data], outer_pool)
def test_implied_split():
    """Rolling buffer under a single manual split on the width axis."""
    data = te.placeholder((1, 12, 12, 16), name="A", dtype="int8")
    inner_pool = topi.nn.pool2d(data, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    outer_pool = topi.nn.pool2d(
        inner_pool, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC"
    )
    sch = tvm.te.create_schedule([outer_pool.op])
    _, _, w_axis, _ = outer_pool.op.axis
    w_outer, _ = sch[outer_pool].split(w_axis, 4)
    sch[inner_pool].compute_at(sch[outer_pool], w_outer)
    sch[inner_pool].rolling_buffer()
    _verify_schedule(sch, [data], outer_pool)
@pytest.mark.parametrize("kernel_shape", [(1, 1), (3, 3)])
def test_upscale(kernel_shape):
    """Two pools followed by a 2x nearest-neighbour upscale; both pools rolled."""
    output_shape = (1, 24, 24, 16)
    # Input is half the output resolution plus the halo the two kernels consume.
    input_shape = (
        output_shape[0],
        output_shape[1] // 2 + 2 * (kernel_shape[0] - 1),
        output_shape[2] // 2 + 2 * (kernel_shape[1] - 1),
        output_shape[3],
    )
    data = te.placeholder(input_shape, name="A", dtype="int8")
    pool_a = topi.nn.pool2d(data, kernel_shape, (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(
        pool_a, kernel_shape, (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC"
    )
    upscale = te.compute((1, 24, 24, 16), lambda nn, hh, ww, cc: pool_b[nn, hh // 2, ww // 2, cc])
    sch = tvm.te.create_schedule([upscale.op])
    outer_axes, _ = _tile_nd(sch, upscale, (1, 5, 5, 16))
    for stage in (pool_b, pool_a):
        sch[stage].compute_at(sch[upscale], outer_axes[-1])
        sch[stage].rolling_buffer()
    _verify_schedule(sch, [data], upscale)
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_3_tiled_poolings(tile_shape):
    """Three chained max-pools; the two intermediate ones become rolling buffers."""
    data = te.placeholder((1, 14, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(data, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_c = topi.nn.pool2d(pool_b, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    outer_axes, _ = _tile_nd(sch, pool_c, tile_shape)
    for stage in (pool_b, pool_a):
        sch[stage].compute_at(sch[pool_c], outer_axes[-1])
        sch[stage].rolling_buffer()
    _verify_schedule(sch, [data], pool_c)
@pytest.mark.parametrize("tile_shape", [(1, 4, 8, 16), (1, 8, 7, 11), (1, 8, 3, 8), (1, 7, 5, 3)])
def test_tiled_added_poolings(tile_shape):
    """Two pooled inputs summed, then pooled again; all intermediates rolled."""
    in_a = te.placeholder((1, 12, 12, 16), name="A", dtype="int8")
    # NOTE(review): the second placeholder is also named "A" in the original;
    # kept byte-identical for behavioral parity.
    in_b = te.placeholder((1, 14, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(in_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(in_b, (5, 5), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    add = topi.add(pool_a, pool_b)
    pool_c = topi.nn.pool2d(add, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    outer_axes, _ = _tile_nd(sch, pool_c, tile_shape)
    for stage in (add, pool_b, pool_a):
        sch[stage].compute_at(sch[pool_c], outer_axes[-1])
        sch[stage].rolling_buffer()
    _verify_schedule(sch, [in_a, in_b], pool_c)
@pytest.mark.parametrize("make_rolling", [(0, 0), (1, 0), (0, 1), (1, 1)])
def test_mixed_buffers(make_rolling):
    """Every combination of rolled / non-rolled intermediate buffers must agree."""
    data = te.placeholder((1, 14, 14, 16), name="A", dtype="int8")
    pool_a = topi.nn.pool2d(data, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_b = topi.nn.pool2d(pool_a, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    pool_c = topi.nn.pool2d(pool_b, (3, 3), (1, 1), (1, 1), (0, 0, 0, 0), "max", layout="NHWC")
    sch = tvm.te.create_schedule([pool_c.op])
    outer_axes, _ = _tile_nd(sch, pool_c, (1, 4, 8, 16))
    for stage, roll in zip((pool_b, pool_a), make_rolling):
        sch[stage].compute_at(sch[pool_c], outer_axes[-1])
        if roll:
            sch[stage].rolling_buffer()
    _verify_schedule(sch, [data], pool_c)
# fmt: off
# Hand-written TIR input for the InjectRollingBuffer IR test: two chained
# max-pool loops where the intermediate buffer (tensor_2) is marked with the
# "rolling_buffer_scope" attribute. Kept byte-exact as the pass's input.
@tvm.script.ir_module
class PreRollingBuffer:
    @T.prim_func
    def main(A: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
        # buffer definition
        tensor_2 = T.Buffer([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        # body
        T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "")
        for ax1_outer in T.serial(0, 2):
            T.realize(tensor_2[0:1, (ax1_outer*4):((ax1_outer*4) + 6), 0:12, 0:16], "")
            T.attr(tensor_2, "rolling_buffer_scope", True)
            for ax1 in T.serial(0, 6):
                for ax2 in T.serial(0, 12):
                    for ax3 in T.serial(0, 16):
                        tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.int8(0)
                        for dh in T.serial(0, 3):
                            for dw in T.serial(0, 3):
                                tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3] = T.max(tensor_2[0, (ax1 + (ax1_outer*4)), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3])
            for ax1_inner in T.serial(0, 4):
                for ax2_inner in T.serial(0, 8):
                    for ax3_inner in T.serial(0, 16):
                        tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0)
                        for dh_1 in T.serial(0, 3):
                            for dw_1 in T.serial(0, 5):
                                tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, ((ax1_inner + (ax1_outer*4)) + dh_1), (ax2_inner + dw_1), ax3_inner])
__tvm_meta__ = None
# Expected TIR after InjectRollingBuffer: tensor_2 is realized once with a
# 6-row window, indexed modulo 6, and guarded so overlapping rows are not
# recomputed on later iterations. Kept byte-exact as the pass's reference.
@tvm.script.ir_module
class PostRollingBuffer:
    @T.prim_func
    def main(A: T.handle, tensor: T.handle) -> None:
        # function attr dict
        T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True})
        # buffer definition
        tensor_2 = T.Buffer([1, 10, 12, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        A_1 = T.match_buffer(A, [1, 12, 14, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        tensor_1 = T.match_buffer(tensor, [1, 8, 8, 16], dtype="int8", elem_offset=0, align=64, offset_factor=1)
        # body
        T.realize(tensor_1[0:1, 0:8, 0:8, 0:16], "")
        T.realize(tensor_2[0:1, 0:6, 0:12, 0:16], "")
        for ax1_outer in T.serial(0, 2):
            for ax1 in T.serial(0, 6):
                for ax2 in T.serial(0, 12):
                    for ax3 in T.serial(0, 16):
                        if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool') :
                            tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.int8(0)
                        for dh in T.serial(0, 3):
                            for dw in T.serial(0, 3):
                                if T.likely(((ax1_outer < 1) or (ax1 >= 2)), dtype='bool'):
                                    tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3] = T.max(tensor_2[0, T.floormod((ax1 + (ax1_outer*4)), 6), ax2, ax3], A_1[0, ((ax1 + (ax1_outer*4)) + dh), (ax2 + dw), ax3])
            for ax1_inner in T.serial(0, 4):
                for ax2_inner in T.serial(0, 8):
                    for ax3_inner in T.serial(0, 16):
                        tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.int8(0)
                        for dh_1 in T.serial(0, 3):
                            for dw_1 in T.serial(0, 5):
                                tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner] = T.max(tensor_1[0, (ax1_inner + (ax1_outer*4)), ax2_inner, ax3_inner], tensor_2[0, T.floormod(((ax1_inner + (ax1_outer*4)) + dh_1), 6), (ax2_inner + dw_1), ax3_inner])
__tvm_meta__ = None
# fmt: on
def test_rolling_buffer_ir_transform():
    """InjectRollingBuffer must rewrite PreRollingBuffer into PostRollingBuffer,
    and the result must survive a TVMScript print/parse round trip."""
    mod = PreRollingBuffer
    mod = tvm.tir.transform.InjectRollingBuffer()(mod)
    # Round-trip through TVMScript text to also check printability.
    script = mod.script()
    mod = tvm.script.from_source(script)
    # Third positional argument enables mapping of free variables.
    tvm.ir.assert_structural_equal(mod["main"], PostRollingBuffer["main"], True)
if __name__ == "__main__":
    tvm.testing.main()
| 12,184 | 42.673835 | 263 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_xgboost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import multiprocessing
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm import MeasureInput, MeasureResult
from tvm.autotvm.tuner.xgboost_cost_model import XGBoostCostModel
from tvm.testing.autotvm import get_sample_task, get_sample_records
def test_fit():
    """Fit an XGBoost cost model from tuning logs, then transfer-learn on top of it."""
    task, target = get_sample_task()
    records = get_sample_records(n=500)

    # Seed model trained directly from the tuning log.
    seed_model = XGBoostCostModel(task, feature_type="itervar", loss_type="reg")
    seed_model.fit_log(records, plan_size=32)

    # Second model warm-started from the seed model.
    transfer_model = XGBoostCostModel(task, feature_type="itervar", loss_type="reg")
    transfer_model.load_basemodel(seed_model)

    xs = np.arange(10)
    ys = np.arange(10)
    transfer_model.fit(xs, ys, plan_size=32)

    # feature lengths are not guaranteed to always be the same
    transfer_model.predict(np.ones(12))
    transfer_model.predict(np.ones(8))
def fit_spawn():
    # Helper intended to run inside a spawned subprocess: verify the start
    # method really is "spawn", then run the fitting test.
    assert multiprocessing.get_start_method(False) == "spawn"
    test_fit()
def test_fit_spawn():
    """Run the fit test inside a 'spawn'-started subprocess."""
    # Subprocesses inherit the spawn method of their parents
    spawn_ctx = multiprocessing.get_context("spawn")
    worker = spawn_ctx.Process(target=test_fit)
    worker.start()
    worker.join()
def test_tuner():
    """load_history() seeds the XGB cost model only when enough records exist."""
    task, target = get_sample_task()
    records = get_sample_records(n=10)
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=10)
    # Confirm that loading history successfully loaded a
    # base_model.
    assert tuner.cost_model.base_model is not None
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=11)
    # Confirm that loading history did not load base_model
    # when not enough records according to `min_seed_records`
    # are provided
    assert tuner.cost_model.base_model is None
def test_update():
    """tuner.update() must record xs/ys/visited entries for every measurement."""
    task, target = get_sample_task()
    tuner = autotvm.tuner.XGBTuner(task)
    n_records = 5
    records = get_sample_records(n=n_records)
    # Split (input, result) pairs in one pass instead of two comprehensions.
    inputs, results = zip(*records)
    tuner.update(list(inputs), list(results))
    assert len(tuner.xs) == n_records
    assert len(tuner.ys) == n_records
    assert len(tuner.visited) == n_records
    assert all(x in tuner.visited for x in tuner.xs)
if __name__ == "__main__":
    # Allow running this test file as a plain script (without pytest).
    test_fit()
    test_fit_spawn()
    test_tuner()
    test_update()
| 3,059 | 29.909091 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_runner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test Meta Schedule Runner """
import itertools
import sys
import time
from typing import Any, List
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm._ffi import register_func
from tvm.meta_schedule.arg_info import TensorInfo
from tvm.meta_schedule.builder import BuilderInput, LocalBuilder
from tvm.meta_schedule.runner import (
EvaluatorConfig,
LocalRunner,
PyRunner,
RPCConfig,
RPCRunner,
RunnerFuture,
RunnerInput,
)
from tvm.meta_schedule.runner.local_runner import (
default_alloc_argument as local_default_alloc_argument,
)
from tvm.meta_schedule.runner.rpc_runner import (
T_ARG_INFO_JSON_OBJ_LIST,
T_ARGUMENT_LIST,
)
from tvm.meta_schedule.runner.rpc_runner import (
default_alloc_argument as rpc_default_alloc_argument,
)
from tvm.meta_schedule.testing.local_rpc import LocalRPC
from tvm.meta_schedule.utils import (
derived_object,
get_global_func_with_default_on_worker,
)
from tvm.rpc import RPCSession
from tvm.runtime import Device, Module
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir import FloatImm
MATMUL_N = 16
MATMUL_M = 32
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring,unbalanced-tuple-unpacking
@tvm.script.ir_module
class MatmulModule:
    # 16x16 float32 matmul workload: C = A @ B.
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        C = T.match_buffer(c, (16, 16), "float32")
        for i, j, k in T.grid(16, 16, 16):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    # Zero the accumulator on the first reduction step.
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class MatmulReluModule:
    # 16x16 matmul followed by ReLU; a two-block workload for the runner tests.
    @T.prim_func
    def main(a: T.handle, b: T.handle, d: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")
        D = T.match_buffer(d, (16, 16), "float32")
        # Intermediate matmul result held in a scratch buffer.
        C = T.alloc_buffer((16, 16), "float32")
        for i, j, k in T.grid(16, 16, 16):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
        for i, j in T.grid(16, 16):
            with T.block("relu"):
                vi, vj = T.axis.remap("SS", [i, j])
                D[vi, vj] = T.max(C[vi, vj], 0.0)
@tvm.script.ir_module
class BatchMatmulModule:
    # Batched matmul over 16 batches of 32x32. Note B is read as B[vn, vj, vk],
    # i.e. the second operand is used transposed within each batch.
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, [16, 32, 32])
        B = T.match_buffer(b, [16, 32, 32])
        C = T.match_buffer(c, [16, 32, 32])
        for n, i, j, k in T.grid(16, 32, 32, 32):
            with T.block("update"):
                vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k])
                with T.init():
                    C[vn, vi, vj] = 0.0
                C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk]
@tvm.script.ir_module
class AddModule:
    # Element-wise float32 add over a length-32 vector: C = A + B.
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, [32], "float32")
        B = T.match_buffer(b, [32], "float32")
        C = T.match_buffer(c, [32], "float32")
        for i in range(32):
            with T.block("add"):
                vi = T.axis.S(32, i)
                C[vi] = A[vi] + B[vi]
# A huge matmul that must cause timeout in the timeout test below.
@tvm.script.ir_module
class MatmulHugeModule:
    # 4096x4096x4096 matmul — deliberately huge so the 1-second session
    # timeout in test_meta_schedule_rpc_runner_time_out is guaranteed to trip.
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (4096, 4096), "float32")
        B = T.match_buffer(b, (4096, 4096), "float32")
        C = T.match_buffer(c, (4096, 4096), "float32")
        for i, j, k in T.grid(4096, 4096, 4096):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
def _clean_build(artifact_path: str) -> None:
    """Remove the build directory at ``artifact_path`` via the registered packed func."""
    remover = get_global_func_with_default_on_worker("meta_schedule.remove_build_dir", None)
    if remover is None:
        raise RuntimeError("Unable to find remove_build_dir function.")
    remover(artifact_path)
def test_meta_schedule_rpc_single_run():
"""Test meta schedule rpc runner for a single run"""
# Build the module
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(rpc_config, evaluator_config)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_single_run():
"""Test meta schedule local runner for a single run"""
# Build the module
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(timeout_sec=100, evaluator_config=evaluator_config)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_rpc_multiple_runs():
"""Test meta schedule rpc runner for multiple runs"""
# Build the module
mods = [
MatmulModule,
MatmulReluModule,
BatchMatmulModule,
]
builder = LocalBuilder()
builder_inputs = [BuilderInput(mod, Target("llvm")) for mod in mods]
builder_results = builder.build(builder_inputs)
for builder_result in builder_results:
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
args_infos = [
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
],
]
runner_inputs = [
RunnerInput(builder_results[i].artifact_path, "llvm", args_infos[i])
for i in range(len(mods))
]
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(rpc_config, evaluator_config)
# Run the module
runner_futures = runner.run(runner_inputs)
runner_results = [runner_future.result() for runner_future in runner_futures]
for runner_result in runner_results:
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
for builder_result in builder_results:
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_multiple_runs():
"""Test meta schedule local runner for multiple runs"""
# Build the module
mods = [
MatmulModule,
MatmulReluModule,
BatchMatmulModule,
]
builder = LocalBuilder()
builder_inputs = [BuilderInput(mod, Target("llvm")) for mod in mods]
builder_results = builder.build(builder_inputs)
for builder_result in builder_results:
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
args_infos = [
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
[
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
TensorInfo("float32", [16, MATMUL_M, MATMUL_M]),
],
]
runner_inputs = [
RunnerInput(builder_results[i].artifact_path, "llvm", args_infos[i])
for i in range(len(mods))
]
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(timeout_sec=100, evaluator_config=evaluator_config)
# Run the module
runner_futures = runner.run(runner_inputs)
runner_results = [runner_future.result() for runner_future in runner_futures]
for runner_result in runner_results:
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
for builder_result in builder_results:
_clean_build(builder_result.artifact_path)
def test_meta_schedule_py_runner():
    """A Python-side PyRunner subclass' run() override must be invoked, and its
    exception must propagate to the caller."""

    @derived_object
    class TestRunner(PyRunner):
        def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
            raise ValueError("TestRunner")

    with pytest.raises(ValueError, match="TestRunner"):
        TestRunner().run([])
def test_meta_schedule_rpc_runner_time_out():
    """Test meta schedule RPC Runner time out by using a super large workload.

    The 4096^3 matmul cannot complete within the 1-second session timeout, so
    the runner future must report an error message instead of timing results.

    Fix over the original: the bare expression statement
    ``builder_results[0].artifact_path`` did nothing — replaced with the same
    build-success assertions used by the sibling tests.
    """
    builder = LocalBuilder()
    builder_inputs = [BuilderInput(MatmulHugeModule, Target("llvm"))]
    builder_results = builder.build(builder_inputs)
    # The build itself must succeed before the runner timeout can be exercised.
    assert builder_results[0].artifact_path is not None
    assert builder_results[0].error_msg is None
    runner_input = RunnerInput(
        builder_results[0].artifact_path,
        "llvm",
        [
            TensorInfo("float32", (4096, 4096)),
            TensorInfo("float32", (4096, 4096)),
            TensorInfo("float32", (4096, 4096)),
        ],
    )
    with LocalRPC() as rpc:
        rpc_config = RPCConfig(
            tracker_host=rpc.tracker_host,
            tracker_port=rpc.tracker_port,
            tracker_key=rpc.tracker_key,
            session_priority=1,
            session_timeout_sec=1,  # deliberately short so the huge matmul times out
        )
        evaluator_config = EvaluatorConfig(
            number=1,
            repeat=1,
            min_repeat_ms=0,
            enable_cpu_cache_flush=False,
        )
        runner = RPCRunner(
            rpc_config,
            evaluator_config,
        )
        # Run the module; the timeout surfaces as an error message, not an exception.
        (runner_future,) = runner.run([runner_input])
        runner_result = runner_future.result()
        assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
            "RPCRunner: An exception occurred"
        )
        assert runner_result.run_secs is None
def test_meta_schedule_local_runner_time_out():
"""Test meta schedule Local Runner time out"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
def initializer():
@register_func("meta_schedule.runner.test_time_out")
def timeout_session_creator( # pylint: disable=unused-variable
device: Device, # pylint: disable=unused-argument
args_info: T_ARG_INFO_JSON_OBJ_LIST, # pylint: disable=unused-argument
alloc_repeat: int, # pylint: disable=unused-argument
) -> RPCSession:
time.sleep(2)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(
timeout_sec=1,
evaluator_config=evaluator_config,
initializer=initializer,
f_alloc_argument="meta_schedule.runner.test_time_out",
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"LocalRunner: Timeout, killed after"
)
assert runner_result.run_secs is None
_clean_build(builder_result.artifact_path)
def test_meta_schedule_rpc_runner_exception():
"""Test meta schedule RPC Runner exception"""
def initializer():
@register_func("meta_schedule.runner.test_exception")
def exception_session_creator( # pylint: disable=unused-variable
rpc_config: RPCConfig, # pylint: disable=unused-argument
) -> RPCSession:
raise Exception("Test")
runner_input = RunnerInput(
"test",
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
initializer=initializer,
f_create_session="meta_schedule.runner.test_exception",
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"RPCRunner: An exception occurred\n"
)
assert runner_result.run_secs is None
def test_meta_schedule_local_runner_exception():
"""Test meta schedule Local Runner exception"""
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
def initializer():
@register_func("meta_schedule.runner.test_exception")
def timeout_session_creator( # pylint: disable=unused-variable
device: Device, # pylint: disable=unused-argument
args_info: T_ARG_INFO_JSON_OBJ_LIST, # pylint: disable=unused-argument
alloc_repeat: int, # pylint: disable=unused-argument
) -> RPCSession:
raise Exception("Test")
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = LocalRunner(
evaluator_config=evaluator_config,
initializer=initializer,
f_alloc_argument="meta_schedule.runner.test_exception",
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is not None and runner_result.error_msg.startswith(
"LocalRunner: An exception occurred\n"
)
assert runner_result.run_secs is None
_clean_build(builder_result.artifact_path)
def test_meta_schedule_runner_matmul_test():
"""Test meta schedule runner with add module"""
def _check_correct_matmul(
args_before: List[np.ndarray],
args_after: List[np.ndarray],
) -> None:
a_before, b_before, c_before = args_before
a_after, b_after, c_after = args_after
c_before = np.matmul(a_before, b_before)
assert (a_before == a_after).all()
assert (b_before == b_after).all()
tvm.testing.assert_allclose(c_before, c_after, rtol=1e-5)
def test_alloc_argument(
session: RPCSession,
device: Device,
args_info: Any,
alloc_repeat: int,
) -> List[Any]:
global repeated_args_before # pylint: disable=global-variable-undefined, invalid-name
repeated_args_before = [] # type: ignore
repeated_args = rpc_default_alloc_argument(session, device, args_info, alloc_repeat)
for args in repeated_args:
repeated_args_before.append([arg.numpy() for arg in args]) # type: ignore
return repeated_args
def test_run_evaluator(
session: RPCSession, # pylint: disable=unused-argument
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[Any],
) -> List[float]:
global repeated_args_before # pylint: disable=global-variable-undefined, invalid-name
repeated_args_after = []
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
repeated_args_after.append([arg.numpy() for arg in args])
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
for args_before, args_after in zip(
repeated_args_before, # type: ignore
repeated_args_after,
):
_check_correct_matmul(args_before, args_after)
del repeated_args_before # type: ignore
return costs
# Build the module
mod = MatmulModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
TensorInfo("float32", (MATMUL_N, MATMUL_N)),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
f_alloc_argument=test_alloc_argument,
f_run_evaluator=test_run_evaluator,
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_runner_add_test():
"""Test meta schedule runner with add module"""
def _check_correct_add(args_before: List[np.ndarray], args_after: List[np.ndarray]) -> None:
a_before, b_before, c_before = args_before
a_after, b_after, c_after = args_after
c_before = a_before + b_before
assert (a_before == a_after).all()
assert (b_before == b_after).all()
assert (c_before == c_after).all()
def test_alloc_argument(
session: RPCSession,
device: Device,
args_info: Any,
alloc_repeat: int,
) -> List[Any]:
global repeated_args_before # pylint: disable=global-variable-undefined, invalid-name
repeated_args_before = [] # type: ignore
repeated_args = rpc_default_alloc_argument(
session,
device,
args_info,
alloc_repeat,
)
for args in repeated_args:
repeated_args_before.append([arg.numpy() for arg in args]) # type: ignore
return repeated_args
def test_run_evaluator(
session: RPCSession, # pylint: disable=unused-argument
rt_mod: Module,
device: Device,
evaluator_config: EvaluatorConfig,
repeated_args: List[Any],
) -> List[float]:
global repeated_args_before # pylint: disable=global-variable-undefined, invalid-name
repeated_args_after = []
evaluator = rt_mod.time_evaluator(
func_name=rt_mod.entry_name,
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
device.sync()
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
repeated_args_after.append([arg.numpy() for arg in args])
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
for args_before, args_after in zip(
repeated_args_before, # type: ignore
repeated_args_after,
):
_check_correct_add(args_before, args_after)
del repeated_args_before # type: ignore
return costs
# Build the module
mod = AddModule
builder = LocalBuilder()
(builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
assert builder_result.artifact_path is not None
assert builder_result.error_msg is None
runner_input = RunnerInput(
builder_result.artifact_path,
"llvm",
[
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
TensorInfo("float32", [MATMUL_M]),
],
)
with LocalRPC() as rpc:
rpc_config = RPCConfig(
tracker_host=rpc.tracker_host,
tracker_port=rpc.tracker_port,
tracker_key=rpc.tracker_key,
session_priority=1,
session_timeout_sec=100,
)
evaluator_config = EvaluatorConfig(
number=1,
repeat=1,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner = RPCRunner(
rpc_config,
evaluator_config,
f_alloc_argument=test_alloc_argument,
f_run_evaluator=test_run_evaluator,
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
_clean_build(builder_result.artifact_path)
def test_meta_schedule_local_runner_add_test():
    """Test meta schedule local runner with add module.

    Installs custom ``f_alloc_argument`` / ``f_run_evaluator`` hooks that
    snapshot the arguments before and after execution, then checks that the
    kernel produced an element-wise sum without mutating its inputs.

    Fixes over the original: annotations use ``np.ndarray`` (``np.array`` is a
    function, not a type — the sibling test already uses ``np.ndarray``), and
    NDArray-to-numpy conversion uses ``numpy()`` rather than the deprecated
    ``asnumpy()`` alias, matching the rest of this file.
    """

    def _check_correct_add(args_before: List[np.ndarray], args_after: List[np.ndarray]) -> None:
        # Inputs must be untouched; the output must equal their element-wise sum.
        a_before, b_before, c_before = args_before
        a_after, b_after, c_after = args_after
        c_before = a_before + b_before
        assert (a_before == a_after).all()
        assert (b_before == b_after).all()
        assert (c_before == c_after).all()

    def test_alloc_argument(
        device: Device,
        args_info: T_ARG_INFO_JSON_OBJ_LIST,  # pylint: disable=unused-argument
        alloc_repeat: int,
    ) -> List[T_ARGUMENT_LIST]:
        # Snapshot the freshly allocated arguments for the later comparison.
        global repeated_args_before  # pylint: disable=global-variable-undefined, invalid-name
        repeated_args_before = []
        repeated_args = local_default_alloc_argument(device, args_info, alloc_repeat)
        for args in repeated_args:
            repeated_args_before.append([arg.numpy() for arg in args])
        return repeated_args

    def test_run_evaluator(
        rt_mod: Module,
        device: Device,
        evaluator_config: EvaluatorConfig,
        repeated_args: List[Any],
    ) -> List[float]:
        global repeated_args_before  # pylint: disable=global-variable-undefined, invalid-name
        repeated_args_after = []
        evaluator = rt_mod.time_evaluator(
            func_name=rt_mod.entry_name,
            dev=device,
            number=evaluator_config.number,
            repeat=evaluator_config.repeat,
            min_repeat_ms=evaluator_config.min_repeat_ms,
            f_preproc="cache_flush_cpu_non_first_arg"
            if evaluator_config.enable_cpu_cache_flush
            else "",
        )
        repeated_costs: List[List[float]] = []
        for args in repeated_args:
            device.sync()
            profile_result = evaluator(*args)
            repeated_costs.append(profile_result.results)
            repeated_args_after.append([arg.numpy() for arg in args])
        costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
        # Verify the kernel's effect on every repeated argument set.
        for args_before, args_after in zip(repeated_args_before, repeated_args_after):
            _check_correct_add(args_before, args_after)
        del repeated_args_before
        return costs

    # Build the module
    mod = AddModule
    builder = LocalBuilder()
    (builder_result,) = builder.build([BuilderInput(mod, Target("llvm"))])
    assert builder_result.artifact_path is not None
    assert builder_result.error_msg is None
    runner_input = RunnerInput(
        builder_result.artifact_path,
        "llvm",
        [
            TensorInfo("float32", [MATMUL_M]),
            TensorInfo("float32", [MATMUL_M]),
            TensorInfo("float32", [MATMUL_M]),
        ],
    )
    evaluator_config = EvaluatorConfig(
        number=1,
        repeat=1,
        min_repeat_ms=0,
        enable_cpu_cache_flush=False,
    )
    runner = LocalRunner(
        timeout_sec=100,
        evaluator_config=evaluator_config,
        f_alloc_argument=test_alloc_argument,
        f_run_evaluator=test_run_evaluator,
    )
    # Run the module
    (runner_future,) = runner.run([runner_input])
    runner_result = runner_future.result()
    assert runner_result.error_msg is None
    for result in runner_result.run_secs:
        if isinstance(result, FloatImm):
            result = result.value
        assert isinstance(result, float)
        assert result >= 0.0
    _clean_build(builder_result.artifact_path)
if __name__ == "__main__":
tvm.testing.main()
| 31,353 | 33.568908 | 122 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_graph_cuda_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import sys
import time
import pytest
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib import utils, graph_executor
from tvm.contrib.cuda_graph import cuda_graph_executor
bx = te.thread_axis("blockIdx.x")
tx = te.thread_axis("threadIdx.x")
@tvm.testing.requires_cudagraph
def test_graph_simple():
    """Run a one-op (x + 1) graph through the CUDA-graph executor, checking both
    the implicit capture on the first run() and the manual capture API."""
    n = 32
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    # Map the length-32 axis onto blockIdx.x (outer) and threadIdx.x (inner, 8 wide).
    xo, xi = s[B].split(B.op.axis[0], factor=8)
    s[B].bind(xo, bx)
    s[B].bind(xi, tx)
    # Hand-written graph-executor JSON: one input node "x" feeding one tvm_op "add".
    node0 = {"op": "null", "name": "x", "inputs": []}
    node1 = {
        "op": "tvm_op",
        "name": "add",
        "inputs": [[0, 0, 0]],
        "attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"},
    }
    nodes = [node0, node1]
    arg_nodes = [0]
    node_row_ptr = [0, 1, 2]
    # Graph output is entry 0 of node 1.
    outputs = [[1, 0, 0]]
    shape = (n,)
    attrs = {
        "shape": ["list_shape", [shape, shape]],
        "dltype": ["list_str", ["float32", "float32"]],
        "storage_id": ["list_int", [0, 1]],
    }
    graph = {
        "nodes": nodes,
        "arg_nodes": arg_nodes,
        "node_row_ptr": node_row_ptr,
        "heads": outputs,
        "attrs": attrs,
    }
    graph = json.dumps(graph)
    def check_verify():
        mlib = tvm.build(s, [A, B], "cuda", name="myadd")
        dev = tvm.cuda(0)
        try:
            mod = cuda_graph_executor.create(graph, mlib, dev)
        except ValueError:
            # NOTE(review): ValueError presumably means the CUDA-graph executor
            # is not enabled in this build — the test silently bails out.
            return
        for i in range(3):
            a = np.random.uniform(size=(n,)).astype(A.dtype)
            mod.run(x=a)  # The first run captured a CUDA graph
            out = mod.get_output(0, tvm.nd.empty((n,)))
            np.testing.assert_equal(out.numpy(), a + 1)
        # capture / run CUDA graph manually
        mod.capture_cuda_graph()
        a = np.random.uniform(size=(n,)).astype(A.dtype)
        mod.set_input(x=a)
        mod.run_cuda_graph()
        out = mod.get_output(0, tvm.nd.empty((n,)))
        np.testing.assert_equal(out.numpy(), a + 1)
    check_verify()
if __name__ == "__main__":
    test_graph_simple()
| 2,993 | 28.643564 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_remove_no_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
import tvm.testing
import pytest
def nop():
    """Return a statement with no effect, used as filler in test IR."""
    no_op_stmt = tvm.tir.Evaluate(1)
    return no_op_stmt
def test_remove_no_op():
    """RemoveNoOp must collapse effect-free statements, keep real stores,
    and drop zero-extent loops."""
    i = te.var("i")
    j = te.var("j")
    k = te.var("k")
    m = te.var("m")
    n = te.var("n")
    dtype = "int64"
    Ab = tvm.tir.decl_buffer((n,), dtype)
    # Triply-nested loop whose innermost statement only evaluates
    # expressions (no stores), so the entire nest is a no-op.
    stmt = tvm.tir.For(
        i,
        0,
        4,
        tvm.tir.ForKind.SERIAL,
        tvm.tir.For(
            j,
            0,
            n,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.For(
                k,
                0,
                m,
                tvm.tir.ForKind.SERIAL,
                tvm.tir.IfThenElse((i * m + j + k < n), tvm.tir.Evaluate(m), tvm.tir.Evaluate(n)),
            ),
        ),
    )
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)

    # A store surrounded by no-ops must survive, with the no-ops removed.
    store = tvm.tir.BufferStore(Ab, tvm.tir.BufferLoad(Ab, [i]) + 1, [i + 1])
    stmt2 = tvm.tir.SeqStmt([nop(), tvm.tir.SeqStmt([store, nop()])])
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt2))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert ret == store

    # remove zero extent loop
    stmt3 = tvm.tir.For(i, 0, 0, tvm.tir.ForKind.SERIAL, store)
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt3))
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)
def test_remove_no_op_with_invalid_extent():
    """A loop whose extent is provably non-positive is a no-op."""

    @T.prim_func
    def main(A: T.Buffer((16), "int32"), B: T.Buffer((16), "int32")) -> None:
        for i in T.serial(16):
            # j's extent (i - 20) is negative for every i in [0, 16),
            # so the store never executes.
            for j in T.serial(i - 20):
                B[i] = A[i] + j

    mod = tvm.ir.module.IRModule.from_expr(main)
    ret = tvm.tir.transform.RemoveNoOp()(mod)["main"].body
    assert isinstance(ret, tvm.tir.Evaluate)
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared harness: applies tir.RemoveNoOp under per-test config knobs."""

    # Subclasses override these to opt in to dataflow-based removal or to
    # bound the simplifier's work.
    use_dataflow_analysis = False
    max_simplification_steps = 0

    def transform(self):
        """Return a callable that runs RemoveNoOp with this test's config."""

        def apply(mod):
            pass_config = {
                "tir.RemoveNoOp": {
                    "use_dataflow_analysis": self.use_dataflow_analysis,
                    "max_simplification_steps": self.max_simplification_steps,
                }
            }
            with tvm.transform.PassContext(config=pass_config):
                return tvm.tir.transform.RemoveNoOp()(mod)

        return apply
class TestRemoveEmptyForLoop(BaseBeforeAfter):
    """A for-loop whose body is a no-op is itself a no-op."""

    def before():
        # The loop body only evaluates a constant, so the whole loop
        # can be elided.
        for i in T.serial(16):
            T.evaluate(0)

    def expected():
        T.evaluate(0)
class TestRemoveZeroExtentLoop(BaseBeforeAfter):
    """A for-loop with no extent is a no-op."""

    def before(A: T.Buffer(16, "int32")):
        # Extent 0 means the store body never executes.
        for i in T.serial(0):
            A[i] = 42

    def expected(A: T.Buffer(16, "int32")):
        T.evaluate(0)
class TestRemoveUnusedLet(BaseBeforeAfter):
"""A let statement that is never used is a no-op."""
def before(A: T.Buffer(16, "int32")):
x = 5
for i in T.serial(16):
A[i] = 0
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 0
class TestRemoveLetUsedOnlyInNoOp(BaseBeforeAfter):
"""A let statement that is never used is a no-op.
Similar to TestRemoveUnusedLet, but the usage of the let binding
may have been removed by an earlier removal of another no-op.
"""
def before(A: T.Buffer(16, "int32")):
x = 5
for i in T.serial(0):
A[i] = x
def expected(A: T.Buffer(16, "int32")):
T.evaluate(0)
class TestKeepSideEffectsOfLet(BaseBeforeAfter):
"""The side effects of a no-op let must be kept."""
def before():
x = T.call_extern("extern_func", dtype="int32")
T.evaluate(0)
def expected():
T.evaluate(T.call_extern("extern_func", dtype="int32"))
class TestRemoveEmptyThenCase(BaseBeforeAfter):
"""A no-op then_case can be removed."""
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 8:
T.evaluate(0)
else:
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if not (i < 8):
A[i] = 42
class TestRemoveEmptyElseCase(BaseBeforeAfter):
"""A no-op else_case can be removed."""
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 8:
A[i] = 42
else:
T.evaluate(0)
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 8:
A[i] = 42
class TestRemoveUnusedWrite(BaseBeforeAfter):
"""For two sequential writes, the first is a no-op"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 100
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 42
class TestSuppressRemovalOfUnusedWrite(BaseBeforeAfter):
"""Dataflow analysis requires the config to opt-in
Like TestRemoveUnusedWrite, but dataflow analysis isn't enabled.
"""
use_dataflow_analysis = False
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 100
A[i] = 42
expected = before
class TestKeepSideEffectsOfUnusedWrite(BaseBeforeAfter):
"""For two sequential writes, the first value may have side effects"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = T.call_extern("extern_func", dtype="int32")
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
T.evaluate(T.call_extern("extern_func", dtype="int32"))
A[i] = 42
class TestKeepFirstWriteWhenUsed(BaseBeforeAfter):
"""For two sequential writes, keep the first if it is used"""
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 100
A[i] = A[i] + 1
expected = before
class TestRemoveOverwrittenLoop(BaseBeforeAfter):
"""Remove repeated writes to the same region
If two loops write to the same region, the first is a no-op.
"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 100
for i in T.serial(16):
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 42
class TestRemoveOverwrittenSubloop(BaseBeforeAfter):
"""Remove repeated writes to the same region
If the first loop writes to a subset of the region, the first loop
is a no-op. Similar to TestRemoveOverwrittenLoop, but the first
loop's extents are a subset of the second loop.
"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(4, 12):
A[i] = 100
for i in T.serial(16):
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 42
class TestKeepPartiallyOverwrittenLoop(BaseBeforeAfter):
    """Keep partially overwritten regions

    If the second loop doesn't entirely overwrite the first, the first
    must be kept.
    """

    def before(A: T.Buffer(16, "int32")):
        # The second loop only covers i < 12, so A[12:16] still holds the
        # value written by the first loop.
        for i in T.serial(16):
            A[i] = 100

        for i in T.serial(16):
            if i < 12:
                A[i] = 42

    expected = before
class TestRemoveOverwrittenPredicatedLoopWithIdenticalCondition(BaseBeforeAfter):
"""Remove repeated writes to the same predicated region.
Similar to TestKeepPartiallyOverwrittenLoop, except the first loop
has the same predicate as the second, and can therefore be
removed.
In the past, this test has had performance regressions in which
the runtime increased from a few seconds to nearly ten minutes.
The "max_simplification_steps" parameter is set at twice the
current number of steps required, in order to prevent similar
performance regression.
"""
use_dataflow_analysis = True
max_simplification_steps = 200000
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 12:
A[i] = 100
for i in T.serial(16):
if i < 12:
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 12:
A[i] = 42
class TestRemoveOverwrittenPredicatedLoopWithProvableCondition(BaseBeforeAfter):
"""Remove repeated writes to the same predicated region.
Similar to
TestRemoveOverwrittenPredicatedLoopWithIdenticalCondition, except
the first loop's predicate is not a precise match for the second
loop's predicate. So long as the regions written in the first
loop are a subset of those written in the second loop, they can be
removed.
In the past, this test has had performance regressions in which
the runtime increased from a few seconds to nearly ten minutes.
The "max_simplification_steps" parameter is set at twice the
current number of steps required, in order to prevent similar
performance regression.
"""
use_dataflow_analysis = True
max_simplification_steps = 200000
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 10:
A[i] = 100
for i in T.serial(16):
if i // 4 < 3:
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i // 4 < 3:
A[i] = 42
class TestRemoveSeparatedOverwrites(BaseBeforeAfter):
"""Remove repeated writes to the same predicated region.
Similar to TestRemoveOverwrittenLoopRegion, but with an
independent loop between the first and second write of the buffer.
"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 100
for i in T.serial(16):
B[i] = 0
for i in T.serial(16):
A[i] = 42
def expected(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")):
for i in T.serial(16):
B[i] = 0
for i in T.serial(16):
A[i] = 42
@pytest.mark.xfail(reason="Not implemented yet")
class TestRemoveSeparatedOverwriteOfPredicatedLoop(BaseBeforeAfter):
"""Remove repeated writes to the same predicated region.
Similar to TestRemoveSeparatedOverwrites, but the independent loop
between the first and second writes writes to a different subset
of the same buffer.
"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i < 12:
A[i] = 100
for i in T.serial(16):
if i > 12:
A[i] = 15
for i in T.serial(16):
if i < 12:
A[i] = 42
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i > 12:
A[i] = 15
for i in T.serial(16):
if i < 12:
A[i] = 42
class TestRemoveReadWrite(BaseBeforeAfter):
"""Writing a value to the same location as was just read is a no-op."""
def before(A: T.Buffer(1, "int32")):
A[0] = A[0]
def expected(A: T.Buffer(1, "int32")):
T.evaluate(0)
class TestKeepReadWriteToDifferentIndices(BaseBeforeAfter):
"""Writing a value to a different index should not be removed"""
def before(A: T.Buffer(16, "int32")):
for i in T.serial(15):
A[i] = A[i + 1]
expected = before
class TestRemoveReadWriteSameIndexDifferentExpression(BaseBeforeAfter):
"""Writing a value to the same location as the read is a no-op.
If the value of the index can be proven to be the same, then the
no-op can be removed, even if they have different forms of the
expression.
"""
def before(A: T.Buffer(16, "int32")):
for io, ii in T.grid(4, 4):
i = 4 * io + ii
A[4 * io + ii] = A[i]
def expected(A: T.Buffer(16, "int32")):
T.evaluate(0)
class TestRemoveReadWriteSameIndexUsingConstraint(BaseBeforeAfter):
"""Writing a value to the same location as the read is a no-op.
If the value of the index can be proven to be the same, then the
no-op can be removed. This may require using the a constraint
that is known from a conditional containing the read/write.
"""
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i != 0:
A[i] = A[i - 1]
else:
A[i] = A[0]
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
if i != 0:
A[i] = A[i - 1]
class TestRemoveWritingOfKnownValue(BaseBeforeAfter):
"""Writing a value that already exists at that index is a no-op"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = i
A[4] = 4
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = i
class TestKeepOneOfDuplicateLoops(BaseBeforeAfter):
"""Must not reason based on a touch point after removing it.
If the first loop is removed because it is overwritten by the
second loop, and the second loop is removed because it writes the
same value as the first loop, the overall transformation is no
longer valid. In this case, only one of the two should be
removed.
"""
use_dataflow_analysis = True
def before(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = i
for i in T.serial(16):
A[i] = i
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = i
class TestRemoveEmptyTemporary(BaseBeforeAfter):
"""An allocation with a no-op body is a no-op."""
def before():
A = T.allocate([16], "int32", "local")
T.evaluate(0)
def expected():
T.evaluate(0)
class TestRemoveEmptyTemporaryWithDeclBuffer(BaseBeforeAfter):
"""Remove DeclBuffer alongside Allocate
If an unused allocation is removed, any DeclBuffer instances that
refer to it should also be removed.
"""
def before():
A = T.decl_buffer([4, 4], "int32", scope="local")
A_flat = T.decl_buffer(16, "int32", scope="local", data=A.data)
T.evaluate(0)
def expected():
T.evaluate(0)
@pytest.mark.xfail(reason="Not implemented yet")
class TestRemoveUnusedTemporary(BaseBeforeAfter):
"""An unused allocation is a no-op."""
def before(A: T.Buffer(16, "int32")):
B = T.allocate([16], "int32", "local")
for i in T.serial(16):
A[i] = 1
def expected(A: T.Buffer(16, "int32")):
for i in T.serial(16):
A[i] = 1
@pytest.mark.xfail(reason="Not implemented yet")
class TestRemoveUnusedWriteIntoTemporary(BaseBeforeAfter):
"""A write that only impacts a temporary allocation is a no-op."""
def before():
A = T.decl_buffer([16], "int32", scope="local")
for i in T.serial(16):
A[i] = 0
def expected():
T.evaluate(0)
class TestKeepUsedWriteIntoTemporary(BaseBeforeAfter):
"""A write into a temporary that is used later must be kept."""
def before(B: T.Buffer(16, "int32")):
A = T.decl_buffer([16], "int32", scope="local")
for i in T.serial(16):
A[i] = 0
for i in T.serial(16):
B[i] = A[i]
expected = before
@pytest.mark.xfail(reason="Not implemented yet")
class TestRemoveWriteIntoTemporary(BaseBeforeAfter):
"""A write that only impacts a temporary allocation is a no-op."""
def before(A: T.Buffer(16, "int32"), C: T.Buffer(1, "int32")):
B = T.decl_buffer([16], "int32", scope="local")
for i in T.serial(16):
B[i] = A[i]
C[0] = 0
for i in T.serial(16):
C[0] = C[0] + B[i]
for i in T.serial(16):
B[i] = 0
def expected(A: T.Buffer(16, "int32"), C: T.Buffer(1, "int32")):
B = T.decl_buffer([16], "int32", scope="local")
for i in T.serial(16):
B[i] = A[i]
C[0] = 0
for i in T.serial(16):
C[0] = C[0] + B[i]
class TestCertainConditon(BaseBeforeAfter):
    """The condition of the If-Else node is certain.

    This would cause a `Segmentation fault` error before.
    """

    # NOTE(review): the class name keeps the historical "Conditon" spelling;
    # renaming is out of scope for a comment-only change.
    def before():
        # The condition is a compile-time constant; both branches are no-ops.
        if True:
            T.evaluate(0)
        else:
            T.evaluate(0)

    def expected():
        T.evaluate(0)
if __name__ == "__main__":
tvm.testing.main()
| 17,847 | 26.29052 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_analysis_detect_buffer_access_lca.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import tir
from tvm.script import tir as T
# TVMScript fixture: a 128x128 workload with two argument buffers (A, B) and
# two locally allocated buffers (C, D), each accessed at a different loop
# depth so the LCA detector reports a distinct node per buffer.
@T.prim_func
def buffer_load_store_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.match_buffer(b, (128, 128), "float32")
    C = T.alloc_buffer((128, 128), "float32")
    D = T.alloc_buffer((128, 128), "float32")
    # A is written over the full grid here and read in the init below,
    # so its accesses span both top-level loop nests (LCA = root).
    for ii, jj in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [ii, jj])
            A[i, j] = T.float32(0)
    for i0, j0, k0 in T.grid(32, 32, 32):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            with T.init():
                for ii, jj in T.grid(4, 4):
                    B[i * 4 + ii, j * 4 + jj] = A[i * 4 + ii, j * 4 + jj]
            for ii, jj in T.grid(4, 4):
                # C is read in both kk loops; D only in the second one.
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += C[i * 4 + ii, k * 4 + kk]
                for kk in range(0, 4):
                    B[i * 4 + ii, j * 4 + jj] += (
                        D[j * 4 + jj, k * 4 + kk] * C[i * 4 + ii, k * 4 + kk]
                    )
# TVMScript fixture with opaque accesses: A is a low-level decl_buffer (not
# part of the buffer map, so not tracked by the analysis) and B's backing
# pointer is passed to an intrinsic instead of being indexed directly.
@T.prim_func
def buffer_opaque_access(b: T.handle, c: T.handle) -> None:
    B = T.match_buffer(b, [16, 16], "float32")
    C = T.match_buffer(c, [16, 16], "float32")
    with T.block():
        T.reads([])
        T.writes(B[0:16, 0:16])
        A = T.decl_buffer([256], "float32")
        for i, j in T.grid(16, 16):
            A[i * 16 + j] = 1
        for i in range(0, 16):
            for j in range(0, 16):
                T.evaluate(A[i * 16 + j])
            for j in range(0, 16):
                # Opaque access: B.data is handed to the intrinsic directly.
                T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, T.float32(0), dtype="handle"))
    for i, j in T.grid(16, 16):
        with T.block():
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj]
# Fixture whose only buffer access sits directly under the PrimFunc body
# (no enclosing block or loop), so the reported LCA is None.
# NOTE(review): the buffer is declared with shape [0, 0] yet A[0, 0] is
# written - presumably irrelevant to the LCA analysis; confirm.
@T.prim_func
def lca_is_func_root(a: T.handle) -> None:
    A = T.match_buffer(a, [0, 0], "float32")
    A[0, 0] = 1.0
# Fixture using T.match_buffer inside blocks: accesses made through the
# matched sub-buffers (B0, B1, AA) must be attributed to the source
# buffers (A, B) when computing LCAs.
@T.prim_func
def match_buffer_func(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.match_buffer(b, (128, 128), "float32")
    for i, j in T.grid(8, 8):
        with T.block("block"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi * 16 + 2 : vi * 16 + 12, vj * 16 + 2 : vj * 16 + 16])
            T.writes(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            # B is accessed via two matched sub-regions in this block.
            B0 = T.match_buffer(B[vi * 16 + 2 : vi * 16 + 6, vj * 16 + 2 : vj * 16 + 6], (4, 4))
            B1 = T.match_buffer(B[vi * 16 + 8 : vi * 16 + 12, vj * 16 + 8 : vj * 16 + 16], (4, 8))
            for ii, jj in T.grid(16, 16):
                with T.block("AAA"):
                    vii, vjj = T.axis.remap("SS", [ii, jj])
                    # A is only accessed through the scalar view AA here.
                    AA = T.match_buffer(A[vii, vjj], ())
                    AA[()] = 1.0
            T.evaluate(B0.data)
            T.evaluate(B1.data)
# Fixture in which both global buffers are touched only inside a loop bound
# to blockIdx.x; that thread-bound loop should become their common LCA.
@T.prim_func
def global_buffer_with_blockidx(
    a: T.Buffer((1, 32), "int32"), b: T.Buffer((1, 32), "int32")
) -> None:
    for i0 in T.thread_binding(0, 1, thread="blockIdx.x"):
        for i1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("copy"):
                i, j = T.axis.remap("SS", [i0, i1])
                T.reads(a[i, j])
                T.writes(b[i, j])
                b[i, j] = a[i, j]
def test_buffer_load_store():
    """Check the detected LCA of each buffer in buffer_load_store_func."""
    func = buffer_load_store_func
    A, B = [func.buffer_map[x] for x in func.params]
    C, D = func.body.block.alloc_buffers
    lca = tir.analysis.detect_buffer_access_lca(func)

    # LCA of Buffer A is root (accessed in both top-level loop nests)
    root_block = func.body.block
    assert lca[A] == func.body.block

    # LCA of Buffer B is the reduction block
    reduce_block = root_block.body[1].body.body.body.block
    assert lca[B] == reduce_block

    # LCA of Buffer C is loop jj (C is read in both inner kk loops)
    loop_jj = reduce_block.body.body
    assert lca[C] == loop_jj

    # LCA of Buffer D is the second kk loop (D is read only there)
    loop_kk = loop_jj.body[1]
    assert lca[D] == loop_kk
def test_opaque_access():
    """Check detected LCAs in the presence of opaque buffer accesses."""
    func = buffer_opaque_access
    B, C = [func.buffer_map[x] for x in func.params]
    lca = tir.analysis.detect_buffer_access_lca(func)

    # Cannot detect buffer A since it is defined by a low-level decl_buffer
    # LCA of Buffer B is root
    root_block = func.body.block
    assert lca[B] == func.body.block

    # LCA of Buffer C is the corresponding copy block
    assert lca[C] == root_block.body[1].body.body.block
def test_lca_func_root():
    """A buffer accessed directly under the PrimFunc body has LCA None."""
    func = lca_is_func_root
    buf_a = func.buffer_map[func.params[0]]
    lca = tir.analysis.detect_buffer_access_lca(func)
    assert lca[buf_a] is None
def test_match_buffer():
    """LCAs must account for accesses made through matched sub-buffers."""
    func = match_buffer_func
    A, B = [func.buffer_map[x] for x in func.params]
    lca = tir.analysis.detect_buffer_access_lca(func)

    root_block = func.body.block
    block = root_block.body.body.body.block
    block_inner = block.body[0].body.body.block

    # LCA of Buffer A is the inner block (A is only accessed via AA)
    assert lca[A] == block_inner

    # LCA of Buffer B is the main block (accessed via B0, B1 and T.reads)
    assert lca[B] == block
def test_global_buffer_with_blockidx():
    """Both global buffers should share the blockIdx-bound loop as LCA."""
    func = global_buffer_with_blockidx
    arg_buffers = [func.buffer_map[param] for param in func.params]
    lca = tir.analysis.detect_buffer_access_lca(func)

    # The outermost loop is bound to `blockIdx.x`; every access to either
    # buffer happens underneath it.
    blockidx_loop = func.body.block.body
    for buf in arg_buffers:
        assert lca[buf] == blockidx_loop
if __name__ == "__main__":
test_buffer_load_store()
test_opaque_access()
test_lca_func_root()
test_match_buffer()
test_global_buffer_with_blockidx()
| 6,360 | 32.835106 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_container_structural_equal.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm.ir.base import get_first_structural_mismatch
from tvm.runtime import ObjectPath
def get_first_mismatch_ensure_symmetry(a, b):
    """Return the first structural mismatch between `a` and `b`, checking
    that swapping the arguments produces the mirrored result.

    Raises AssertionError if the forward and reversed comparisons disagree.
    """
    forward = get_first_structural_mismatch(a, b)
    backward = get_first_structural_mismatch(b, a)

    if forward is None and backward is None:
        return None

    # The reversed comparison must report the same pair of paths, swapped.
    symmetric = (
        forward is not None
        and backward is not None
        and forward[0] == backward[1]
        and forward[1] == backward[0]
    )
    if not symmetric:
        raise AssertionError(
            "get_first_structural_mismatch(a, b) and get_first_structural_mismatch(b, a) returned"
            " inconsistent results '{}' and '{}' for a='{}', b='{}'".format(
                forward, backward, a, b
            )
        )

    a_path, b_path = forward
    b_path_swapped, a_path_swapped = backward
    assert a_path == a_path_swapped
    assert b_path == b_path_swapped
    return forward
@pytest.mark.parametrize(
    "a, b, expected_a_path, expected_b_path",
    [
        # Single differing middle element.
        (
            [1, 2, 3],
            [1, 4, 3],
            ObjectPath.root().array_index(1).attr("value"),
            ObjectPath.root().array_index(1).attr("value"),
        ),
        # Two differences; only the first is reported.
        (
            [1, 2, 3],
            [10, 2, 30],
            ObjectPath.root().array_index(0).attr("value"),
            ObjectPath.root().array_index(0).attr("value"),
        ),
        # Same length, values shifted relative to each other.
        (
            [1, 3, 4],
            [1, 2, 3, 4],
            ObjectPath.root().array_index(1).attr("value"),
            ObjectPath.root().array_index(1).attr("value"),
        ),
        # Shared prefix; the longer array has an extra trailing element.
        (
            [1, 2, 3],
            [1, 2, 3, 4],
            ObjectPath.root().missing_array_element(3),
            ObjectPath.root().array_index(3),
        ),
        # Empty vs non-empty.
        (
            [],
            [1],
            ObjectPath.root().missing_array_element(0),
            ObjectPath.root().array_index(0),
        ),
    ],
)
def test_array_structural_mismatch(a, b, expected_a_path, expected_b_path):
    """The first mismatch between two Arrays is reported as a path pair."""
    a = tvm.runtime.convert(a)
    b = tvm.runtime.convert(b)
    a_path, b_path = get_first_mismatch_ensure_symmetry(a, b)
    assert a_path == expected_a_path
    assert b_path == expected_b_path
@pytest.mark.parametrize(
    "contents",
    [
        [],
        [1],
        [1, 2, 3],
    ],
)
def test_array_structural_equal_to_self(contents):
    """An Array must be structurally equal to an independently-built copy."""
    lhs = tvm.runtime.convert(list(contents))
    rhs = tvm.runtime.convert(list(contents))
    assert get_first_mismatch_ensure_symmetry(lhs, rhs) is None
@pytest.mark.parametrize(
    "contents",
    [
        [],
        [1],
        [1, 2, 3],
    ],
)
def test_shape_tuple_structural_equal_to_self(contents):
    """A ShapeTuple must be structurally equal to an independent copy."""
    lhs = tvm.runtime.ShapeTuple(list(contents))
    rhs = tvm.runtime.ShapeTuple(list(contents))
    assert get_first_mismatch_ensure_symmetry(lhs, rhs) is None
@pytest.mark.parametrize(
    "a, b, expected_a_path, expected_b_path",
    [
        # Shared key with differing values.
        (
            dict(a=3, b=4),
            dict(a=3, b=5),
            ObjectPath.root().map_value("b").attr("value"),
            ObjectPath.root().map_value("b").attr("value"),
        ),
        # Key present on one side only.
        (
            dict(a=3, b=4),
            dict(a=3, b=4, c=5),
            ObjectPath.root().missing_map_entry(),
            ObjectPath.root().map_value("c"),
        ),
    ],
)
def test_string_map_structural_mismatch(a, b, expected_a_path, expected_b_path):
    """The first mismatch between two string-keyed Maps is a path pair."""
    a = tvm.runtime.convert(a)
    b = tvm.runtime.convert(b)
    a_path, b_path = get_first_mismatch_ensure_symmetry(a, b)
    assert a_path == expected_a_path
    assert b_path == expected_b_path
@pytest.mark.parametrize(
    "contents",
    [
        dict(),
        dict(a=1),
        dict(a=3, b=4, c=5),
    ],
)
def test_string_structural_equal_to_self(contents):
    # NOTE(review): despite the name, this exercises string-keyed *maps*;
    # consider renaming to test_string_map_structural_equal_to_self.
    a = tvm.runtime.convert(dict(contents))
    b = tvm.runtime.convert(dict(contents))
    assert get_first_mismatch_ensure_symmetry(a, b) is None
# The behavior of structural equality for maps with non-string keys is fairly specific
# to IR variables because it assumes that map keys have been "mapped" using
# `SEqualReducer::FreeVarEqualImpl()`. So we leave this case to TIR tests.
if __name__ == "__main__":
tvm.testing.main()
| 5,011 | 28.482353 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test measurement and log serialization. """
import json
import multiprocessing
import numpy as np
import tvm
from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm.testing
import pickle
from tvm.testing.auto_scheduler import matmul_auto_scheduler_test
from tvm.auto_scheduler import workload_registry
def record_common(dag, s):
    """Round-trip a (MeasureInput, MeasureResult) pair for `dag` and state
    `s` through both string-based and file-based record serialization."""
    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)

    inp = auto_scheduler.measure.MeasureInput(task, s)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)

    # Test in-memory record processing.
    record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
    r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
    # Only check the workload_key for simplification.
    assert inp.task.workload_key == r_inp.task.workload_key
    assert str(res) == str(r_res)

    # Test file-based record processing.
    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])
        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1

        # The deserialized state must infer to the same bounds as the
        # original, and must differ from the unscheduled initial state.
        s1 = dag.infer_bound_from_state(s)
        s2 = dag.infer_bound_from_state(inputs[0].state)
        assert s1 == s2
        assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
    """Serialization must preserve split/reorder/fuse/annotation steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Split
    its0 = s.split(C, s[C].iters[0], [4, 8, 8])
    its1 = s.split(C, s[C].iters[4], [8, 4, 4])
    # Reorder
    s.reorder(
        C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
    )
    # Fuse
    s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
    # Parallel
    s.parallel(C, s[C].iters[0])
    # Thread bind(The blockIdx & threadIdx are used in GPU, just for record testing here)
    s.bind(C, s[C].iters[1], "blockIdx.x")
    s.bind(C, s[C].iters[2], "threadIdx.z")
    s.bind(C, s[C].iters[3], "vthread")
    # Unroll
    s.unroll(C, s[C].iters[4])
    # Vectorize
    s.vectorize(C, s[C].iters[6])

    record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
    """Serialization must preserve compute_at/root/inline and cache steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    AA = topi.nn.relu(A)
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Cache Write
    C_shared = s.cache_write(C, "shared")
    # Compute At
    s.compute_at(C_shared, C, s[C].iters[0])
    # Cache Read
    B_global = s.cache_read(B, "global", [C_shared])
    s.compute_at(B_global, C_shared, s[C_shared].iters[2])
    # Compute Inline
    s.compute_inline(AA)
    # Compute Root
    s.compute_root(C_shared)

    record_common(dag, s)
def test_record_follow_split_follow_fused_split():
    """Serialization must preserve follow_split / follow_fused_split steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
    D = topi.nn.relu(C)
    E = topi.nn.relu(D)

    dag = auto_scheduler.ComputeDAG([A, B, E])
    s = dag.get_init_state()

    # Follow Split
    s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
    split_step0 = len(s.transform_steps) - 1
    s.follow_split(C, s[C].iters[5], split_step0, 4)
    # Follow Fused Split
    its0 = s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
    split_step1 = len(s.transform_steps) - 1
    its1 = s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
    split_step2 = len(s.transform_steps) - 1
    # NOTE(review): `its` is built here but never used afterwards - confirm
    # whether it can be dropped.
    its = []
    for i0, i1 in zip(its0, its1):
        its.append(i0)
        its.append(i1)
    for i in range(0, 5):
        s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
    s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)

    record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
    """Serialization must preserve pragma/storage_align/rfactor steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Rfactor
    ko, _ = s.split(C, s[C].iters[2], [16])
    s.rfactor(C, ko, 2)
    # Pragma
    s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
    # StorageAlign
    s.storage_align(C, s[C].iters[-1], 8, 4)

    record_common(dag, s)
def test_recover_measure_input():
    """A MeasureInput read back from a record file can be recovered, with
    or without rebuilding its state."""
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )

    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)

    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])

        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1

        raw_inp = inputs[0]

        # Without rebuild_state only the compute DAG is recovered.
        correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
        assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)

        # With rebuild_state the schedule state is recovered too.
        correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
        assert str(correct_inp.state) == str(inp.state)
def test_workload_dis_factor():
    """Distance factor between workloads: 1 for identical workloads, the
    product of per-dimension ratios when compatible, and inf otherwise."""
    calc = auto_scheduler.utils.calc_workload_dis_factor
    decode = auto_scheduler.utils.decode_workload_key

    # Identical
    target_wkl_key = json.dumps(
        ["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"]
    )
    assert calc(decode(target_wkl_key), decode(target_wkl_key)) == 1

    # Compatible with a factor (8/1 * 224/112 * 224/112)
    wkl_key = json.dumps(["func1", [1, 3, 112, 112], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == 8 * 2 * 2

    # Incompatible argument with zeros
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [1, 1], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [0, 0], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible non-integer argument
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "int8"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible function
    wkl_key = json.dumps(["func2", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible due to non-divisible factor
    wkl_key = json.dumps(["func1", [8, 3, 223, 223], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
def test_measure_local_builder_runner():
    """LocalBuilder + LocalRunner build and run a matmul task without
    errors, with and without CPU cache flushing."""
    if not tvm.testing.device_enabled("llvm"):
        return

    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )

    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        local_runner = auto_scheduler.LocalRunner(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )

        # error_no == 0 means build / run succeeded.
        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = local_runner.run([minp], bress)
        assert mress[0].error_no == 0
def test_dag_measure_local_builder_runner():
    """Measure a task built from an explicit ComputeDAG whose workload-registry
    entry is round-tripped through pickle before measuring."""
    if not tvm.testing.device_enabled("llvm"):
        return
    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
    D = topi.nn.relu(C)
    E = topi.nn.relu(D)
    tensors = [A, B, E]
    dag = auto_scheduler.ComputeDAG(tensors)
    key = workload_registry.register_workload_tensors(dag.workload_key(), tensors)
    # Serialize the registry entry, delete it, and restore it from the pickled
    # copy — mimics shipping the workload to another process before measuring.
    transfer_data = workload_registry.serialize_workload_registry_entry(key)
    f_data = pickle.dumps(transfer_data)
    f_new = pickle.loads(f_data)
    del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
    workload_registry.deserialize_workload_registry_entry(f_new)
    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key=key, target=target)
    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        local_runner = auto_scheduler.LocalRunner(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )
        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = local_runner.run([minp], bress)
        assert mress[0].error_no == 0
def test_workload_serialization():
    """Round-trip a workload-registry entry through pickle and restore it."""
    wkl_key = tvm.auto_scheduler.utils.get_func_name(matmul_auto_scheduler_test)
    entry = workload_registry.serialize_workload_registry_entry(wkl_key)
    restored = pickle.loads(pickle.dumps(entry))
    # Remove the live registry entry so deserialization must recreate it.
    del workload_registry.WORKLOAD_FUNC_REGISTRY[wkl_key]
    workload_registry.deserialize_workload_registry_entry(restored)
def test_measure_local_builder_rpc_runner():
    """Build locally and run through a local RPC measure context; both the
    build and the run results must report error_no == 0."""
    if not tvm.testing.device_enabled("llvm"):
        return
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )
    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )
        rpc_runner = measure_ctx.runner
        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = rpc_runner.run([minp], bress)
        assert mress[0].error_no == 0
        # Explicitly tear down the RPC tracker/server owned by the context.
        del measure_ctx
def measure_local_builder_rpc_runner_spawn():
    # Child-process entry point: confirm we were started with the "spawn"
    # method, then re-run the RPC-runner test inside this process.
    assert multiprocessing.get_start_method(False) == "spawn"
    test_measure_local_builder_rpc_runner()
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
    """Run the RPC-runner test in a "spawn" child process and propagate failure."""
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
    p.start()
    p.join()
    # Bug fix: an assertion failure in the child only shows up in its exit
    # code; without this check the parent test would silently pass.
    assert p.exitcode == 0
@tvm.testing.requires_llvm
def test_measure_target_host():
    """Saving and reloading a measurement record must preserve target.host."""
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        target=tvm.target.Target("llvm", "llvm -mtriple=aarch64-linux-gnu"),
    )
    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])
        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1
        raw_inp = inputs[0]
        # Recover the full MeasureInput from the serialized record and check
        # the host half of the composite target survived the round trip.
        recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
        assert str(recovered_inp.task.target.host) == str(inp.task.target.host)
# Measure a workload whose "Index" tensor must come from `task_inputs`
# (matched by placeholder name) instead of random data — local runner variant.
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_local_runner():
    @auto_scheduler.register_workload
    def foo():
        X = te.placeholder(shape=[10], dtype="int32")
        Index = te.placeholder(shape=[1], dtype="int32", name="Index")
        Y = te.compute((1,), lambda i: X[Index[i]])
        return [X, Index, Y]
    # This workload cannot use random input for the `Index` input
    task = auto_scheduler.SearchTask(
        func=foo,
        target="llvm",
        task_inputs={
            "Index": tvm.nd.array(np.array([5], dtype="int32")),
        },
    )
    minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
    local_builder = auto_scheduler.LocalBuilder()
    local_runner = auto_scheduler.LocalRunner(timeout=10)
    bress = local_builder.build([minp])
    assert bress[0].error_no == 0
    mress = local_runner.run([minp], bress)
    assert mress[0].error_no == 0
# Same named-task-input workload as above, but measured through the local
# RPC measure context instead of the local runner.
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_rpc_runner():
    @auto_scheduler.register_workload
    def foo():
        X = te.placeholder(shape=[10], dtype="int32")
        Index = te.placeholder(shape=[1], dtype="int32", name="Index")
        Y = te.compute((1,), lambda i: X[Index[i]])
        return [X, Index, Y]
    # This workload cannot use random input for the `Index` input
    task = auto_scheduler.SearchTask(
        func=foo,
        target="llvm",
        task_inputs={
            "Index": tvm.nd.array(np.array([5], dtype="int32")),
        },
    )
    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )
        rpc_runner = measure_ctx.runner
        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = rpc_runner.run([minp], bress)
        assert mress[0].error_no == 0
# Allow running this test file directly; tvm.testing.main() dispatches to pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 15,000 | 34.049065 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_lower_tvm_builtin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm.script import tir as T
import numpy as np
@tvm.register_func("tvm.test_matmul")
def my_matmul(a, b, c):
    """Reference matmul registered as a packed func: writes dot(a, b) into c."""
    c.copyfrom(np.dot(a.numpy(), b.numpy()))
def check_packed_func(target="llvm"):
    """Lower a call_packed inside a parallel loop and check that
    LowerTVMBuiltin emits the expected tvm_stack_alloca let-bindings
    (arg_tcode, arg_value, array, shape) around the loop body.
    """
    ib = tvm.tir.ir_builder.create()
    m = n = k = 16
    #
    # Prepare buffer for a, b and c:
    #
    a = te.placeholder((m, k), name="a", dtype="float64")
    b = te.placeholder((k, n), name="b", dtype="float64")
    k = te.reduce_axis((0, k), name="k")
    c = te.compute((m, n), lambda i, j: te.sum(a[i, k] * b[k, j], axis=k), name="c")
    a_buffer = tvm.tir.decl_buffer(
        a.shape, a.dtype, name="a_buffer", offset_factor=1, strides=[te.var("s1"), 1]
    )
    b_buffer = tvm.tir.decl_buffer(
        b.shape, b.dtype, name="b_buffer", offset_factor=1, strides=[te.var("s2"), 1]
    )
    c_buffer = tvm.tir.decl_buffer(
        c.shape, c.dtype, name="c_buffer", offset_factor=1, strides=[te.var("s3"), 1]
    )
    with ib.for_range(0, 10, "i", kind="parallel"):
        ib.emit(tvm.tir.call_packed("tvm.test_matmul", a_buffer, b_buffer, c_buffer))
    stmt = ib.get()
    # Construct a valid IRModule to be lowered:
    mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([a_buffer, b_buffer, c_buffer], stmt))
    target = tvm.target.Target(target, host="llvm")
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", target))(mod)
    mod = tvm.tir.transform.Apply(lambda f: f.with_attr("global_symbol", "main"))(mod)
    mod = tvm.tir.transform.MakePackedAPI()(mod)
    # Do the lowering:
    mod = tvm.tir.transform.LowerTVMBuiltin()(mod)
    # Get the PrimFunc from module:
    prim_func = mod.functions.items()[0][1]
    node = prim_func.body
    # Recursively visit PrimFunc until we meet the for-loop:
    while True:
        if isinstance(
            node, (tvm.tir.AssertStmt, tvm.tir.LetStmt, tvm.tir.AttrStmt, tvm.tir.DeclBuffer)
        ):
            node = node.body
        elif isinstance(node, tvm.tir.SeqStmt):
            node = node[0]
        else:
            break
    # For-loop:
    assert isinstance(node, tvm.tir.stmt.For)
    #
    # let stack_tcode = tir.tvm_stack_alloca("arg_tcode", 4)
    #
    alloca_tcode = node.body
    assert isinstance(alloca_tcode, tvm.tir.LetStmt)
    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_tcode", 4
    )
    # Rebuild the LetStmt with the expected RHS and compare structurally, so
    # only the bound value (not the generated var name) is checked.
    expected_var = alloca_tcode.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_tcode.body)
    tvm.ir.assert_structural_equal(alloca_tcode, expected_stmt, map_free_vars=True)
    #
    # let stack_value = tir.tvm_stack_alloca("arg_value", 4)
    #
    alloca_value = alloca_tcode.body.body
    assert isinstance(alloca_value, tvm.tir.LetStmt)
    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "arg_value", 4
    )
    expected_var = alloca_value.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_value.body)
    tvm.ir.assert_structural_equal(alloca_value, expected_stmt, map_free_vars=True)
    #
    # let stack_array = tir.tvm_stack_alloca("array", 3)
    #
    alloca_array = alloca_value.body
    assert isinstance(alloca_array, tvm.tir.LetStmt)
    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "array", 3
    )
    expected_var = alloca_array.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_array.body)
    tvm.ir.assert_structural_equal(alloca_array, expected_stmt, map_free_vars=True)
    #
    # let stack_shape = tir.tvm_stack_alloca("shape", 12)
    #
    alloca_shape = alloca_array.body
    assert isinstance(alloca_shape, tvm.tir.LetStmt)
    expected_value = tvm.tir.call_intrin(
        "handle", tvm.ir.Op.get("tir.tvm_stack_alloca"), "shape", 12
    )
    expected_var = alloca_shape.var
    expected_stmt = tvm.tir.LetStmt(expected_var, expected_value, alloca_shape.body)
    tvm.ir.assert_structural_equal(alloca_shape, expected_stmt, map_free_vars=True)
def test_lower_packed_func():
    # Run the packed-func lowering check on both supported host targets.
    check_packed_func("llvm")
    check_packed_func("stackvm")
@tvm.testing.requires_llvm
def test_call_packed_return_non_i32():
    # This call packed that return non i32 types
    expected_value = np.array([1.2, 1.4], dtype="float32")
    def packed_echo(value):
        # Wrap testing.echo in a call_packed whose return dtype matches `value`.
        return tvm.tir.call_intrin(
            value.dtype, tvm.ir.Op.get("tir.tvm_call_packed"), "testing.echo", value
        )
    def build_tir():
        # Build a PrimFunc that stores the echoed f32 and handle results into Ab.
        Ab = tvm.tir.decl_buffer((2,), "float32")
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(Ab)
        # return f32
        # Aptr[0] = testing.echo(expected_value[0])
        Aptr[0] = packed_echo(tvm.tir.const(expected_value[0], "float32"))
        # return handle
        # let Aptr_var = testing.echo(Aptr) in Aptr_var[1] = expected_value[1]
        # NOTE(review): Aptr_var is bound but the store below writes through
        # Aptr; the let still exercises the handle-returning call — confirm intent.
        Aptr_var = ib.let("Aptr_dup", packed_echo(Aptr.asobject().data))
        ib.emit(tvm.tir.BufferStore(Aptr, tvm.tir.const(expected_value[1], "float32"), [1]))
        stmt = ib.get()
        return tvm.IRModule.from_expr(
            tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "packed_test")
        )
    mod = build_tir()
    f = tvm.build(mod, None, "llvm")
    a = tvm.nd.array(np.zeros(2, dtype="float32"))
    f(a)
    tvm.testing.assert_allclose(a.numpy(), expected_value)
def test_lower_overflow_int32():
    """Building a kernel with very large int32 index arithmetic must not crash."""
    @T.prim_func
    def variance4(rxplaceholder: T.Buffer((T.int64(1), T.int64(32), T.int64(25690112)), "float32")):
        T.func_attr({"global_symbol": "variance4", "tir.noalias": True})
        rxplaceholder_red = T.allocate([32], "float32", "global")
        T_subtract = T.allocate([822083584], "float32", "global")
        rxplaceholder_red_1 = T.Buffer((T.int64(32),), data=rxplaceholder_red)
        rxplaceholder_1 = T.Buffer((T.int64(822083584),), data=rxplaceholder.data)
        T_subtract_1 = T.Buffer((T.int64(822083584),), data=T_subtract)
        for ax1, ax2 in T.grid(32, 25690112):
            # ax1 * 25690112 + ax2 approaches the int32 limit (~2**31).
            cse_var_1: T.int32 = ax1 * 25690112 + ax2
            T_subtract_1[cse_var_1] = rxplaceholder_1[cse_var_1] - rxplaceholder_red_1[ax1]
    func = variance4
    tvm.build(func, target="llvm")  # should not crash
class TestLowerDeviceAllocate(tvm.testing.CompareBeforeAfter):
    """Device allocations are lowered to TVMBackend* calls
    This test validates the current behavior of LowerTVMBuiltin. This
    unit test may be improved in the future by addressing:
    - TVMScript always produces "handle" dtype for
    `T.tvm_throw_last_error`, while LowerTVMBuiltin outputs "int32"
    dtype.
    """
    transform = tvm.tir.transform.LowerTVMBuiltin()
    # NOTE: `before`/`expected` take no `self` — CompareBeforeAfter parses
    # their bodies as TVMScript rather than calling them as methods.
    def before():
        T.func_attr({"target": T.target("llvm")})
        T.attr("dummy", "device_type", 2)  # kDLCuda
        T.attr("dummy", "device_id", 0)
        ptr = T.allocate([16], "float32")
        buf = T.decl_buffer(16, "float32", data=ptr)
        buf[0] = 0.0
    def expected():
        T.func_attr({"target": T.target("llvm")})
        ptr: T.handle("float32") = T.TVMBackendAllocWorkspace(2, 0, T.uint64(64), 2, 32)
        T.attr(ptr, "storage_alignment", 64)
        if T.isnullptr(ptr):
            T.Call("int32", "tir.tvm_throw_last_error", [])
        buf = T.decl_buffer((16,), data=ptr)
        buf[0] = T.float32(0)
        if T.TVMBackendFreeWorkspace(2, 0, ptr) != 0:
            T.Call("int32", "tir.tvm_throw_last_error", [])
    def test_compare(self, before, expected, transform):
        # map_free_vars: the lowered pointer var differs between the two IRs.
        after = transform(before)
        tvm.ir.assert_structural_equal(after, expected, map_free_vars=True)
class TestLowerCPUAllocation(tvm.testing.CompareBeforeAfter):
    """CPU allocations can be handled at codegen time"""
    transform = tvm.tir.transform.LowerTVMBuiltin()
    # kDLCPU allocations are left untouched (before == expected).
    def before():
        T.func_attr({"target": T.target("llvm")})
        T.attr("dummy", "device_type", 1)  # kDLCPU
        T.attr("dummy", "device_id", 0)
        ptr = T.allocate([16], "float32")
        buf = T.decl_buffer(16, "float32", data=ptr)
        buf[0] = 0.0
    def expected():
        T.func_attr({"target": T.target("llvm")})
        ptr = T.allocate([16], "float32")
        buf = T.decl_buffer(16, "float32", data=ptr)
        buf[0] = 0.0
class TestLowerAllocateRequiresDeviceID(tvm.testing.CompareBeforeAfter):
    """Lowering a device allocation without a device_id attr must raise.
    Bug fix: `before` was byte-identical to the RequiresDeviceType test
    (it supplied device_id and omitted device_type), so both tests exercised
    the same missing-device_type path. Supply device_type and omit device_id
    so this test actually covers the missing-device_id error.
    """
    transform = tvm.tir.transform.LowerTVMBuiltin()
    def before():
        T.func_attr({"target": T.target("llvm")})
        T.attr("dummy", "device_type", 2)  # kDLCuda; device_id deliberately absent
        ptr = T.allocate([16], "float32")
        buf = T.decl_buffer(16, "float32", data=ptr)
        buf[0] = 0.0
    expected = tvm.TVMError
class TestLowerAllocateRequiresDeviceType(tvm.testing.CompareBeforeAfter):
    # Lowering sees device_id but no device_type attribute, so the
    # transform is expected to raise TVMError.
    transform = tvm.tir.transform.LowerTVMBuiltin()
    def before():
        T.func_attr({"target": T.target("llvm")})
        T.attr("dummy", "device_id", 0)
        ptr = T.allocate([16], "float32")
        buf = T.decl_buffer(16, "float32", data=ptr)
        buf[0] = 0.0
    expected = tvm.TVMError
# Allow running this test file directly; tvm.testing.main() dispatches to pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 9,898 | 33.371528 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_container.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import random
import tvm
import tvm.testing
import pickle
from tvm import te
from tvm import nd, relay
from tvm.runtime import container as _container
def test_adt_constructor():
    """ADT construction: length, type, slicing/indexing, and tag."""
    arr = nd.array([1, 2, 3])
    fields = [arr, arr]
    y = _container.ADT(0, fields)
    assert len(y) == 2
    assert isinstance(y, _container.ADT)
    # Bug fix: this comparison was previously a bare expression whose result
    # was discarded — it never asserted anything.
    assert y[0:1][-1] == arr
    assert y.tag == 0
    assert isinstance(arr, nd.NDArray)
def test_tuple_object():
    """A runtime tuple_object can be passed to a VM-evaluated Relay function."""
    x = relay.var(
        "x",
        type_annotation=relay.ty.TupleType(
            [relay.ty.TensorType((), "int32"), relay.ty.TensorType((), "int32")]
        ),
    )
    fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
    mod = tvm.IRModule.from_expr(fn)
    f = relay.create_executor(kind="vm", mod=mod, device=nd.cpu(), target="llvm").evaluate()
    value_tuple = _container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])
    # pass an ADT object to evaluate
    out = f(value_tuple)
    # fn returns the first tuple element, i.e. 11.
    tvm.testing.assert_allclose(out.numpy(), np.array(11))
def test_string():
    """tvm.runtime.String behaves like str and survives echo/JSON/pickle round-trips."""
    s = tvm.runtime.String("xyz")
    assert isinstance(s, tvm.runtime.String)
    assert isinstance(s, str)
    assert s.startswith("xy")
    assert s + "1" == "xyz1"
    # Passing through a PackedFunc must preserve the underlying TVM object.
    echoed = tvm.testing.echo(s)
    assert isinstance(echoed, tvm.runtime.String)
    assert s.__tvm_object__.same_as(echoed.__tvm_object__)
    assert s == echoed
    # JSON round-trip.
    from_json = tvm.ir.load_json(tvm.ir.save_json(echoed))
    assert isinstance(from_json, tvm.runtime.String)
    assert from_json == echoed
    # Pickle round-trip.
    from_pickle = pickle.loads(pickle.dumps(s))
    assert isinstance(from_pickle, tvm.runtime.String)
    assert s == from_pickle
def test_shape_tuple():
    """ShapeTuple mirrors its source shape and compares equal to list/tuple/ShapeTuple."""
    shape = [random.randint(-10, 10) for _ in range(5)]
    stuple = _container.ShapeTuple(shape)
    # Bug fix: this length check was previously a bare expression whose result
    # was discarded — it never asserted anything.
    assert len(stuple) == len(shape)
    for a, b in zip(stuple, shape):
        assert a == b
    # ShapeTuple vs. list
    assert stuple == list(shape)
    # ShapeTuple vs. tuple
    assert stuple == tuple(shape)
    # ShapeTuple vs. ShapeTuple
    assert stuple == _container.ShapeTuple(shape)
    # test pickle
    z = pickle.loads(pickle.dumps(stuple))
    assert isinstance(z, tvm.runtime.ShapeTuple)
    assert stuple == z
if __name__ == "__main__":
    # Use the standard test entry point (project convention) so newly added
    # tests are picked up automatically instead of being listed by hand.
    tvm.testing.main()
| 3,069 | 28.519231 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_auto_bind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
# 512x512 elementwise add-one kernel: the unscheduled input for the AutoBind tests.
@T.prim_func
def element_wise(var_A: T.handle, var_B: T.handle) -> None:
    A = T.match_buffer(var_A, [512, 512], dtype="float32")
    B = T.match_buffer(var_B, [512, 512], dtype="float32")
    for i, j in T.grid(512, 512):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] + 1.0
# Reduction with no spatial loop: scalar C = min over A[k]/B[k], initialized to 1.0.
@T.prim_func
def reduction_loop_only(
    A: T.Buffer(2, "float32"),
    B: T.Buffer(2, "float32"),
    C: T.Buffer((), "float32"),
) -> None:
    for i0 in T.serial(2):
        with T.block("C"):
            k0 = T.axis.reduce(2, i0)
            T.reads(A[k0], B[k0])
            T.writes(C[()])
            with T.init():
                C[()] = T.float32(1.0)
            C[()] = T.min(C[()], A[k0] / B[k0])
# Zero-dimensional addition: a single block with a trivial spatial axis.
@T.prim_func
def zero_dim_add(
    A: T.Buffer((), "float32"),
    B: T.Buffer((), "float32"),
    C: T.Buffer((), "float32"),
) -> None:
    with T.block("C"):
        vi = T.axis.spatial(1, 0)
        C[()] = A[()] + B[()]
def test_cuda_element_wise():
    """AutoBind must fuse the two 512 loops and bind them to blockIdx.x /
    threadIdx.x exactly as in `elementwise_0`."""
    @T.prim_func
    def elementwise_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
    ) -> None:
        # body
        # with T.block("root")
        for i_j_fused_0 in T.thread_binding(256, thread="blockIdx.x"):
            for i_j_fused_1 in T.thread_binding(1024, thread="threadIdx.x"):
                with T.block("C"):
                    vi = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) // 512)
                    vj = T.axis.spatial(512, (i_j_fused_0 * 1024 + i_j_fused_1) % 512)
                    T.reads(A[vi, vj])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] + T.float32(1)
    # Sampled thread-extent decision that reproduces the sketch above.
    decision_0 = [
        ("SampleCategorical", 5),
    ]
    mod = element_wise
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3080", host="llvm"),
        types=ms.schedule_rule.AutoBind,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[elementwise_0],
        expected_decisions=[decision_0],
    )
def test_cuda_reduction_loop_only():
    """With only a reduction loop, AutoBind must wrap the body in unit
    blockIdx.x/threadIdx.x bindings (no sampling decisions)."""
    @T.prim_func
    def reduction_loop_only_0(
        A: T.Buffer(2, "float32"),
        B: T.Buffer(2, "float32"),
        C: T.Buffer((), "float32"),
    ) -> None:
        for u_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
            for u_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
                for i0 in T.serial(2):
                    with T.block("C"):
                        k0 = T.axis.reduce(2, i0)
                        T.reads(A[k0], B[k0])
                        T.writes(C[()])
                        with T.init():
                            C[()] = T.float32(1)
                        C[()] = T.min(C[()], A[k0] / B[k0])
    mod = reduction_loop_only
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3080", host="llvm"),
        types=ms.schedule_rule.AutoBind,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[reduction_loop_only_0],
        expected_decisions=[[]],
    )
def test_cuda_zero_dim_add():
    """A zero-dim workload gets unit blockIdx.x/threadIdx.x bindings and no
    sampling decisions."""
    @T.prim_func
    def zero_dim_add_0(
        A: T.Buffer((), "float32"),
        B: T.Buffer((), "float32"),
        C: T.Buffer((), "float32"),
    ) -> None:
        for u_fused_0 in T.thread_binding(1, thread="blockIdx.x"):
            for u_fused_1 in T.thread_binding(1, thread="threadIdx.x"):
                with T.block("C"):
                    vi = T.axis.spatial(1, 0)
                    T.reads(A[()], B[()])
                    T.writes(C[()])
                    C[()] = A[()] + B[()]
    mod = zero_dim_add
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3080", host="llvm"),
        types=ms.schedule_rule.AutoBind,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[zero_dim_add_0],
        expected_decisions=[[]],
    )
if __name__ == "__main__":
    # Use the standard test entry point (project convention) so newly added
    # tests are picked up automatically instead of being listed by hand.
    tvm.testing.main()
| 5,263 | 30.710843 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_te_create_primfunc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import numpy as np
import tvm
import tvm.testing
from tvm import te, tir, topi, relay
from tvm.script import tir as T
import pytest
def test_unique_name_complete_block():
    """Two compute blocks both named "main" must be deduplicated as main/main_1."""
    A = te.placeholder((16, 16), name="A")
    B = te.compute((16, 16), lambda x, y: A[x, y] * 2, name="main")
    C = te.compute((16, 16), lambda x, y: B[x, y] + 1, name="main")
    sch = tir.Schedule(te.create_prim_func([A, C]), debug_mask="all")
    for block_name in ("main", "main_1"):
        assert isinstance(sch.get_sref(sch.get_block(block_name)), tir.schedule.StmtSRef)
def test_unique_name_reduction_block():
    """Two reduction blocks both named "sum" must be deduplicated as sum/sum_1."""
    k1 = te.reduce_axis((0, 16), "k1")
    k2 = te.reduce_axis((0, 16), "k2")
    A = te.placeholder((16, 16), name="A")
    B = te.compute((16,), lambda i: te.sum(A[i, k1], axis=k1), name="sum")
    C = te.compute((), lambda: te.sum(B[k2], axis=k2), name="sum")
    sch = tir.Schedule(te.create_prim_func([A, C]), debug_mask="all")
    for block_name in ("sum", "sum_1"):
        assert isinstance(sch.get_sref(sch.get_block(block_name)), tir.schedule.StmtSRef)
def _check_workload(te_workload, tir_workload, index_dtype_override=None):
    """Lower `te_workload` via create_prim_func and require structural
    equality with `tir_workload`; also check the result is schedulable."""
    func = te.create_prim_func(te_workload(), index_dtype_override)
    tvm.ir.assert_structural_equal(func, tir_workload)
    # make sure that we can create schedule from the func
    s = tir.Schedule(func, debug_mask="all")
    assert s
def te_matmul():
    """TE 128^3 matmul with B accessed transposed; returns [A, B, C]."""
    k = te.reduce_axis((0, 128), "k")
    A = te.placeholder((128, 128), name="A")
    B = te.placeholder((128, 128), name="B")
    C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
    return [A, B, C]
# Expected TIR lowering of te_matmul (default int32 index dtype).
@T.prim_func
def tir_matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i0, j0, k0 in T.grid(128, 128, 128):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            with T.init():
                C[i, j] = 0.0
            C[i, j] += A[i, k] * B[j, k]
# Expected TIR lowering of te_matmul with index_dtype_override="int64".
@T.prim_func
def tir_matmul_int64(
    A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    C: T.Buffer((T.int64(128), T.int64(128)), "float32"),
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    for i0, j0, k0 in T.grid(T.int64(128), T.int64(128), T.int64(128)):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            with T.init():
                C[i, j] = 0.0
            C[i, j] += A[i, k] * B[j, k]
def test_matmul():
    # te_matmul must lower to tir_matmul (structural equality).
    _check_workload(te_matmul, tir_matmul)
def test_matmul_int64():
    # Same workload, but all loop/index dtypes overridden to int64.
    _check_workload(te_matmul, tir_matmul_int64, index_dtype_override="int64")
def te_element_wise():
    """TE chain A -> *2 -> +1; the intermediate B becomes an alloc_buffer."""
    A = te.placeholder((128, 128), name="A")
    B = te.compute((128, 128), lambda x, y: A[x, y] * 2, name="B")
    C = te.compute((128, 128), lambda x, y: B[x, y] + 1, name="C")
    return [A, C]
# Expected TIR lowering of te_element_wise; B is an internal allocation.
@T.prim_func
def tir_element_wise(a: T.handle, c: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            B[i, j] = A[i, j] * 2.0
    for i0, j0 in T.grid(128, 128):
        with T.block():
            i, j = T.axis.remap("SS", [i0, j0])
            C[i, j] = B[i, j] + 1.0
def test_element_wise():
    # te_element_wise must lower to tir_element_wise (structural equality).
    _check_workload(te_element_wise, tir_element_wise)
def te_conv2d():
    """TE padded 3x3 conv2d (NCHW activations, HWIO-like weights)."""
    batch = 16
    in_channel = 16
    out_channel = 32
    size = 14
    kernel = 3
    A = te.placeholder((batch, in_channel, size, size), name="A")
    W = te.placeholder((in_channel, kernel, kernel, out_channel), name="W")
    # Zero-pad the spatial dims by 1 on each side via if_then_else.
    Apad = te.compute(
        (batch, in_channel, size + 2, size + 2),
        lambda nn, cc, yy, xx: tvm.tir.if_then_else(
            tvm.tir.all(yy >= 1, yy - 1 < size, xx >= 1, xx - 1 < size),
            A[nn, cc, yy - 1, xx - 1],
            0.0,
        ),
        name="Apad",
    )
    rc = te.reduce_axis((0, in_channel), name="rc")
    ry = te.reduce_axis((0, kernel), name="ry")
    rx = te.reduce_axis((0, kernel), name="rx")
    B = te.compute(
        (batch, out_channel, size, size),
        lambda nn, ff, yy, xx: te.sum(
            Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff], axis=[rc, ry, rx]
        ),
        name="B",
    )
    return [A, W, B]
# Expected TIR lowering of te_conv2d; Apad is an internal allocation.
@T.prim_func
def tir_conv2d(a: T.handle, w: T.handle, b: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    A = T.match_buffer(a, [16, 16, 14, 14])
    W = T.match_buffer(w, [16, 3, 3, 32])
    B = T.match_buffer(b, [16, 32, 14, 14])
    Apad = T.alloc_buffer([16, 16, 16, 16])
    for n, c, y, x in T.grid(16, 16, 16, 16):
        with T.block("Apad"):
            nn, cc, yy, xx = T.axis.remap("SSSS", [n, c, y, x])
            Apad[nn, cc, yy, xx] = T.if_then_else(
                1 <= yy and yy < 15 and 1 <= xx and xx < 15,
                A[nn, cc, yy - 1, xx - 1],
                0.0,
                dtype="float32",
            )
    for n, f, y, x, kc, ky, kx in T.grid(16, 32, 14, 14, 16, 3, 3):
        with T.block("B"):
            nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [n, f, y, x, kc, ky, kx])
            with T.init():
                B[nn, ff, yy, xx] = 0.0
            B[nn, ff, yy, xx] += Apad[nn, rc, yy + ry, xx + rx] * W[rc, ry, rx, ff]
def test_conv2d():
    # te_conv2d must lower to tir_conv2d (structural equality).
    _check_workload(te_conv2d, tir_conv2d)
def te_multi_output():
    """A single te.compute producing two outputs (B0, B1) with symbolic shape."""
    n = te.var("n")
    m = te.var("m")
    A0 = te.placeholder((m, n), name="A0")
    A1 = te.placeholder((m, n), name="A1")
    B0, B1 = te.compute((m, n), lambda i, j: (A0[i, j] + 2, A1[i, j] * 3), name="B")
    return [A0, A1, B0, B1]
# Expected TIR lowering of te_multi_output: one loop nest, two blocks B.v0/B.v1.
@T.prim_func
def tir_multi_output(a0: T.handle, a1: T.handle, b0: T.handle, b1: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    m = T.int32()
    n = T.int32()
    A0 = T.match_buffer(a0, (m, n))
    A1 = T.match_buffer(a1, (m, n))
    B0 = T.match_buffer(b0, (m, n))
    B1 = T.match_buffer(b1, (m, n))
    for i0, i1 in T.grid(m, n):
        with T.block("B.v0"):
            i, j = T.axis.remap("SS", [i0, i1])
            B0[i, j] = A0[i, j] + 2.0
        with T.block("B.v1"):
            i, j = T.axis.remap("SS", [i0, i1])
            B1[i, j] = A1[i, j] * 3.0
def test_multi_output():
    # te_multi_output must lower to tir_multi_output (structural equality).
    _check_workload(te_multi_output, tir_multi_output)
def te_extern():
    """TE extern op calling a packed cblas matmul on A and B."""
    A = te.placeholder((128, 128), name="A")
    B = te.placeholder((128, 128), name="B")
    C = te.extern(
        (128, 128),
        [A, B],
        lambda ins, outs: tvm.tir.call_packed(
            "tvm.contrib.cblas.matmul", ins[0], ins[1], outs[0], 0, 0
        ),
        name="C",
    )
    return [A, B, C]
# Expected TIR lowering of te_extern: an opaque block evaluating the packed
# call, with each buffer wrapped in tvm_stack_make_array and carrying a
# symbolic elem_offset.
@T.prim_func
def tir_extern(a: T.handle, b: T.handle, c: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    off1 = te.var("elem_offset")
    off2 = te.var("elem_offset_1")
    off3 = te.var("elem_offset_2")
    A = T.match_buffer(a, (128, 128), elem_offset=off1)
    B = T.match_buffer(b, (128, 128), elem_offset=off2)
    C = T.match_buffer(c, (128, 128), elem_offset=off3)
    # body
    with T.block("C"):
        T.reads([A[0:128, 0:128], B[0:128, 0:128]])
        T.writes([C[0:128, 0:128]])
        T.evaluate(
            T.tvm_call_packed(
                "tvm.contrib.cblas.matmul",
                T.tvm_stack_make_array(
                    A.data,
                    T.tvm_stack_make_shape(128, 128, dtype="handle"),
                    0,
                    2,
                    0.0,
                    off1,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    B.data,
                    T.tvm_stack_make_shape(128, 128, dtype="handle"),
                    0,
                    2,
                    0.0,
                    off2,
                    dtype="handle",
                ),
                T.tvm_stack_make_array(
                    C.data,
                    T.tvm_stack_make_shape(128, 128, dtype="handle"),
                    0,
                    2,
                    0.0,
                    off3,
                    dtype="handle",
                ),
                0,
                0,
                dtype="int32",
            )
        )
def test_extern():
    # te_extern must lower to tir_extern (structural equality).
    _check_workload(te_extern, tir_extern)
def te_reordered_matmul():
    """Same matmul as te_matmul, but with the output C listed first."""
    k = te.reduce_axis((0, 128), "k")
    A = te.placeholder((128, 128), name="A")
    B = te.placeholder((128, 128), name="B")
    C = te.compute((128, 128), lambda x, y: te.sum(A[x, k] * B[y, k], axis=k), name="C")
    return [C, A, B]
# Expected lowering of te_reordered_matmul: parameter order (c, a, b) must
# follow the tensor order given to create_prim_func.
@T.prim_func
def tir_reordered_matmul(c: T.handle, a: T.handle, b: T.handle) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i0, j0, k0 in T.grid(128, 128, 128):
        with T.block():
            i, j, k = T.axis.remap("SSR", [i0, j0, k0])
            with T.init():
                C[i, j] = 0.0
            C[i, j] += A[i, k] * B[j, k]
def test_arg_order():
    # Argument order of the PrimFunc must match the te tensor list order.
    _check_workload(te_reordered_matmul, tir_reordered_matmul)
def te_scan():
    """A cumulative-sum scan workload; ScanOp is unsupported by create_prim_func."""
    m = te.var("m")
    n = te.var("n")
    X = te.placeholder((m, n), name="X")
    s_state = te.placeholder((m, n))
    s_init = te.compute((1, n), lambda _, i: X[0, i])
    s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i])
    s_scan = tvm.te.scan(s_init, s_update, s_state, inputs=[X])
    return [X, s_scan]
def test_error_reporting():
    """create_prim_func must reject the scan workload with a clear TypeError."""
    # Idiom fix: the previous try/except/assert-False dance is exactly what
    # pytest.raises expresses directly (pytest is already imported above).
    with pytest.raises(TypeError, match="Unsupported Operation: ScanOp"):
        te.create_prim_func(te_scan())
def test_constant():
    """A zero-dim constant tensor consumed via ProducerLoad builds and runs."""
    M = 11
    A = te.placeholder((M,), name="A")
    B = te.compute(tuple(), lambda: 2, name="B")
    # Manually craft ProducerLoad because `B[]` is not allowed.
    C = te.compute(
        (M,), lambda x: A[x] + tvm.tir.expr.ProducerLoad(B, []), name="C", tag="broadcast"
    )
    func = te.create_prim_func([C, A])
    func = tvm.build(func)
    a_np = np.random.uniform(size=(M,)).astype(A.dtype)
    c = tvm.nd.array(np.zeros(M, dtype=C.dtype))
    # Cleanup: the call's return value was previously bound to an unused `x`.
    func(c, tvm.nd.array(a_np))
    tvm.testing.assert_allclose(a_np + 2, c.numpy())
def test_data_dependent_access():
    """Indexing one tensor by another (A[B[i]]) must build and run correctly."""
    A = te.placeholder((10,), name="A")
    B = te.placeholder((10,), name="B", dtype="int32")
    C = te.compute((10,), lambda i: A[B[i]])
    func = te.create_prim_func([C, A, B])
    func = tvm.build(func)
    a_np = np.random.uniform(size=(10,)).astype(A.dtype)
    b_np = np.arange(10, dtype=B.dtype)
    c = tvm.nd.array(np.zeros(10, dtype=C.dtype))
    func(c, tvm.nd.array(a_np), tvm.nd.array(b_np))
    # With B = arange(10) the gather is the identity permutation.
    tvm.testing.assert_allclose(a_np[b_np], c.numpy())
def test_select_simplify():
    """Adaptive pooling must lower without leftover Select exprs or free vars."""
    placeholder = te.placeholder([1, 128, 10, 10, 4], dtype="float32")
    tensor = topi.nn.adaptive_pool(placeholder, [1, 1], "avg", "NCHW4c")
    result = te.create_prim_func([placeholder, tensor])
    # Checks are done textually on the printed TVMScript.
    script_func = result.script()
    # There should be no Select
    assert script_func.find("Select") == -1
    # There should be no undefined vars
    assert script_func.find("Var") == -1
def test_tensor_attr():
    """Tensor attrs (layout_free_placeholders) must survive a script round-trip."""
    k = te.reduce_axis((0, 128), "k")
    A = te.placeholder((128, 128), name="A")
    B = te.placeholder((128, 128), name="B")
    C = te.compute(
        (128, 128),
        lambda x, y: te.sum(A[x, k] * B[y, k], axis=k),
        name="C",
        attrs={"layout_free_placeholders": [B]},
    )
    func = te.create_prim_func([A, B, C])
    # Re-parse the printed script and require structural equality.
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)
# Expected lowering of test_tensor_layout_attr with the default index dtype.
# NOTE(review): the te-level "layout_free_placeholders" attr on B appears here
# as the function attr "layout_free_buffers": [1] (B is param index 1), while
# the attr on intermediate C is kept as a block attr on "D".
@T.prim_func
def expected_layout_attr(
    A: T.Buffer((128, 128), "float32"),
    B: T.Buffer((128, 128), "float32"),
    D: T.Buffer((128, 128), "float32"),
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
    # C is the intermediate matmul result, allocated inside the function.
    C = T.alloc_buffer([128, 128], dtype="float32")
    for i0, i1, i2 in T.grid(128, 128, 128):
        with T.block("C"):
            x, y, k = T.axis.remap("SSR", [i0, i1, i2])
            with T.init():
                C[x, y] = T.float32(0)
            C[x, y] = C[x, y] + A[x, k] * B[y, k]
    for i0, i1 in T.grid(128, 128):
        with T.block("D"):
            T.block_attr({"layout_free_placeholders": [C]})
            x, y = T.axis.remap("SS", [i0, i1])
            D[x, y] = C[x, y] + T.float32(1)
# Same workload as expected_layout_attr, but with index_dtype_override="int64":
# every extent, loop variable and constant becomes T.int64.
@T.prim_func
def expected_layout_attr_int64(
    A: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    B: T.Buffer((T.int64(128), T.int64(128)), "float32"),
    D: T.Buffer((T.int64(128), T.int64(128)), "float32"),
):
    T.func_attr({"global_symbol": "main", "tir.noalias": True, "layout_free_buffers": [1]})
    C = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
    for x, y, k in T.grid(T.int64(128), T.int64(128), T.int64(128)):
        with T.block("C"):
            v_x, v_y, v_k = T.axis.remap("SSR", [x, y, k])
            T.reads(A[v_x, v_k], B[v_y, v_k])
            T.writes(C[v_x, v_y])
            with T.init():
                C[v_x, v_y] = T.float32(0)
            C[v_x, v_y] = C[v_x, v_y] + A[v_x, v_k] * B[v_y, v_k]
    for x, y in T.grid(T.int64(128), T.int64(128)):
        with T.block("D"):
            T.block_attr({"layout_free_placeholders": [C]})
            v_x, v_y = T.axis.remap("SS", [x, y])
            T.reads(C[v_x, v_y])
            T.writes(D[v_x, v_y])
            D[v_x, v_y] = C[v_x, v_y] + T.float32(1)
@pytest.mark.parametrize(
    "index_dtype_override, expected",
    [(None, expected_layout_attr), ("int64", expected_layout_attr_int64)],
)
def test_tensor_layout_attr(index_dtype_override, expected):
    """layout_free_placeholders attrs on a reduction stage (B) and an
    elementwise stage (C) should lower to the expected TIR."""
    axis_k = te.reduce_axis((0, 128), "k")
    mat_a = te.placeholder((128, 128), name="A")
    mat_b = te.placeholder((128, 128), name="B")
    matmul = te.compute(
        (128, 128),
        lambda x, y: te.sum(mat_a[x, axis_k] * mat_b[y, axis_k], axis=axis_k),
        name="C",
        attrs={"layout_free_placeholders": [mat_b]},
    )
    biased = te.compute(
        (128, 128),
        lambda x, y: matmul[x, y] + 1,
        name="D",
        attrs={"layout_free_placeholders": [matmul]},
    )
    func = te.create_prim_func([mat_a, mat_b, biased], index_dtype_override=index_dtype_override)
    tvm.ir.assert_structural_equal(func, expected)
def te_argmax_idx_val():
    """Build an (index, value) argmax workload with a two-output comm_reducer."""

    def combine(left, right):
        # Keep the left pair whenever its value is >= the right's.
        idx_sel = tvm.tir.Select(left[1] >= right[1], left[0], right[0])
        val_sel = tvm.tir.Select(left[1] >= right[1], left[1], right[1])
        return idx_sel, val_sel

    def identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
        # Identity element: index -1 paired with the minimum value.
        return tvm.tir.const(-1, dtype0), tvm.te.min_value(dtype1)

    argmax = te.comm_reducer(combine, identity, name="argmax")
    rows = te.var("m")
    cols = te.var("n")
    idx = te.placeholder((rows, cols), name="idx", dtype="int32")
    val = te.placeholder((rows, cols), name="val", dtype="float32")
    reduce_k = te.reduce_axis((0, cols), "k")
    max_idx, max_val = te.compute(
        (rows,), lambda i: argmax((idx[i, reduce_k], val[i, reduce_k]), axis=reduce_k), name="argmax"
    )
    return [idx, val, max_idx, max_val]
# Expected TIR for te_argmax_idx_val: one reduction block that keeps the
# running (index, value) pair, initialized to (-1, min float32).
@T.prim_func
def tir_argmax_idx_val(
    var_idx: T.handle, var_val: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    m = T.int32()
    n = T.int32()
    idx = T.match_buffer(var_idx, [m, n], dtype="int32")
    val = T.match_buffer(var_val, [m, n], dtype="float32")
    argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="int32")
    argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="float32")
    for i0, i1 in T.grid(m, n):
        with T.block("argmax"):
            i, k = T.axis.remap("SR", [i0, i1])
            T.reads(val[i, k], idx[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = T.int32(-1)
                argmax_v1[i] = T.min_value("float32")
            # Keep the stored pair when its value is still >= the candidate.
            v_argmax_v0: T.int32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v0[i], idx[i, k])
            v_argmax_v1: T.float32 = T.Select(argmax_v1[i] >= val[i, k], argmax_v1[i], val[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
def te_argmax_val_idx():
    """Build a (value, index) argmax workload with a two-output comm_reducer."""

    def combine(left, right):
        # Keep the left pair whenever its value is >= the right's.
        val_sel = tvm.tir.Select(left[0] >= right[0], left[0], right[0])
        idx_sel = tvm.tir.Select(left[0] >= right[0], left[1], right[1])
        return val_sel, idx_sel

    def identity(dtype0: tvm.DataType, dtype1: tvm.DataType):
        # Identity element: minimum value paired with index -1.
        return tvm.te.min_value(dtype0), tvm.tir.const(-1, dtype1)

    argmax = te.comm_reducer(combine, identity, name="argmax")
    rows = te.var("m")
    cols = te.var("n")
    val = te.placeholder((rows, cols), name="val", dtype="float32")
    idx = te.placeholder((rows, cols), name="idx", dtype="int32")
    reduce_k = te.reduce_axis((0, cols), "k")
    max_val, max_idx = te.compute(
        (rows,), lambda i: argmax((val[i, reduce_k], idx[i, reduce_k]), axis=reduce_k), name="argmax"
    )
    return [val, idx, max_val, max_idx]
# Expected TIR for te_argmax_val_idx: same reduction as tir_argmax_idx_val but
# with the (value, index) output order swapped.
@T.prim_func
def tir_argmax_val_idx(
    var_val: T.handle, var_idx: T.handle, var_argmax_v0: T.handle, var_argmax_v1: T.handle
) -> None:
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    m = T.int32()
    n = T.int32()
    val = T.match_buffer(var_val, [m, n], dtype="float32")
    idx = T.match_buffer(var_idx, [m, n], dtype="int32")
    argmax_v0 = T.match_buffer(var_argmax_v0, [m], dtype="float32")
    argmax_v1 = T.match_buffer(var_argmax_v1, [m], dtype="int32")
    for i0, i1 in T.grid(m, n):
        with T.block("argmax"):
            i, k = T.axis.remap("SR", [i0, i1])
            T.reads(val[i, k], idx[i, k])
            T.writes(argmax_v0[i], argmax_v1[i])
            with T.init():
                argmax_v0[i] = T.min_value("float32")
                argmax_v1[i] = T.int32(-1)
            # Keep the stored pair when its value is still >= the candidate.
            v_argmax_v0: T.float32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v0[i], val[i, k])
            v_argmax_v1: T.int32 = T.Select(argmax_v0[i] >= val[i, k], argmax_v1[i], idx[i, k])
            argmax_v0[i] = v_argmax_v0
            argmax_v1[i] = v_argmax_v1
def test_argmax_idx_val():
    # Compare the lowered (index, value) argmax against its expected TIR.
    _check_workload(te_argmax_idx_val, tir_argmax_idx_val)
def test_argmax_val_idx():
    # Compare the lowered (value, index) argmax against its expected TIR.
    _check_workload(te_argmax_val_idx, tir_argmax_val_idx)
def test_int64_indices():
    """An int64 shape var must propagate int64 to the loop var, min and
    extent of the generated loop."""
    extent = te.var("n", "int64")
    inp = te.placeholder((extent,), name="A")
    out = te.compute(inp.shape, lambda *i: inp(*i) + 1, name="B")
    prim_func = te.create_prim_func([inp, out])
    loop = prim_func.body.block.body
    for part in (loop.loop_var, loop.min, loop.extent):
        assert part.dtype == "int64"
def test_zero_dim_add():
    """Adding two 0-d tensors should lower to a single-iteration block over
    scalar buffers indexed with the empty tuple."""

    def te_func():
        # 0-d placeholders: shape is the empty tuple.
        a = te.placeholder((), name="a", dtype="int32")
        b = te.placeholder((), name="b", dtype="int32")
        c = te.compute(a.shape, lambda *i: a(*i) + b(*i), name="c")
        return [a, b, c]

    @T.prim_func
    def expected(
        a: T.Buffer((), "int32"),
        b: T.Buffer((), "int32"),
        c: T.Buffer((), "int32"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            with T.block("c"):
                # A 0-d compute still gets one trivial spatial axis.
                vi = T.axis.spatial(1, 0)
                T.reads(a[()], b[()])
                T.writes(c[()])
                c[()] = a[()] + b[()]

    _check_workload(te_func, expected)
def te_reshape():
    """Reshape whose input shape is int64 while the target shape is plain
    int — a mix that TOPI can produce, so it must be supported."""
    in_shape = (tvm.tir.IntImm("int64", 2), tvm.tir.IntImm("int64", 4))
    src = te.placeholder(in_shape, name="A")
    return [src, topi.reshape(src, (4, 2))]
# Expected TIR for te_reshape: the flattened index (ax0 * 2 + ax1) is mapped
# back into the (2, 4) input coordinates, with all arithmetic in int64.
@T.prim_func
def tir_reshape(
    A: T.Buffer((T.int64(2), T.int64(4)), "float32"),
    T_reshape: T.Buffer((T.int64(4), T.int64(2)), "float32"),
):
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    for i0, i1 in T.grid(T.int64(4), T.int64(2)):
        with T.block("T_reshape"):
            ax0, ax1 = T.axis.remap("SS", [i0, i1])
            T.reads(
                A[
                    (ax0 * T.int64(2) + ax1) % T.int64(8) // T.int64(4),
                    (ax0 * T.int64(2) + ax1) % T.int64(4),
                ]
            )
            T.writes(T_reshape[ax0, ax1])
            T_reshape[ax0, ax1] = A[
                (ax0 * T.int64(2) + ax1) % T.int64(8) // T.int64(4),
                (ax0 * T.int64(2) + ax1) % T.int64(4),
            ]
def test_reshape():
    # int64 index override so the generated TIR matches tir_reshape's
    # T.int64 constants.
    _check_workload(te_reshape, tir_reshape, index_dtype_override="int64")
# Expected lowering of relay.argmax(axis=1) over a (1, 64, 56, 56) uint8
# tensor: a two-stage schedule with an (index, value) reduction followed by a
# copy of the index component into the output.
@T.prim_func
def argmax_expected(
    p0: T.Buffer((T.int64(1), T.int64(64), T.int64(56), T.int64(56)), "uint8"),
    p0_red: T.Buffer((T.int64(1), T.int64(56), T.int64(56)), "int32"),
):
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    # Temporaries holding the running argmax index (v0) and value (v1).
    p0_red_temp_v0 = T.alloc_buffer([T.int64(1), T.int64(56), T.int64(56)], dtype="int32")
    p0_red_temp_v1 = T.alloc_buffer([T.int64(1), T.int64(56), T.int64(56)], dtype="uint8")
    for ax0, ax1, ax2, k1 in T.grid(T.int64(1), T.int64(56), T.int64(56), T.int64(64)):
        with T.block("p0_red_temp"):
            v_ax0, v_ax1, v_ax2, v_k1 = T.axis.remap("SSSR", [ax0, ax1, ax2, k1])
            T.reads(p0[v_ax0, v_k1, v_ax1, v_ax2])
            T.writes(p0_red_temp_v0[v_ax0, v_ax1, v_ax2], p0_red_temp_v1[v_ax0, v_ax1, v_ax2])
            with T.init():
                p0_red_temp_v0[v_ax0, v_ax1, v_ax2] = -1
                p0_red_temp_v1[v_ax0, v_ax1, v_ax2] = T.uint8(0)
            # Keep the stored index unless the candidate's value is larger,
            # or equal with a larger index.
            v_p0_red_temp_v0: T.int64 = T.Select(
                p0_red_temp_v1[v_ax0, v_ax1, v_ax2] > p0[v_ax0, v_k1, v_ax1, v_ax2]
                or (
                    p0_red_temp_v1[v_ax0, v_ax1, v_ax2] == p0[v_ax0, v_k1, v_ax1, v_ax2]
                    and T.Cast("int64", p0_red_temp_v0[v_ax0, v_ax1, v_ax2]) < v_k1
                ),
                T.Cast("int64", p0_red_temp_v0[v_ax0, v_ax1, v_ax2]),
                v_k1,
            )
            v_p0_red_temp_v1: T.uint8 = T.Select(
                p0_red_temp_v1[v_ax0, v_ax1, v_ax2] > p0[v_ax0, v_k1, v_ax1, v_ax2],
                p0_red_temp_v1[v_ax0, v_ax1, v_ax2],
                p0[v_ax0, v_k1, v_ax1, v_ax2],
            )
            p0_red_temp_v0[v_ax0, v_ax1, v_ax2] = T.Cast("int32", v_p0_red_temp_v0)
            p0_red_temp_v1[v_ax0, v_ax1, v_ax2] = v_p0_red_temp_v1
    for ax0, ax1, ax2 in T.grid(T.int64(1), T.int64(56), T.int64(56)):
        with T.block("p0_red"):
            v_ax0, v_ax1, v_ax2 = T.axis.remap("SSS", [ax0, ax1, ax2])
            T.reads(p0_red_temp_v0[v_ax0, v_ax1, v_ax2])
            T.writes(p0_red[v_ax0, v_ax1, v_ax2])
            p0_red[v_ax0, v_ax1, v_ax2] = p0_red_temp_v0[v_ax0, v_ax1, v_ax2]
def test_argmax():
    """Lower relay.argmax over axis=1 and compare against argmax_expected."""
    data = relay.var("data", shape=(1, 64, 56, 56), dtype="uint8")
    mod = tvm.IRModule.from_expr(relay.argmax(data, axis=1))
    target = tvm.target.Target("llvm")
    opt_mod, _ = relay.optimize(mod, params={}, target=target)
    lowered = relay.backend.te_compiler.lower_to_primfunc(opt_mod["main"].body.op, target)
    tvm.ir.assert_structural_equal(lowered, argmax_expected)
def te_resize2d_symbolic():
    """resize2d workload with a symbolic (oh, ow) output size."""
    out_h = tir.Var("oh", "int64")
    out_w = tir.Var("ow", "int64")
    src = te.placeholder((2, 3, 128, 128), "float32", name="A")
    resized = topi.image.resize2d(
        src,
        (0.0, 0.0, 0.0, 0.0),  # roi
        size=(out_h, out_w),
        method="nearest_neighbor",
        coordinate_transformation_mode="asymmetric",
        rounding_method="round",
    )
    return [src, resized]
# Expected TIR for te_resize2d_symbolic: output size (oh, ow) is symbolic, so
# the output buffer is matched from a handle; each output pixel rounds its
# source coordinate and clamps it into [0, 127].
@T.prim_func
def tir_resize2d_symbolic(
    A: T.Buffer((T.int64(2), T.int64(3), T.int64(128), T.int64(128)), "float32"),
    var_resize: T.handle,
):
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    oh = T.int64()
    ow = T.int64()
    resize = T.match_buffer(var_resize, [T.int64(2), T.int64(3), oh, ow], dtype="float32")
    for i0, i1, i2, i3 in T.grid(T.int64(2), T.int64(3), oh, ow):
        with T.block("resize"):
            v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(A[v_i0, v_i1, T.int64(0) : T.int64(128), T.int64(0) : T.int64(128)])
            T.writes(resize[v_i0, v_i1, v_i2, v_i3])
            # Nearest-neighbor with "asymmetric" coordinates: scale, round,
            # then clamp the source index into the valid range.
            resize[v_i0, v_i1, v_i2, v_i3] = A[
                v_i0,
                v_i1,
                T.max(
                    T.min(
                        T.Cast(
                            "int64",
                            T.round(
                                T.float32(128) / T.Cast("float32", oh) * T.Cast("float32", v_i2),
                                dtype="float32",
                            ),
                        ),
                        T.int64(127),
                    ),
                    T.int64(0),
                ),
                T.max(
                    T.min(
                        T.Cast(
                            "int64",
                            T.round(
                                T.float32(128) / T.Cast("float32", ow) * T.Cast("float32", v_i3),
                                dtype="float32",
                            ),
                        ),
                        T.int64(127),
                    ),
                    T.int64(0),
                ),
            ]
def test_resize2d_symbolic():
    # Uses int64 indices to match the T.int64 constants in the expected TIR.
    _check_workload(te_resize2d_symbolic, tir_resize2d_symbolic, index_dtype_override="int64")
def test_extern_with_explicit_buffer_access():
    """te.extern bodies that access input elements directly (ins[2][0])
    should lower to a single opaque block with explicit read/write regions."""

    def te_extern():
        A = te.placeholder((128, 128), name="A")
        B = te.placeholder((128, 128), name="B")
        P = te.placeholder((1,), name="P")
        C = te.extern(
            (128, 128),
            [A, B, P],
            # Mixes raw data pointers with an element access on ins[2].
            lambda ins, outs: tvm.tir.call_extern(
                "", "myfunc", ins[0].data, ins[1].data, outs[0].data, ins[2][0]
            ),
            name="C",
        )
        return [A, B, P, C]

    @T.prim_func
    def tir_extern(var_A: T.handle, var_B: T.handle, var_P: T.handle, var_C: T.handle):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(var_A, [128, 128], dtype="float32", offset_factor=1)
        B = T.match_buffer(var_B, [128, 128], dtype="float32", offset_factor=1)
        P = T.match_buffer(var_P, [1], dtype="float32", offset_factor=1)
        C = T.match_buffer(var_C, [128, 128], dtype="float32", offset_factor=1)
        # Opaque block: regions are declared conservatively over whole buffers.
        with T.block("C"):
            T.reads(A[0:128, 0:128], B[0:128, 0:128], P[0])
            T.writes(C[0:128, 0:128])
            T.call_extern("myfunc", A.data, B.data, C.data, P[0], dtype="")

    _check_workload(te_extern, tir_extern)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 27,754 | 34.132911 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_te_verify_compute.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_verify_compute():
    """Check te.compute's body verification.

    Valid: a top-level reduction (f1) and a plain element-wise body (f2).
    Invalid: reductions that are not the top-level expression (f3, f4), a
    batch compute mixing reduction and non-reduction outputs (f5), and
    batch reductions over unequal reduce axes (f6).

    Fixes: removed the unused ``as ex`` bindings and unused result
    variables, and dropped the success-path try/except wrappers that
    converted an unexpected TVMError into a bare ``assert False`` —
    letting the error propagate keeps the diagnostic.
    """
    n = te.size_var("n")
    m = te.size_var("m")
    A = te.placeholder((n, m), name="A")
    k = te.reduce_axis((0, m), "k")
    k_ = te.reduce_axis((0, m - 1), "k_")
    f1 = lambda i: te.sum(A[i, k], axis=k)
    f2 = lambda i: A[i, 0] + 1
    f3 = lambda i: te.sum(A[i, k], axis=k) + 1
    f4 = lambda i: A[i, 0] * (te.sum(A[i, k], axis=k) + 1)
    f5 = lambda i: (te.sum(A[i, k], axis=k), A[i, 0] + 1)
    f6 = lambda i: (te.sum(A[i, k], axis=k), te.sum(A[i, k_], axis=k_))

    # Valid: top-level reduction. Any TVMError propagates and fails the test.
    te.compute((n,), f1, name="B")

    # Valid: plain element-wise body.
    te.compute((n,), f2, name="B")

    # Invalid: reduction is not the top-level expression.
    try:
        te.compute((n,), f3, name="B")
        assert False
    except tvm._ffi.base.TVMError:
        pass

    # Invalid: reduction nested inside a multiplication.
    try:
        te.compute((n,), f4, name="B")
        assert False
    except tvm._ffi.base.TVMError:
        pass

    # Invalid: batch compute mixing a reduction with a non-reduction output.
    try:
        te.compute((n,), f5, name="B")
        assert False
    except tvm._ffi.base.TVMError:
        pass

    # Invalid: batch reductions over different reduce axes.
    try:
        te.compute((n,), f6, name="B")
        assert False
    except tvm._ffi.base.TVMError:
        pass
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_verify_compute()
| 2,428 | 28.26506 | 71 | py |
tvm | tvm-main/tests/python/unittest/test_tir_constructor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
def test_expr_constructor():
    """Smoke-test the Python constructors of the core TIR expression nodes.

    Fix: compare against ``None`` with ``is`` rather than ``==`` (PEP 8
    E711); behavior is unchanged since ``combiner`` is ``None`` here.
    """
    x = tvm.tir.Var("xx", "float32")
    assert isinstance(x, tvm.tir.Var)
    assert x.name == "xx"

    x = tvm.tir.Reduce(None, [1], [tvm.tir.IterVar((0, 1), "x", 2)], None, 0)
    assert isinstance(x, tvm.tir.Reduce)
    assert x.combiner is None
    assert x.value_index == 0

    x = tvm.tir.FloatImm("float32", 1.0)
    assert isinstance(x, tvm.tir.FloatImm)
    assert x.value == 1.0
    assert x.dtype == "float32"

    x = tvm.tir.IntImm("int64", 2)
    assert isinstance(x, tvm.tir.IntImm)
    assert x.value == 2
    assert x.dtype == "int64"

    x = tvm.tir.StringImm("xyza")
    assert isinstance(x, tvm.tir.StringImm)
    assert x.value == "xyza"

    x = tvm.tir.Cast("float32", tvm.tir.IntImm("uint32", 1))
    assert isinstance(x, tvm.tir.Cast)
    assert x.dtype == "float32"
    assert x.value.value == 1

    # All binary arithmetic/comparison nodes share the same (a, b) signature.
    a = tvm.tir.const(1.0, dtype="float32")
    b = te.var("x", dtype="float32")
    for cls in [
        tvm.tir.Add,
        tvm.tir.Sub,
        tvm.tir.Mul,
        tvm.tir.Div,
        tvm.tir.Mod,
        tvm.tir.Min,
        tvm.tir.Max,
        tvm.tir.LT,
        tvm.tir.LE,
        tvm.tir.GT,
        tvm.tir.GE,
    ]:
        x = cls(a, b)
        assert isinstance(x, cls)
        assert x.a == a
        assert x.b.same_as(b)

    # Logical nodes take boolean operands.
    a = tvm.runtime.convert(te.var("x") > 1)
    b = tvm.runtime.convert(te.var("x") == 1)
    for cls in [tvm.tir.And, tvm.tir.Or]:
        x = cls(a, b)
        assert isinstance(x, cls)
        assert x.a == a
        assert x.b.same_as(b)

    x = tvm.tir.Not(a)
    assert isinstance(x, tvm.tir.Not)
    assert x.a == a

    x = tvm.tir.Select(a, a, b)
    assert isinstance(x, tvm.tir.Select)
    assert x.true_value == a
    assert x.false_value == b
    assert x.condition == a

    buffer_var = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("float32")))
    buffer = tvm.tir.decl_buffer([16], "float32", data=buffer_var)
    x = tvm.tir.BufferLoad(buffer, [1])
    assert isinstance(x, tvm.tir.BufferLoad)
    assert x.dtype == "float32"
    assert x.buffer == buffer
    assert x.buffer.data == buffer_var
    assert list(x.indices) == [1]

    x = tvm.tir.Ramp(1, 2, 10)
    assert isinstance(x, tvm.tir.Ramp)
    assert x.base.value == 1
    assert x.stride.value == 2
    assert x.lanes == 10

    x = tvm.tir.Broadcast(a, 10)
    assert isinstance(x, tvm.tir.Broadcast)
    assert x.value == a
    assert x.lanes == 10

    x = tvm.tir.Shuffle([a], [0])
    assert isinstance(x, tvm.tir.Shuffle)
    assert x.vectors[0] == a
    assert x.indices[0].value == 0

    x = tvm.tir.Call("float32", "tir.call_extern", [tvm.tir.StringImm("xyz"), a])
    assert isinstance(x, tvm.tir.Call)
    assert x.dtype == "float32"
    assert x.op.name == "tir.call_extern"
    assert x.args[1] == a

    v = te.var("aa")
    x = tvm.tir.Let(v, 1, v)
    assert x.var == v
    assert x.value.value == 1
    assert x.body == v
def test_stmt_constructor():
    """Smoke-test the Python constructors of the core TIR statement nodes."""
    let_var = te.var("aa")
    nop_stmt = tvm.tir.Evaluate(1)

    stmt = tvm.tir.LetStmt(let_var, 1, tvm.tir.Evaluate(1))
    assert isinstance(stmt, tvm.tir.LetStmt)
    assert stmt.var == let_var
    assert stmt.value.value == 1
    assert isinstance(stmt.body, tvm.tir.Evaluate)

    stmt = tvm.tir.AttrStmt(let_var == 1, "xx", 1, tvm.tir.Evaluate(1))
    assert isinstance(stmt, tvm.tir.AttrStmt)
    assert stmt.value.value == 1

    stmt = tvm.tir.AssertStmt(tvm.tir.const(1, "uint1"), tvm.runtime.convert("hellow"), nop_stmt)
    assert isinstance(stmt, tvm.tir.AssertStmt)
    assert stmt.body == nop_stmt

    stmt = tvm.tir.For(te.var("x"), 0, 10, tvm.tir.ForKind.SERIAL, nop_stmt)
    assert isinstance(stmt, tvm.tir.For)
    assert stmt.min.value == 0
    assert stmt.extent.value == 10
    assert stmt.body == nop_stmt

    # BufferStore into a boolean buffer.
    bool_ptr = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("uint1")))
    bool_buf = tvm.tir.decl_buffer([16], "uint1", data=bool_ptr)
    stmt = tvm.tir.BufferStore(bool_buf, tvm.tir.IntImm("bool", 1), [10])
    assert isinstance(stmt, tvm.tir.BufferStore)
    assert stmt.buffer == bool_buf
    assert stmt.buffer.data == bool_ptr
    assert list(stmt.indices) == [10]
    assert stmt.value.value == 1

    f32_ptr = tvm.tir.Var("buf", tvm.ir.PointerType(tvm.ir.PrimType("float32")))
    stmt = tvm.tir.Allocate(f32_ptr, "float32", [10], tvm.tir.const(1, "uint1"), nop_stmt)
    assert isinstance(stmt, tvm.tir.Allocate)
    assert stmt.dtype == "float32"
    assert stmt.buffer_var == f32_ptr
    assert stmt.body == nop_stmt

    # Allocate with an explicit storage scope on the pointer type.
    storage_scope = "global.texture"
    scoped_ptr = tvm.tir.Var(
        "buf", tvm.ir.PointerType(tvm.ir.PrimType("float32"), storage_scope)
    )
    stmt = tvm.tir.Allocate(scoped_ptr, "float32", [10], tvm.tir.const(1, "uint1"), nop_stmt)
    assert isinstance(stmt, tvm.tir.Allocate)
    assert stmt.dtype == "float32"
    assert stmt.buffer_var == scoped_ptr
    assert stmt.buffer_var.type_annotation.storage_scope == storage_scope
    assert stmt.body == nop_stmt

    stmt = tvm.tir.AttrStmt(scoped_ptr, "xyz", 1, nop_stmt)
    assert isinstance(stmt, tvm.tir.AttrStmt)
    assert stmt.node == scoped_ptr
    assert stmt.attr_key == "xyz"
    assert stmt.body == nop_stmt

    stmt = tvm.tir.IfThenElse(tvm.tir.const(1, "uint1"), tvm.tir.Evaluate(11), nop_stmt)
    assert isinstance(stmt, tvm.tir.IfThenElse)
    assert stmt.then_case.value.value == 11
    assert stmt.else_case == nop_stmt

    stmt = tvm.tir.Prefetch(tvm.tir.decl_buffer((1, 2)), [])
    assert isinstance(stmt, tvm.tir.Prefetch)
def test_float_constructor_requires_float_dtype():
    """FloatImm must reject non-float dtypes such as int32."""
    with pytest.raises(tvm.TVMError):
        tvm.tir.FloatImm("int32", 1.0)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 6,343 | 30.562189 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_parallel_vectorize_unroll.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
)
from tvm.script import tir as T
from tvm.target import Target
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# 1024x1024x1024 matmul workload used as the input module for the
# ParallelizeVectorizeUnroll design-space tests below.
@tvm.script.ir_module
class Matmul:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# Matmul with parallel/vectorize/unroll annotations already attached to an
# explicit root block.
# NOTE(review): this module is not referenced by the visible tests in this
# file — presumably kept as a reference for the annotated form; confirm
# before removing.
@tvm.script.ir_module
class ParallelizeVectorizeUnroll:
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:
        T.func_attr({"global_symbol": "main"})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        with T.block("root"):
            T.reads([])
            T.writes([])
            T.block_attr({"meta_schedule.parallel": 128, "meta_schedule.vectorize": 16, "meta_schedule.unroll_explicit": 2})
            for i, j, k in T.grid(1024, 1024, 1024):
                with T.block("matmul"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    with T.init():
                        C[vi, vj] = 0.0
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# from tvm.script import tir as T
# A purely spatial workload (no reduction blocks): per-scale strided-slice +
# sigmoid + multiply + reshape over three feature maps (52x52, 26x26, 13x13,
# each with 3x85 channels), then concat, transpose and expand_dims.
# Presumably a YOLO-style detection-head post-processing graph — the exact
# provenance is not visible here. Used to check that
# ParallelizeVectorizeUnroll leaves spatial-only modules untouched.
@tvm.script.ir_module
class PureSpatial:
    @T.prim_func
    def main(placeholder: T.Buffer((1, 13, 13, 3, 85), "float32"), placeholder_1: T.Buffer((1, 26, 26, 3, 85), "float32"), placeholder_2: T.Buffer((1, 52, 52, 3, 85), "float32"), T_expand_dims: T.Buffer((1, 80, 10647), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        T_strided_slice_with_axes = T.alloc_buffer([1, 52, 52, 3, 1], dtype="float32")
        T_sigmoid = T.alloc_buffer([1, 52, 52, 3, 1], dtype="float32")
        T_strided_slice_with_axes_1 = T.alloc_buffer([1, 52, 52, 3, 80], dtype="float32")
        T_sigmoid_1 = T.alloc_buffer([1, 52, 52, 3, 80], dtype="float32")
        T_multiply = T.alloc_buffer([1, 52, 52, 3, 80], dtype="float32")
        T_reshape = T.alloc_buffer([8112, 80], dtype="float32")
        T_strided_slice_with_axes_2 = T.alloc_buffer([1, 26, 26, 3, 1], dtype="float32")
        T_sigmoid_2 = T.alloc_buffer([1, 26, 26, 3, 1], dtype="float32")
        T_strided_slice_with_axes_3 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
        T_sigmoid_3 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
        T_multiply_1 = T.alloc_buffer([1, 26, 26, 3, 80], dtype="float32")
        T_reshape_1 = T.alloc_buffer([2028, 80], dtype="float32")
        T_strided_slice_with_axes_4 = T.alloc_buffer([1, 13, 13, 3, 1], dtype="float32")
        T_sigmoid_4 = T.alloc_buffer([1, 13, 13, 3, 1], dtype="float32")
        T_strided_slice_with_axes_5 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
        T_sigmoid_5 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
        T_multiply_2 = T.alloc_buffer([1, 13, 13, 3, 80], dtype="float32")
        T_reshape_2 = T.alloc_buffer([507, 80], dtype="float32")
        T_concat = T.alloc_buffer([10647, 80], dtype="float32")
        T_transpose = T.alloc_buffer([80, 10647], dtype="float32")
        # --- scale 1: 52x52 grid (placeholder_2) ---
        for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 1):
            with T.block("T_strided_slice_with_axes"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
                T.writes(T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4] = placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
        for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 1):
            with T.block("T_sigmoid"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
            with T.block("T_strided_slice_with_axes_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
                T.writes(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4] = placeholder_2[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)]
        for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
            with T.block("T_sigmoid_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid_1[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid_1[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_1[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 52, 52, 3, 80):
            with T.block("T_multiply"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_sigmoid[ax0, ax1, ax2, ax3, 0], T_sigmoid_1[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply[ax0, ax1, ax2, ax3, ax4])
                T_multiply[ax0, ax1, ax2, ax3, ax4] = T_sigmoid[ax0, ax1, ax2, ax3, 0] * T_sigmoid_1[ax0, ax1, ax2, ax3, ax4]
        for i0, i1 in T.grid(8112, 80):
            with T.block("T_reshape"):
                ax0, ax1 = T.axis.remap("SS", [i0, i1])
                T.reads(T_multiply[0, (ax1 // 80 + ax0) % 8112 // 156, (ax1 // 80 + ax0) % 156 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80])
                T.writes(T_reshape[ax0, ax1])
                T_reshape[ax0, ax1] = T_multiply[0, (ax1 // 80 + ax0) % 8112 // 156, (ax1 // 80 + ax0) % 156 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80]
        # --- scale 2: 26x26 grid (placeholder_1) ---
        for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 1):
            with T.block("T_strided_slice_with_axes_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
                T.writes(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4] = placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
        for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 1):
            with T.block("T_sigmoid_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid_2[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid_2[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_2[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
            with T.block("T_strided_slice_with_axes_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
                T.writes(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4] = placeholder_1[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)]
        for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
            with T.block("T_sigmoid_3"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid_3[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid_3[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_3[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 26, 26, 3, 80):
            with T.block("T_multiply_1"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_sigmoid_2[ax0, ax1, ax2, ax3, 0], T_sigmoid_3[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply_1[ax0, ax1, ax2, ax3, ax4])
                T_multiply_1[ax0, ax1, ax2, ax3, ax4] = T_sigmoid_2[ax0, ax1, ax2, ax3, 0] * T_sigmoid_3[ax0, ax1, ax2, ax3, ax4]
        for i0, i1 in T.grid(2028, 80):
            with T.block("T_reshape_1"):
                ax0, ax1 = T.axis.remap("SS", [i0, i1])
                T.reads(T_multiply_1[0, (ax1 // 80 + ax0) % 2028 // 78, (ax1 // 80 + ax0) % 78 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80])
                T.writes(T_reshape_1[ax0, ax1])
                T_reshape_1[ax0, ax1] = T_multiply_1[0, (ax1 // 80 + ax0) % 2028 // 78, (ax1 // 80 + ax0) % 78 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80]
        # --- scale 3: 13x13 grid (placeholder) ---
        for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 1):
            with T.block("T_strided_slice_with_axes_4"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)])
                T.writes(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4] = placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(4)]
        for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 1):
            with T.block("T_sigmoid_4"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid_4[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid_4[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_4[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
            with T.block("T_strided_slice_with_axes_5"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)])
                T.writes(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4])
                T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4] = placeholder[ax0, ax1, ax2, ax3, T.cast(ax4, "int64") + T.int64(5)]
        for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
            with T.block("T_sigmoid_5"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_sigmoid_5[ax0, ax1, ax2, ax3, ax4])
                T_sigmoid_5[ax0, ax1, ax2, ax3, ax4] = T.sigmoid(T_strided_slice_with_axes_5[ax0, ax1, ax2, ax3, ax4], dtype="float32")
        for i0, i1, i2, i3, i4 in T.grid(1, 13, 13, 3, 80):
            with T.block("T_multiply_2"):
                ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
                T.reads(T_sigmoid_4[ax0, ax1, ax2, ax3, 0], T_sigmoid_5[ax0, ax1, ax2, ax3, ax4])
                T.writes(T_multiply_2[ax0, ax1, ax2, ax3, ax4])
                T_multiply_2[ax0, ax1, ax2, ax3, ax4] = T_sigmoid_4[ax0, ax1, ax2, ax3, 0] * T_sigmoid_5[ax0, ax1, ax2, ax3, ax4]
        for i0, i1 in T.grid(507, 80):
            with T.block("T_reshape_2"):
                ax0, ax1 = T.axis.remap("SS", [i0, i1])
                T.reads(T_multiply_2[0, (ax1 // 80 + ax0) % 507 // 39, (ax1 // 80 + ax0) % 39 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80])
                T.writes(T_reshape_2[ax0, ax1])
                T_reshape_2[ax0, ax1] = T_multiply_2[0, (ax1 // 80 + ax0) % 507 // 39, (ax1 // 80 + ax0) % 39 // 3, (ax1 // 80 + ax0) % 3, ax1 % 80]
        # --- concat the three scales (507 + 2028 + 8112 = 10647 rows) ---
        for i0, i1 in T.grid(10647, 80):
            with T.block("T_concat"):
                ax0, ax1 = T.axis.remap("SS", [i0, i1])
                T.reads(T_reshape[ax0 - 2535, ax1], T_reshape_1[ax0 - 507, ax1], T_reshape_2[ax0, ax1])
                T.writes(T_concat[ax0, ax1])
                T_concat[ax0, ax1] = T.if_then_else(2535 <= ax0, T_reshape[ax0 - 2535, ax1], T.if_then_else(507 <= ax0, T_reshape_1[ax0 - 507, ax1], T_reshape_2[ax0, ax1], dtype="float32"), dtype="float32")
        for i0, i1 in T.grid(80, 10647):
            with T.block("T_transpose"):
                ax0, ax1 = T.axis.remap("SS", [i0, i1])
                T.reads(T_concat[ax1, ax0])
                T.writes(T_transpose[ax0, ax1])
                T_transpose[ax0, ax1] = T_concat[ax1, ax0]
        for i0, i1, i2 in T.grid(1, 80, 10647):
            with T.block("T_expand_dims"):
                ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2])
                T.reads(T_transpose[ax1, ax2])
                T.writes(T_expand_dims[ax0, ax1, ax2])
                T_expand_dims[ax0, ax1, ax2] = T_transpose[ax1, ax2]
# pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks
# fmt: on
def test_parallel_vectorize_unroll():
    """ParallelizeVectorizeUnroll on a dense matmul must annotate the root block."""

    @T.prim_func
    def Matmul_0(
        A: T.Buffer((1024, 1024), "float32"),
        B: T.Buffer((1024, 1024), "float32"),
        C: T.Buffer((1024, 1024), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main"})
        # body
        with T.block("root"):
            T.reads()
            T.writes()
            # Expected annotations produced by the schedule rule under test.
            T.block_attr(
                {
                    "meta_schedule.parallel": 512,
                    "meta_schedule.unroll_explicit": 16,
                    "meta_schedule.vectorize": 32,
                }
            )
            for i, j, k in T.grid(1024, 1024, 1024):
                with T.block("matmul"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    T.reads(A[vi, vk], B[vk, vj])
                    T.writes(C[vi, vj])
                    with T.init():
                        C[vi, vj] = T.float32(0)
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]

    # Expected sampling decision for the single generated sketch.
    decision_0 = [
        ("SampleCategorical", 1),
    ]
    # NOTE(review): `Matmul` is the input workload defined at module scope
    # (outside this excerpt); `Matmul_0` above is the expected output.
    mod = Matmul
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm --num-cores=32"),
        types=None,
        sch_rules=[
            ms.schedule_rule.ParallelizeVectorizeUnroll(
                max_jobs_per_core=16,
                max_vectorize_extent=32,
                unroll_max_steps=[0, 16, 64, 512],
                unroll_explicit=True,
            ),
        ],
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[Matmul_0],
        expected_decisions=[decision_0],
    )
def test_parallel_vectorize_unroll_spatial():
    """A purely spatial workload must yield a single design space whose
    simplified trace is empty (the rule adds nothing to schedule)."""
    rule = ms.schedule_rule.ParallelizeVectorizeUnroll(
        max_jobs_per_core=-1,
        max_vectorize_extent=-1,
        unroll_max_steps=[0, 16, 64, 512],
        unroll_explicit=True,
    )
    spaces = generate_design_space(
        kind="llvm",
        mod=PureSpatial,
        target=Target("llvm --num-cores=32"),
        types=None,
        sch_rules=[rule],
    )
    assert len(spaces) == 1
    simplified = spaces[0].trace.simplified(remove_postproc=True)
    assert not simplified.insts
# Allow running this test file directly as a script (outside pytest).
if __name__ == "__main__":
    test_parallel_vectorize_unroll()
    test_parallel_vectorize_unroll_spatial()
| 16,942 | 54.733553 | 239 | py |
tvm | tvm-main/tests/python/unittest/test_tir_transform_decorate_device_scope.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
def test_decorate_device():
    """DecorateDeviceScope must produce a body whose attr_key is "device_scope"."""
    var = te.var("x")
    func = tvm.tir.PrimFunc([var], tvm.tir.Evaluate(var))
    mod = tvm.IRModule.from_expr(func)
    transformed = tvm.tir.transform.DecorateDeviceScope()(mod)
    body = transformed["main"].body
    assert body.attr_key == "device_scope"
# Allow running this test file directly as a script (outside pytest).
if __name__ == "__main__":
    test_decorate_device()
| 1,111 | 34.870968 | 76 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_reindex.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.schedule import ScheduleError
from tvm.tir.schedule.testing import verify_trace_roundtrip
# Input workload: B[i, j] = A[j, i] * 2 — a transposed read feeding an
# elementwise multiply; used as the reindex-read test case.
@T.prim_func
def transpose_elementwise(
    A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")
) -> None:
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vj, vi] * 2.0
# Expected output of reindexing the read of `transpose_elementwise`: A is
# staged into A_reindex with the transposed pattern, so block "B" reads it
# with identity indices.
@T.prim_func
def transpose_elementwise_reindex_read(
    A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")
) -> None:
    A_reindex = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("A_reindex"):
            vi, vj = T.axis.remap("SS", [i, j])
            A_reindex[vi, vj] = A[vj, vi]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A_reindex[vi, vj] * 2.0
# Input workload: NHWC conv2d with stride 2 over a padded (230x230) input.
@T.prim_func
def conv2d_nhwc(
    Input: T.Buffer((1, 224, 224, 3), "float32"),
    Weight: T.Buffer((7, 7, 3, 64), "float32"),
    Conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            # Zero-pad 3 pixels on each spatial border.
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                ((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
                Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
                T.float32(0),
                dtype="float32",
            )
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            with T.init():
                Conv2d_nhwc[n, h, w, co] = T.float32(0)
            Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
                PadInput[n, ((h * 2) + rh), ((w * 2) + rw), ((T.floordiv(co, 64) * 3) + rc)]
                * Weight[rh, rw, rc, co]
            )
# Expected output of reindexing conv2d's data read: PadInput is staged into
# ReindexInput with the strided access pattern pre-applied.
@T.prim_func
def conv2d_nhwc_reindex_data(
    Input: T.Buffer((1, 224, 224, 3), "float32"),
    Weight: T.Buffer((7, 7, 3, 64), "float32"),
    Conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32"),
) -> None:
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    ReindexInput = T.alloc_buffer([1, 112, 112, 7, 7, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                ((((i1_1 >= 3) and (i1_1 < 227)) and (i2_1 >= 3)) and (i2_1 < 227)),
                Input[i0_1, (i1_1 - 3), (i2_1 - 3), i3_1],
                T.float32(0),
                dtype="float32",
            )
    for i0, i1, i2, i3, i4, i5 in T.grid(1, 112, 112, 7, 7, 3):
        with T.block("ReindexInput"):
            n, h, w, rh, rw, rc = T.axis.remap("SSSSSS", [i0, i1, i2, i3, i4, i5])
            ReindexInput[n, h, w, rh, rw, rc] = PadInput[n, ((h * 2) + rh), ((w * 2) + rw), rc]
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            with T.init():
                Conv2d_nhwc[n, h, w, co] = T.float32(0)
            Conv2d_nhwc[n, h, w, co] = Conv2d_nhwc[n, h, w, co] + (
                ReindexInput[n, h, w, rh, rw, rc] * Weight[rh, rw, rc, co]
            )
# Expected output of reindexing conv2d's weight read: the (rh, rw, rc, co)
# weight is staged into weight_reindex laid out as (co, rh, rw, rc).
@T.prim_func
def conv2d_nhwc_reindex_weight(
    var_inputs: T.handle, var_weight: T.handle, var_conv2d_nhwc: T.handle
) -> None:
    inputs = T.match_buffer(var_inputs, [1, 224, 224, 3], dtype="float32")
    weight = T.match_buffer(var_weight, [7, 7, 3, 64], dtype="float32")
    conv2d_nhwc = T.match_buffer(var_conv2d_nhwc, [1, 112, 112, 64], dtype="float32")
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    weight_reindex = T.alloc_buffer([64, 7, 7, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                i1_1 >= 3 and i1_1 < 227 and i2_1 >= 3 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    for ax3, ax4, ax5, ax6 in T.grid(64, 7, 7, 3):
        with T.block("weight_reindex"):
            v3, v4, v5, v6 = T.axis.remap("SSSS", [ax3, ax4, ax5, ax6])
            T.reads(weight[v4, v5, v6, v3])
            T.writes(weight_reindex[v3, v4, v5, v6])
            weight_reindex[v3, v4, v5, v6] = weight[v4, v5, v6, v3]
    for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 112, 112, 64, 7, 7, 3):
        with T.block("conv2d_nhwc"):
            n, h, w, co, rh, rw, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6])
            T.reads(
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight_reindex[co, rh, rw, rc],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc]
                * weight_reindex[co, rh, rw, rc]
            )
# Input workload: 512x512 GEMM used by the reindex-write tests.
@T.prim_func
def matmul(
    A: T.Buffer((512, 512), "float32"),
    B: T.Buffer((512, 512), "float32"),
    C: T.Buffer((512, 512), "float32"),
) -> None:
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# Expected output of reindexing matmul's write: accumulation goes to
# C_reindex; a trailing "C_reindex" block copies it back into C.
@T.prim_func
def matmul_reindex_write(
    A: T.Buffer((512, 512), "float32"),
    B: T.Buffer((512, 512), "float32"),
    C: T.Buffer((512, 512), "float32"),
) -> None:
    C_reindex = T.alloc_buffer([512, 512], dtype="float32")
    for i0, i1, i2 in T.grid(512, 512, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C_reindex[i, j], A[i, k], B[k, j])
            T.writes(C_reindex[i, j])
            with T.init():
                C_reindex[i, j] = T.float32(0)
            C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
    for i0, i1 in T.grid(512, 512):
        with T.block("C_reindex"):
            v0, v1 = T.axis.remap("SS", [i0, i1])
            T.reads(C_reindex[v0, v1])
            T.writes(C[v0, v1])
            C[v0, v1] = C_reindex[v0, v1]
# Negative test case: A is read with two distinct index patterns
# (A[vj, vi] and A[vi, vj]) in one block, so reindex must reject it.
@T.prim_func
def multiple_read(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")) -> None:
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vj, vi] + A[vi, vj]
# Input workload whose first loop extent is int64 while the others are
# int32 — exercises reindex on mixed index dtypes.
@T.prim_func
def mixed_dtype(
    p0: T.Buffer((T.int64(2), 1280), "float16"),
    p1: T.Buffer((1280, 1280), "float16"),
    T_matmul_NT: T.Buffer((T.int64(2), 1280), "float16"),
) -> None:
    for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
        with T.block("T_matmul_NT"):
            i = T.axis.spatial(T.int64(2), i0)
            j, k = T.axis.remap("SR", [i1, i2])
            T.reads(p0[i, k], p1[j, k])
            T.writes(T_matmul_NT[i, j])
            with T.init():
                T_matmul_NT[i, j] = T.float16(0)
            T_matmul_NT[i, j] = T_matmul_NT[i, j] + p0[i, k] * p1[j, k]
# Expected output of reindexing the write of `mixed_dtype`: the int64 axis
# must survive in both the staging buffer and the copy-out block.
@T.prim_func
def mixed_dtype_reindex_write(
    p0: T.Buffer((T.int64(2), 1280), "float16"),
    p1: T.Buffer((1280, 1280), "float16"),
    T_matmul_NT: T.Buffer((T.int64(2), 1280), "float16"),
) -> None:
    T_matmul_NT_reindex = T.alloc_buffer([T.int64(2), 1280], dtype="float16")
    for i0, i1, i2 in T.grid(T.int64(2), 1280, 1280):
        with T.block("T_matmul_NT"):
            i = T.axis.spatial(T.int64(2), i0)
            j, k = T.axis.remap("SR", [i1, i2])
            T.reads(p0[i, k], p1[j, k])
            T.writes(T_matmul_NT_reindex[i, j])
            with T.init():
                T_matmul_NT_reindex[i, j] = T.float16(0)
            T_matmul_NT_reindex[i, j] = T_matmul_NT_reindex[i, j] + p0[i, k] * p1[j, k]
    for ax0, ax1 in T.grid(T.int64(2), 1280):
        with T.block("T_matmul_NT_reindex"):
            v0 = T.axis.spatial(T.int64(2), ax0)
            v1 = T.axis.remap("S", [ax1])
            T.reads(T_matmul_NT_reindex[v0, v1])
            T.writes(T_matmul_NT[v0, v1])
            T_matmul_NT[v0, v1] = T_matmul_NT_reindex[v0, v1]
# Degenerate GEMM with a 1x1 output — exercises reindex with unit spatial dims.
@T.prim_func
def matmul_unit_dim(
    A: T.Buffer((1, 512), "float32"),
    B: T.Buffer((512, 1), "float32"),
    C: T.Buffer((1, 1), "float32"),
) -> None:
    for i0, i1, i2 in T.grid(1, 1, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C[i, j], A[i, k], B[k, j])
            T.writes(C[i, j])
            with T.init():
                C[i, j] = T.float32(0)
            C[i, j] = C[i, j] + A[i, k] * B[k, j]
# Expected output of reindexing the write of `matmul_unit_dim`.
@T.prim_func
def matmul_unit_dim_reindex_write(
    A: T.Buffer((1, 512), "float32"),
    B: T.Buffer((512, 1), "float32"),
    C: T.Buffer((1, 1), "float32"),
) -> None:
    C_reindex = T.alloc_buffer([1, 1], dtype="float32")
    for i0, i1, i2 in T.grid(1, 1, 512):
        with T.block("matmul"):
            i, j, k = T.axis.remap("SSR", [i0, i1, i2])
            T.reads(C_reindex[i, j], A[i, k], B[k, j])
            T.writes(C_reindex[i, j])
            with T.init():
                C_reindex[i, j] = T.float32(0)
            C_reindex[i, j] = C_reindex[i, j] + A[i, k] * B[k, j]
    for i0, i1 in T.grid(1, 1):
        with T.block("C_reindex"):
            v0, v1 = T.axis.remap("SS", [i0, i1])
            T.reads(C_reindex[v0, v1])
            T.writes(C[v0, v1])
            C[v0, v1] = C_reindex[v0, v1]
# Run every test twice: once passing block/buffer objects, once their names.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
use_buffer_name = tvm.testing.parameter(by_dict={"buffer_index": False, "buffer_name": True})
def test_reindex_read_basic(use_block_name, use_buffer_name):
    """reindex of the single read buffer of a transposed elementwise op."""
    sch = tir.Schedule(transpose_elementwise)
    if use_block_name:
        target_block = "B"
    else:
        target_block = sch.get_block("B")
    target_buffer = "A" if use_buffer_name else ("read", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(transpose_elementwise_reindex_read, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=transpose_elementwise)
def test_conv2d_reindex_weight(use_block_name, use_buffer_name):
    """reindex of the conv2d weight (second read buffer)."""
    sch = tir.Schedule(conv2d_nhwc)
    if use_block_name:
        target_block = "conv2d_nhwc"
    else:
        target_block = sch.get_block("conv2d_nhwc")
    target_buffer = "Weight" if use_buffer_name else ("read", 1)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_weight, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_conv2d_reindex_data(use_block_name, use_buffer_name):
    """reindex of the conv2d padded data (first read buffer)."""
    sch = tir.Schedule(conv2d_nhwc)
    if use_block_name:
        target_block = "conv2d_nhwc"
    else:
        target_block = sch.get_block("conv2d_nhwc")
    target_buffer = "PadInput" if use_buffer_name else ("read", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(conv2d_nhwc_reindex_data, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=conv2d_nhwc)
def test_matmul_reindex_write(use_block_name, use_buffer_name):
    """reindex of the matmul output (write buffer 0)."""
    sch = tir.Schedule(matmul)
    if use_block_name:
        target_block = "matmul"
    else:
        target_block = sch.get_block("matmul")
    target_buffer = "C" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(matmul_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul)
def test_reindex_fail_multiple_read(use_block_name, use_buffer_name):
    """reindex must reject a buffer read via two different index patterns."""
    sch = tir.Schedule(multiple_read)
    if use_block_name:
        target_block = "B"
    else:
        target_block = sch.get_block("B")
    target_buffer = "A" if use_buffer_name else ("read", 0)
    with pytest.raises(ScheduleError):
        sch.reindex(target_block, target_buffer)
def test_reindex_mixed_dtype(use_block_name, use_buffer_name):
    """reindex of a write buffer whose loops mix int64 and int32 extents."""
    sch = tir.Schedule(mixed_dtype)
    if use_block_name:
        target_block = "T_matmul_NT"
    else:
        target_block = sch.get_block("T_matmul_NT")
    target_buffer = "T_matmul_NT" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(mixed_dtype_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=mixed_dtype)
def test_matmul_unit_dim_reindex_write(use_block_name, use_buffer_name):
    """reindex of a 1x1 output buffer (unit spatial dimensions)."""
    sch = tir.Schedule(matmul_unit_dim)
    if use_block_name:
        target_block = "matmul"
    else:
        target_block = sch.get_block("matmul")
    target_buffer = "C" if use_buffer_name else ("write", 0)
    sch.reindex(target_block, target_buffer)
    tvm.ir.assert_structural_equal(matmul_unit_dim_reindex_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=matmul_unit_dim)
# Allow running this test file directly; tvm.testing.main() dispatches all tests.
if __name__ == "__main__":
    tvm.testing.main()
| 14,090 | 39.375358 | 98 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_tensorize_mfma.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm
from tvm import te
from tvm.tir.tensor_intrin.rocm import (
shared_16x4_to_local_64x1_layout_A,
shared_4x16_to_local_64x1_layout_B,
shared_16x16_to_local_64x4_layout_A,
shared_16x16_to_local_64x4_layout_B,
shared_16x16_to_local_64x4_layout_C,
ROCM_MFMA_fill_16x16_f32_INTRIN,
ROCM_MFMA_LOAD_16x4_A_SHARED_f32_INTRIN,
ROCM_MFMA_LOAD_16x4_B_SHARED_f32_INTRIN,
ROCM_MFMA_f32f32f32_INTRIN,
ROCM_MFMA_STORE_16x16_f32_INTRIN,
ROCM_MFMA_LOAD_16x16_A_SHARED_f16_INTRIN,
ROCM_MFMA_LOAD_16x16_B_SHARED_f16_INTRIN,
ROCM_MFMA_f16f16f32_INTRIN,
ROCM_MFMA_STORE_16x16_f32_INTRIN,
ROCM_MFMA_fill_16x16_i32_INTRIN,
ROCM_MFMA_LOAD_16x16_A_SHARED_s8_INTRIN,
ROCM_MFMA_LOAD_16x16_B_SHARED_s8_INTRIN,
ROCM_MFMA_s8s8s32_INTRIN,
ROCM_MFMA_STORE_16x16_s32_INTRIN,
)
import tvm.testing
import numpy as np
from tvm.testing.tir import mfma_schedule
M = 1024
N = 1024
K = 1024
measure_perf = False
gflops = (N * M * K) * 2 / 1e9
def matmul(m, n, k, in_dtype, out_dtype, b_transposed):
    """Return te tensors (a, b, c) computing c = a @ b (or a @ b.T when
    b_transposed), promoting operands to out_dtype when it differs."""
    b_shape = (n, k) if b_transposed else (k, n)
    a = te.placeholder((m, k), name="A", dtype=in_dtype)
    b = te.placeholder(b_shape, name="B", dtype=in_dtype)
    # NOTE: `k` is rebound here from the integer extent to the reduce axis.
    k = te.reduce_axis((0, k), name="k")
    def maybe_cast(v):
        # Promote to out_dtype for mixed-precision accumulation.
        if in_dtype != out_dtype:
            return tvm.tir.Cast(out_dtype, v)
        return v
    def maybe_swap(i, j):
        # Swap indices so a transposed B is still read as (k, n).
        if b_transposed:
            return j, i
        return i, j
    c = te.compute(
        (m, n),
        lambda i, j: te.sum(maybe_cast(a[i, k]) * maybe_cast(b[maybe_swap(k, j)]), axis=[k]),
        name="C",
    )
    return (a, b, c)
def _random_inputs(in_dtype, out_dtype, b_transposed, m, n, k):
    """Generate random A/B operands plus a float32-accumulated numpy reference C.

    The reference is always accumulated in float32 (the int8 path casts the
    result to int32, the float paths to ``out_dtype``) — exactly mirroring the
    three branches this replaces.
    """
    b_shape = (n, k) if b_transposed else (k, n)
    if in_dtype in ("float32", "float16"):
        a_np = np.random.uniform(size=(m, k)).astype(in_dtype)
        b_np = np.random.uniform(size=b_shape).astype(in_dtype)
        ref_dtype = out_dtype
    else:
        # int8 path: the numpy reference always accumulates into int32.
        a_np = np.random.randint(-128, 128, (m, k)).astype("int8")
        b_np = np.random.randint(-128, 128, b_shape).astype("int8")
        ref_dtype = "int32"
    b_ref = b_np.astype("float32")
    if b_transposed:
        b_ref = b_ref.transpose()
    c_np = np.dot(a_np.astype("float32"), b_ref).astype(ref_dtype)
    return a_np, b_np, c_np


def run_test(
    k_inner,
    in_dtype,
    out_dtype,
    b_transposed,
    i_factors,
    j_factors,
    k_factors,
    index_map_A,
    index_map_B,
    index_map_C,
    ldmatrix_a_intrin,
    ldmatrix_b_intrin,
    mma_intrin,
    mma_fill_intrin,
    mma_store_intrin,
):
    """Schedule an MxNxK matmul with the given MFMA intrinsics, build it for
    ROCm, validate against numpy, and return a timing closure.

    The input-generation logic previously duplicated across three dtype
    branches now lives in ``_random_inputs``.
    """
    sch = mfma_schedule(
        te.create_prim_func(matmul(M, N, K, in_dtype, out_dtype, b_transposed)),
        k_inner,
        in_dtype,
        b_transposed,
        i_factors,
        j_factors,
        k_factors,
        index_map_A,
        index_map_B,
        index_map_C,
        ldmatrix_a_intrin,
        ldmatrix_b_intrin,
        mma_intrin,
        mma_fill_intrin,
        mma_store_intrin,
    )
    f = tvm.build(sch.mod["main"], target="rocm", name="dense")
    dev = tvm.device("rocm", 0)
    a_np, b_np, c_np = _random_inputs(in_dtype, out_dtype, b_transposed, M, N, K)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((M, N), dtype=out_dtype), dev)
    f(a, b, c)
    if in_dtype != "float16":
        # The numpy reference is computed with fp32 precision (otherwise too slow).
        # So there is non-trivial accuracy difference if TVM result is computed with fp16 accumulation.
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-2, atol=1e-2)
    return lambda: f.time_evaluator(f.entry_name, dev, number=500)(a, b, c)
@tvm.testing.requires_matrixcore
def test_i8i8i32_m16n16k16():
    """int8 x int8 -> int32 MFMA with a 16x16x16 tile."""
    # Map a shared 16x16 tile onto the 64-lane x 4-element register layout.
    def index_map_A(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_A(i % 16, j % 16),
        )
    def index_map_B(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_B(i % 16, j % 16),
        )
    def index_map_C(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_C(i % 16, j % 16),
        )
    k_inner = 16
    in_dtype = "int8"
    out_dtype = "int32"
    i_factors, j_factors, k_factors = [1, 8, 2, 4, 1], [1, 16, 2, 1, 2], [32, 2, 1]
    timer = run_test(
        k_inner,
        in_dtype,
        out_dtype,
        False,  # b_transposed
        i_factors,
        j_factors,
        k_factors,
        index_map_A,
        index_map_B,
        index_map_C,
        ROCM_MFMA_LOAD_16x16_A_SHARED_s8_INTRIN,
        ROCM_MFMA_LOAD_16x16_B_SHARED_s8_INTRIN,
        ROCM_MFMA_s8s8s32_INTRIN,
        ROCM_MFMA_fill_16x16_i32_INTRIN,
        ROCM_MFMA_STORE_16x16_s32_INTRIN,
    )
    if measure_perf and timer:
        print("test_i8i8i32_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_matrixcore
def test_f16f16f32_m16n16k16():
    """float16 x float16 -> float32 MFMA with a 16x16x16 tile."""
    def index_map_A(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_A(i % 16, j % 16),
        )
    def index_map_B(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_B(i % 16, j % 16),
        )
    def index_map_C(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_C(i % 16, j % 16),
        )
    k_inner = 16
    in_dtype = "float16"
    out_dtype = "float32"
    i_factors, j_factors, k_factors = [1, 8, 2, 4, 1], [1, 16, 2, 1, 2], [32, 2, 1]
    timer = run_test(
        k_inner,
        in_dtype,
        out_dtype,
        False,  # b_transposed
        i_factors,
        j_factors,
        k_factors,
        index_map_A,
        index_map_B,
        index_map_C,
        ROCM_MFMA_LOAD_16x16_A_SHARED_f16_INTRIN,
        ROCM_MFMA_LOAD_16x16_B_SHARED_f16_INTRIN,
        ROCM_MFMA_f16f16f32_INTRIN,
        ROCM_MFMA_fill_16x16_f32_INTRIN,
        ROCM_MFMA_STORE_16x16_f32_INTRIN,
    )
    if measure_perf and timer:
        # NOTE(review): this label lacks the "test_" prefix used by the
        # sibling tests' perf printouts.
        print("f16f16f32_m16n16k16: %f GFLOPS" % (gflops / (timer().mean)))
@tvm.testing.requires_matrixcore
def test_f32f32f32_m16n16k4():
    """float32 MFMA with a 16x16x4 tile (16x4 / 4x16 operand fragments)."""
    def index_map_A(i, j):
        # NOTE(review): the divisor/modulus on `j` is 16 although the K tile
        # is 4 — confirm against shared_16x4_to_local_64x1_layout_A's domain.
        return (
            i // 16,
            j // 16,
            *shared_16x4_to_local_64x1_layout_A(i % 16, j % 16),
        )
    def index_map_B(i, j):
        return (
            i // 16,
            j // 16,
            *shared_4x16_to_local_64x1_layout_B(i % 16, j % 16),
        )
    def index_map_C(i, j):
        return (
            i // 16,
            j // 16,
            *shared_16x16_to_local_64x4_layout_C(i % 16, j % 16),
        )
    k_inner = 4
    in_dtype = "float32"
    out_dtype = "float32"
    i_factors, j_factors, k_factors = [4, 2, 1, 4, 2], [4, 2, 2, 1, 4], [128, 2, 1]
    timer = run_test(
        k_inner,
        in_dtype,
        out_dtype,
        False,  # b_transposed
        i_factors,
        j_factors,
        k_factors,
        index_map_A,
        index_map_B,
        index_map_C,
        ROCM_MFMA_LOAD_16x4_A_SHARED_f32_INTRIN,
        ROCM_MFMA_LOAD_16x4_B_SHARED_f32_INTRIN,
        ROCM_MFMA_f32f32f32_INTRIN,
        ROCM_MFMA_fill_16x16_f32_INTRIN,
        ROCM_MFMA_STORE_16x16_f32_INTRIN,
    )
    if measure_perf and timer:
        print("test_f32f32f32_m16n16k4: %f GFLOPS" % (gflops / (timer().mean)))
# Allow running this test file directly; tvm.testing.main() dispatches all tests.
if __name__ == "__main__":
    tvm.testing.main()
| 9,097 | 27.88254 | 103 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_hexagon.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
import pytest
import re
import sys
import tvm
import tvm.relay
import tvm.testing
import tvm.contrib.hexagon as hexagon
@pytest.fixture(autouse=True)
def register_linker():
    # Swap in a no-op linker for each test so codegen can be exercised
    # without a real Hexagon toolchain, then restore the original.
    original_linker = hexagon.hexagon_link()
    # Register a phony linker, so that we can test codegen without a Hexagon toolchain.
    hexagon.register_linker(lambda: "/bin/true")
    yield None
    # Restore registration.
    hexagon.register_linker(original_linker)
@tvm.testing.requires_hexagon
def test_basic():
    """HVX vector adds must appear in the assembly, both for an offloaded
    kernel and for a natively targeted build."""
    target = tvm.target.hexagon("v66", hvx=128)
    def check_add(offload):
        A = tvm.te.placeholder((128,), dtype="uint8", name="A")
        # NOTE(review): this placeholder is also named "A" — likely a
        # copy-paste slip; harmless for the assembly check below.
        B = tvm.te.placeholder((128,), dtype="uint8", name="A")
        C = tvm.te.compute((128,), lambda i: A[i] + B[i], name="C")
        s = tvm.te.create_schedule(C.op)
        if offload:
            # Bind to the "pipeline" axis so the body is offloaded; the
            # Hexagon code then lives in the first imported module.
            xo, xi = s[C].split(s[C].op.axis[0], nparts=1)
            s[C].bind(xo, tvm.te.thread_axis("pipeline"))
            m = tvm.build(s, [C, A, B], target=target, name="offload_add")
            hexm = m.imported_modules[0]
        else:
            hexm = tvm.build(
                s, [C, A, B], target=tvm.target.Target(target, target), name="native_add"
            )
        asm = hexm.get_source("s")
        vadds = re.findall(r"v[0-9]+.b = vadd\(v[0-9]+.b,v[0-9]+.b\)", asm)
        assert vadds  # Check that it's non-empty
    check_add(True)
    check_add(False)
@tvm.testing.requires_hexagon
def test_llvm_target_features():
    """Generated LLVM IR must carry the +hvx-length128b target feature."""
    target = tvm.target.hexagon("v66", hvx=128)
    # A trivial add-one kernel — just enough to trigger codegen.
    inp = tvm.te.placeholder((128,), dtype="uint8", name="A")
    out = tvm.te.compute((128,), lambda i: inp[i] + 1, name="C")
    sched = tvm.te.create_schedule(out.op)
    module = tvm.build(sched, [out, inp], target=tvm.target.Target(target, target), name="add_one")
    llvm_ir = module.get_source("ll")
    # The HVX vector-length feature must show up in the "attributes" lines.
    assert re.findall(r"attributes.*\+hvx-length128b", llvm_ir)
@tvm.testing.requires_hexagon
def test_alloc_vtcm():
    """Buffers in the "local.vtcm" scope must lower to the Hexagon VTCM
    allocate/free runtime hooks via ir_lower_vtcm_pass."""
    target = tvm.target.hexagon("v66")
    buf_len = 2048
    A = tvm.te.placeholder((buf_len,), name="A", dtype="int8")
    B = tvm.te.placeholder((buf_len,), name="B", dtype="int8")
    A_buf = tvm.te.compute((buf_len,), lambda *i: A(*i), "A_buf")
    B_buf = tvm.te.compute((buf_len,), lambda *i: B(*i), "B_buf")
    C = tvm.te.compute((buf_len,), lambda *i: A_buf(*i) + B_buf(*i), name="C")
    s = tvm.te.create_schedule(C.op)
    # Use VTCM for each buffer.
    s[A_buf].set_scope("local.vtcm")
    s[B_buf].set_scope("local.vtcm")
    config = {"tir.add_lower_pass": hexagon.ir_lower_vtcm_pass()}
    with tvm.transform.PassContext(config=config):
        irmod = tvm.lower(s, [A, B, C], name="alloc_vtcm")
    # Both the allocate and the free hook must be present in the lowered IR.
    calls = re.findall("HexagonBackend[A-Za-z]*VTCM", str(irmod["alloc_vtcm"]))
    assert "HexagonBackendAllocateVTCM" in calls
    assert "HexagonBackendFreeVTCM" in calls
@tvm.testing.requires_hexagon
def test_llvm_options():
    """Custom llvm_options must survive into the target string and not crash codegen."""
    target = tvm.target.hexagon("v66", llvm_options="-hexagon-noopt")
    zero = tvm.te.compute((10,), lambda _: tvm.tir.const(0, "int32"))
    sched = tvm.te.create_schedule(zero.op)
    # Building would fail here if the llvm_options attribute type were mishandled.
    tvm.build(sched, [zero], target=target, name="zero")
    assert re.search("-hexagon-noopt", str(target))
# Allow running this test file directly; tvm.testing.main() dispatches all tests.
if __name__ == "__main__":
    tvm.testing.main()
| 4,253 | 34.45 | 89 | py |
tvm | tvm-main/tests/python/unittest/test_testing.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
def test_check_numerical_grads():
    """Self-check for tvm.testing.check_numerical_grads: exact analytic
    gradients must pass; deliberately perturbed gradients must raise."""
    # Functions and their derivatives
    functions = [
        lambda x: (x * x * x, 3 * x * x),
        lambda x: (x * x, 2 * x),
        lambda x: (np.abs(x), np.sign(x)),
        lambda x: (np.log(np.abs(x)), 1 / x),
        lambda x: (np.sqrt(np.abs(x)), np.sign(x) / (2 * np.sqrt(np.abs(x)))),
        lambda x: (1 / x, -1 / (x * x)),
        lambda x: (np.sign(np.sin(1 / x)), np.zeros_like(x)),
        lambda x: (x * np.sin(1 / x), np.sin(1 / x) - np.cos(1 / x) / x),
        lambda x: (np.sin(1 / x), -np.cos(1 / x) / (x * x)),
        lambda x: (np.tan(x), 1.0 / (np.cos(x) * np.cos(x))),
    ]
    np.random.seed(0)
    # Avoid values too close to 0 since singularities of our functions are there
    min_x = 0.5
    for func in functions:
        x_input = np.random.uniform(min_x, 10, size=(3, 4))
        # We need a function returning a scalar, so sum the results
        func_forw = lambda x: np.sum(func(x)[0])
        grads = [func(x_input)[1]]
        tvm.testing.check_numerical_grads(func_forw, [x_input], grads)
    # Check functions with multiple arguments
    for f1 in functions:
        for f2 in functions:
            x_input = np.random.uniform(min_x, 10, size=(3, 4))
            y_input = np.random.uniform(min_x, 10, size=(3, 4))
            func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
            grads = [f1(x_input)[1], f2(y_input)[1]]
            tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
            # Same thing but with keyword arguments
            func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
            grads = {"x": f1(x_input)[1], "y": f2(y_input)[1]}
            tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
    def _noise1(x, atol=1e-2, rtol=0.1):
        # We go in random direction using twice the original tolerance to be sure this
        # results in an error
        sqrt_n = np.sqrt(float(np.prod(x.shape)))
        tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
        noise = np.random.normal(size=x.shape)
        noise = tol * noise / np.linalg.norm(noise)
        return x + noise
    def _noise2(x, atol=1e-2, rtol=0.1):
        # This noise affects just a single component
        sqrt_n = np.sqrt(float(np.prod(x.shape)))
        tol = 2 * (np.linalg.norm(x) * rtol + atol * sqrt_n)
        n = np.random.randint(np.prod(x.shape))
        noise = np.zeros_like(x)
        noise.reshape(-1)[n] = tol
        return x + noise
    # Add noise to gradients and check that the function throws
    for f1 in functions:
        for f2 in functions:
            x_input = np.random.uniform(min_x, 10, size=(3, 4))
            y_input = np.random.uniform(min_x, 10, size=(3, 4))
            func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
            grads = [_noise1(f1(x_input)[1]), _noise1(f2(y_input)[1])]
            try:
                tvm.testing.check_numerical_grads(func_forw, [x_input, y_input], grads)
            except AssertionError as e:
                pass
            else:
                raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
            func_forw = lambda x, y: np.sum(f1(x)[0] + f2(y)[0])
            grads = {"x": _noise2(f1(x_input)[1]), "y": _noise2(f2(y_input)[1])}
            try:
                tvm.testing.check_numerical_grads(func_forw, {"x": x_input, "y": y_input}, grads)
            except AssertionError as e:
                pass
            else:
                raise AssertionError("tvm.testing.check_numerical_grads didn't raise an exception")
# Allow running this test file directly as a script (outside pytest).
if __name__ == "__main__":
    # Fixed: previously called the nonexistent name
    # `test_tvm.testing.check_numerical_grads()`, which raised NameError.
    test_check_numerical_grads()
| 4,564 | 38.353448 | 99 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
# Compact rows whose score exceeds score_threshold (and, when id_index >= 0,
# whose id is non-negative) to the front of `out`, recording their original
# positions in `out_indices`; remaining slots are padded with -1.
@T.prim_func
def get_valid_counts(
    data: T.handle,
    valid_count: T.handle,
    out: T.handle,
    out_indices: T.handle,
    score_threshold: T.float32,
    id_index: T.int32,
    score_index: T.int32,
) -> None:
    data_buf = T.match_buffer(data, (1, 2500, 6), "float32")
    valid_count_buf = T.match_buffer(valid_count, (1,), "int32")
    out_buf = T.match_buffer(out, (1, 2500, 6), "float32")
    out_indices_buf = T.match_buffer(out_indices, (1, 2500), "int32")
    with T.block("init"):
        vi = T.axis.S(1, 0)
        valid_count_buf[vi] = T.int32(0)
        for j in range(2500):
            with T.block("update"):
                vj = T.axis.S(2500, j)
                # NOTE(review): the trailing `6` here indexes one past the
                # size-6 last dim — presumably shorthand for the full extent
                # in these access annotations; confirm against the parser.
                T.reads([data_buf[vi, vj, 6]])
                T.writes([valid_count_buf[vi], out_indices_buf[vi, vj], out_buf[vi, vj, 6]])
                if (data_buf[vi, vj, score_index] > score_threshold) and (
                    (id_index < 0) or (data_buf[vi, vj, id_index] >= T.float32(0))
                ):
                    for k in T.serial(0, 6):
                        out_buf[vi, valid_count_buf[vi], k] = data_buf[vi, vj, k]
                    out_indices_buf[vi, valid_count_buf[vi]] = vj
                    valid_count_buf[vi] = valid_count_buf[vi] + 1
                if vj >= valid_count_buf[vi]:
                    for k in T.serial(0, 6):
                        out_buf[vi, vj, k] = T.float32(-1)
                    out_indices_buf[vi, vj] = T.int32(-1)
def _check_get_valid_counts_with_numpy(f, dshape, score_threshold, id_index, score_index):
    """Run compiled kernel ``f`` on random data and compare with a NumPy reference."""
    dtype = "float32"
    dev = tvm.cpu()
    batch, anchors, elems = dshape
    np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)

    # Reference implementation of get_valid_counts.
    ref_count = np.zeros(shape=(batch,), dtype="int32")
    ref_out = np.zeros(shape=dshape).astype(dtype)
    ref_idx = np.zeros(shape=(batch, anchors), dtype="int32")
    for b in range(batch):
        write_pos = 0
        for a in range(anchors):
            score = np_data[b, a, score_index]
            if score > score_threshold and (id_index < 0 or np_data[b, a, id_index] >= 0):
                for e in range(elems):
                    ref_out[b, write_pos, e] = np_data[b, a, e]
                ref_count[b] += 1
                ref_idx[b, write_pos] = a
                write_pos += 1
            if a >= ref_count[b]:
                for e in range(elems):
                    ref_out[b, a, e] = -1.0
                ref_idx[b, a] = -1

    in_arr = tvm.nd.array(np_data, dev)
    out_count = tvm.nd.array(ref_count, dev)
    out_data = tvm.nd.array(ref_out, dev)
    out_idx = tvm.nd.array(ref_idx, dev)
    f(in_arr, out_count, out_data, out_idx, score_threshold, id_index, score_index)
    tvm.testing.assert_allclose(out_count.numpy(), ref_count, rtol=1e-5)
    tvm.testing.assert_allclose(out_data.numpy(), ref_out, rtol=1e-5)
    tvm.testing.assert_allclose(out_idx.numpy(), ref_idx, rtol=1e-5)
    print("test get_valid_counts end")
def test_get_valid_counts_script_func():
    """Print, build, and numerically validate the get_valid_counts TVMScript kernel."""
    target = "llvm"
    # Printing exercises the TVMScript printer for both the bare func and a module.
    print(get_valid_counts.script())
    module = tvm.ir.IRModule({"get_valid_counts": get_valid_counts})
    print(module.script())
    # Building plus the NumPy check exercises lowering and codegen end-to-end.
    built = tvm.build(module["get_valid_counts"], target=target)
    _check_get_valid_counts_with_numpy(built, (1, 2500, 6), 0.0, 0, 1)
# Zero-dimensional buffer workload (implicit root block):
# writes A <- 2, then B <- A + B through scratch buffer C.
@T.prim_func
def alloc_zero_dim_buffer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [], dtype="float32")
    B = T.match_buffer(b, [], dtype="float32")
    # body
    # tir.with block("root")
    C = T.alloc_buffer([], dtype="float32")
    A[()] = T.float32(2)
    C[()] = A[()] + B[()]
    B[()] = C[()]
# Same computation as alloc_zero_dim_buffer, but with an explicit "root"
# block; the round-trip test asserts the two are structurally equal.
@T.prim_func
def alloc_zero_dim_buffer_block(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (), "float32")
    B = T.match_buffer(b, (), "float32")
    with T.block("root"):
        T.reads([])
        T.writes([])
        C = T.alloc_buffer((), "float32")
        A[()] = T.float32(2)
        C[()] = A[()] + B[()]
        B[()] = C[()]
def _check_alloc_zero_dim_buffer(f):
    """Run compiled zero-dim-buffer func ``f`` and compare against a NumPy reference."""
    dtype = "float32"
    dev = tvm.cpu()
    host_in = np.zeros(shape=()).astype(dtype)
    host_out = np.zeros(shape=()).astype(dtype)
    dev_in = tvm.nd.array(host_in, dev)
    dev_out = tvm.nd.array(host_out, dev)
    # NumPy reference execution
    scratch = np.array(1)
    host_in[()] = 2.0
    scratch[()] = host_in[()] + host_out[()]
    host_out[()] = scratch[()]
    # TVM execution
    f(dev_in, dev_out)
    tvm.testing.assert_allclose(dev_out.numpy(), host_out, rtol=1e-5)
def test_alloc_zero_dim_buffer_round_trip():
    """TVMScript round-trip of zero-dim funcs, with and without an explicit root block."""
    original = alloc_zero_dim_buffer
    original_with_block = alloc_zero_dim_buffer_block
    # Reparse each func from its own printed script.
    reparsed = tvm.script.from_source(original.script())
    reparsed_with_block = tvm.script.from_source(original_with_block.script())
    built = tvm.build(reparsed, "llvm")
    built_with_block = tvm.build(reparsed_with_block, "llvm")
    # Both forms must be structurally identical before and after the round trip.
    tvm.ir.assert_structural_equal(original, original_with_block)
    tvm.ir.assert_structural_equal(reparsed, reparsed_with_block)
    _check_alloc_zero_dim_buffer(built)
    _check_alloc_zero_dim_buffer(built_with_block)
# In-place ceiling division by 4 over a length-16 int32 buffer.
@T.prim_func
def ceildiv_test(A: T.Buffer(16, "int32")):
    for i in range(16):
        A[i] = T.ceildiv(A[i], 4)
@tvm.testing.requires_llvm
def test_ceildiv():
    """Build ceildiv_test and verify T.ceildiv matches integer ceiling division."""
    built = tvm.build(ceildiv_test, "llvm")
    buf = tvm.nd.array(np.arange(16).astype("int32"))
    built(buf)
    # ceil(x / 4) == (x + 3) // 4 for the non-negative inputs used here.
    expected = (np.arange(16) + 3) // 4
    tvm.testing.assert_allclose(buf.numpy(), expected)
# Sugared buffer-slice operators (+, -, *, /, %, unary -, shifts, comparisons,
# and/or); test_slice_op asserts this desugars to slice_op_test_ref.
@T.prim_func
def slice_op_test(
    A: T.Buffer((10,), "float32"), B: T.Buffer((10,), "float32"), C: T.Buffer((10,), "uint32")
):
    B[0:5] = A[0:5] + B[0:5]
    B[0:5] = A[0:5] - B[0:5]
    B[0:5] = A[0:5] * B[0:5]
    B[0:5] = A[0:5] / B[0:5]
    C[0:5] = C[0:5] % T.broadcast(T.uint32(5), 5)
    B[0:5] = -B[0:5]
    C[0:5] = C[0:5] >> 4
    C[0:5] = C[0:5] << 4
    C[0:5] = C[0:5] << C[0:5]
    C[0:5] = C[0:5] >> C[0:5]
    T.evaluate(A[0:5] > B[0:5])
    T.evaluate(A[0:5] > 5)
    T.evaluate(A[0:5] >= B[0:5])
    T.evaluate(A[0:5] >= 5)
    T.evaluate(A[0:5] < B[0:5])
    T.evaluate(A[0:5] < 5)
    T.evaluate(A[0:5] <= B[0:5])
    T.evaluate(A[0:5] <= 5)
    T.evaluate(A[0:5] == B[0:5])
    T.evaluate(A[0:5] == 5)
    T.evaluate(A[0:5] != B[0:5])
    T.evaluate(A[0:5] != 5)
    T.evaluate((A[0:5] > 0) and (B[0:5] > 0))
    T.evaluate((A[0:5] > 0) or (B[0:5] > 0))
    T.evaluate((A[0:5] < 0) and (1 > 0))
    T.evaluate((A[0:5] > 0) or (1 > 0))
# Explicit (desugared) form of slice_op_test: scalars become T.Broadcast,
# unary minus becomes *(-1), shifts become shift_left/shift_right, and
# and/or become bitwise_and/bitwise_or.
@T.prim_func
def slice_op_test_ref(
    A: T.Buffer((10,), "float32"), B: T.Buffer((10,), "float32"), C: T.Buffer((10,), "uint32")
):
    B[0:5] = A[0:5] + B[0:5]
    B[0:5] = A[0:5] - B[0:5]
    B[0:5] = A[0:5] * B[0:5]
    B[0:5] = A[0:5] / B[0:5]
    C[0:5] = C[0:5] % T.Broadcast(T.uint32(5), 5)
    B[0:5] = B[0:5] * T.Broadcast(T.float32(-1), 5)
    C[0:5] = T.shift_right(C[0:5], T.Broadcast(T.uint32(4), 5))
    C[0:5] = T.shift_left(C[0:5], T.Broadcast(T.uint32(4), 5))
    C[0:5] = T.shift_left(C[0:5], C[0:5])
    C[0:5] = T.shift_right(C[0:5], C[0:5])
    T.evaluate(A[0:5] > B[0:5])
    T.evaluate(A[0:5] > T.Broadcast(T.float32(5), 5))
    T.evaluate(A[0:5] >= B[0:5])
    T.evaluate(A[0:5] >= T.Broadcast(T.float32(5), 5))
    T.evaluate(A[0:5] < B[0:5])
    T.evaluate(A[0:5] < T.Broadcast(T.float32(5), 5))
    T.evaluate(A[0:5] <= B[0:5])
    T.evaluate(A[0:5] <= T.Broadcast(T.float32(5), 5))
    T.evaluate(A[0:5] == B[0:5])
    T.evaluate(A[0:5] == T.Broadcast(T.float32(5), 5))
    T.evaluate(A[0:5] != B[0:5])
    T.evaluate(A[0:5] != T.Broadcast(T.float32(5), 5))
    T.bitwise_and(A[0:5] > T.Broadcast(T.float32(0), 5), B[0:5] > T.Broadcast(T.float32(0), 5))
    T.bitwise_or(A[0:5] > T.Broadcast(T.float32(0), 5), B[0:5] > T.Broadcast(T.float32(0), 5))
    T.bitwise_and(A[0:5] < T.Broadcast(T.float32(0), 5), T.Broadcast(T.bool(1), 5))
    T.bitwise_or(A[0:5] > T.Broadcast(T.float32(0), 5), T.Broadcast(T.bool(1), 5))
def test_slice_op():
    # Sugared slice operators must parse to the same IR as the explicit form.
    tvm.ir.assert_structural_equal(slice_op_test, slice_op_test_ref)
if __name__ == "__main__":
    # Allow running this test file directly without pytest.
    test_get_valid_counts_script_func()
    test_alloc_zero_dim_buffer_round_trip()
    test_slice_op()
| 8,972 | 34.607143 | 95 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_profiling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from io import StringIO
import csv
import os
import json
import platform
import tvm.testing
import tvm.utils
from tvm.runtime import profiler_vm
from tvm import relay
from tvm.relay.testing import mlp
from tvm.contrib.debugger import debug_executor
from tvm import rpc
from tvm.contrib import utils
from tvm.runtime.profiling import Report
from tvm.script import tir as T
def read_csv(report):
    """Parse ``report.csv()`` into a dict mapping each column name to its list of values."""
    reader = csv.reader(StringIO(report.csv()), delimiter=",")
    headers = next(reader, None)
    if headers is None:
        # No header row at all: an empty report parses to an empty dict.
        return {}
    columns = [[] for _ in headers]
    for record in reader:
        for idx, value in enumerate(record):
            columns[idx].append(value)
    return dict(zip(headers, columns))
@pytest.mark.skipif(not profiler_vm.enabled(), reason="VM Profiler not enabled")
@tvm.testing.skip_if_wheel_test
@tvm.testing.parametrize_targets
def test_vm(target, dev):
    """Profile a dynamic-shape add through the VM and sanity-check the report."""
    dtype = "float32"
    lhs = relay.var("x", shape=(relay.Any(), relay.Any()), dtype=dtype)
    rhs = relay.var("y", shape=(relay.Any(), relay.Any()), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    exe = relay.vm.compile(mod, target)
    vm = profiler_vm.VirtualMachineProfiler(exe, dev)

    data = np.random.rand(28, 28).astype("float32")
    report = vm.profile(data, data, func_name="main")
    text = str(report)
    for expected in ("fused_add", "Total", "AllocTensorReg", "AllocStorage"):
        assert expected in text
    assert report.configuration["Executor"] == "VM"

    parsed = read_csv(report)
    assert "Hash" in parsed.keys()
    for dur, name in zip(parsed["Duration (us)"], parsed["Name"]):
        if name.startswith("fused"):
            # Ops should have a duration greater than zero.
            assert float(dur) > 0
        else:
            # AllocTensor or AllocStorage may be cached, so their duration could be 0.
            assert float(dur) >= 0
@tvm.testing.parametrize_targets
def test_graph_executor(target, dev):
    """Profile MLP through the debug graph executor and check the report text."""
    mod, params = mlp.get_workload(1)
    lib = relay.build(mod, target, params=params)
    executor = debug_executor.create(lib.get_graph_json(), lib.lib, dev)
    inputs = np.random.rand(1, 1, 28, 28).astype("float32")
    report = executor.profile(data=inputs)
    text = str(report)
    for expected in ("fused_nn_softmax", "Total", "Hash", "Graph"):
        assert expected in text
@tvm.testing.parametrize_targets("cuda", "llvm")
@pytest.mark.skipif(
    tvm.get_global_func("runtime.profiling.PAPIMetricCollector", allow_missing=True) is None,
    reason="PAPI profiling not enabled",
)
def test_papi(target, dev):
    """Profile with a PAPI hardware-counter metric and check it appears in the report."""
    target = tvm.target.Target(target)
    # Pick a counter that exists on the backend under test.
    if str(target.kind) == "llvm":
        metric = "PAPI_FP_OPS"
    elif str(target.kind) == "cuda":
        metric = "cuda:::event:shared_load:device=0"
    else:
        pytest.skip(f"Target {target.kind} not supported by this test")
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, target, params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, dev)
    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
    report = vm.profile(
        data,
        func_name="main",
        collectors=[tvm.runtime.profiling.PAPIMetricCollector({dev: [metric]})],
    )
    assert metric in str(report)
    csv = read_csv(report)
    assert metric in csv.keys()
    # At least one call should have a non-zero counter value.
    assert any([float(x) > 0 for x in csv[metric]])
@tvm.testing.requires_llvm
def test_json():
    """The JSON form of a profiling report exposes typed per-call metrics."""
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, tvm.cpu())
    inputs = np.random.rand(1, 1, 28, 28).astype("float32")
    report = vm.profile(inputs, func_name="main")
    parsed = json.loads(report.json())
    for key in ("device_metrics", "calls", "configuration"):
        assert key in parsed
    assert "Duration (us)" in parsed["calls"][0]
    assert "microseconds" in parsed["calls"][0]["Duration (us)"]
    assert len(parsed["calls"]) > 0
    # Each metric value carries an explicit unit/type wrapper.
    for call in parsed["calls"]:
        assert isinstance(call["Name"]["string"], str)
        assert isinstance(call["Count"]["count"], int)
        assert isinstance(call["Duration (us)"]["microseconds"], float)
@tvm.testing.requires_llvm
def test_rpc_vm():
    """Profile through the VM over a local RPC session."""
    server = rpc.Server(key="profiling")
    remote = rpc.connect("127.0.0.1", server.port, key="profiling")
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    # Export, upload, and reload the compiled library on the remote end.
    tmp_dir = utils.tempdir()
    lib_path = tmp_dir.relpath("lib.tar")
    exe.mod.export_library(lib_path)
    remote.upload(lib_path)
    loaded = remote.load_module("lib.tar")
    vm = profiler_vm.VirtualMachineProfiler(loaded, remote.cpu())
    inputs = tvm.nd.array(np.ones((1, 1, 28, 28), dtype="float32"), device=remote.cpu())
    report = vm.profile(inputs)
    assert len(report.calls) > 0
@tvm.testing.requires_llvm
def test_rpc_graph():
    """Profile through the debug graph executor over a local RPC session.

    Mirrors test_rpc_vm; guarded with ``requires_llvm`` for the same reason —
    the module is compiled for the "llvm" target, so the test cannot run on
    builds without the LLVM backend.
    """
    server = rpc.Server(key="profiling")
    remote = rpc.connect("127.0.0.1", server.port, key="profiling")
    mod, params = mlp.get_workload(1)
    exe = relay.build(mod, "llvm", params=params)
    # Export, upload, and reload the compiled library on the remote end.
    temp = utils.tempdir()
    path = temp.relpath("lib.tar")
    exe.export_library(path)
    remote.upload(path)
    rexec = remote.load_module("lib.tar")
    gr = debug_executor.create(exe.get_graph_json(), rexec, remote.cpu())
    data = np.random.rand(1, 1, 28, 28).astype("float32")
    report = gr.profile(data=data)
    assert len(report.calls) > 0
def test_report_serialization():
    """A profiling report must survive a JSON round-trip with identical table output."""
    mod, params = mlp.get_workload(1)
    exe = relay.vm.compile(mod, "llvm", params=params)
    vm = profiler_vm.VirtualMachineProfiler(exe, tvm.cpu())
    inputs = np.random.rand(1, 1, 28, 28).astype("float32")
    original = vm.profile(inputs, func_name="main")
    restored = Report.from_json(original.json())
    # Report equality compares pointers, so compare printed tables instead.
    # table() is preferred over str(): str() adds aggregate rows and column
    # sums whose values may shift from otherwise negligible conversion error.
    assert original.table(aggregate=False, col_sums=False) == restored.table(
        aggregate=False, col_sums=False
    )
# CPU kernel: elementwise C = A + B over 10 float64 values (serial loop).
@T.prim_func
def axpy_cpu(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [10], "float64")
    B = T.match_buffer(b, [10], "float64")
    C = T.match_buffer(c, [10], "float64")
    for i in range(10):
        C[i] = A[i] + B[i]
# GPU kernel: same C = A + B, with the loop bound to threadIdx.x.
@T.prim_func
def axpy_gpu(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [10], "float64")
    B = T.match_buffer(b, [10], "float64")
    C = T.match_buffer(c, [10], "float64")
    for i in T.thread_binding(0, 10, "threadIdx.x"):
        C[i] = A[i] + B[i]
@tvm.testing.parametrize_targets("cuda", "llvm")
@pytest.mark.skipif(
    tvm.get_global_func("runtime.profiling.PAPIMetricCollector", allow_missing=True) is None,
    reason="PAPI profiling not enabled",
)
def test_profile_function(target, dev):
    """profile_function should collect PAPI metrics for a standalone PackedFunc."""
    target = tvm.target.Target(target)
    # Select a kernel and counter appropriate for the backend under test.
    if str(target.kind) == "llvm":
        metric = "PAPI_FP_OPS"
        func = axpy_cpu
    elif str(target.kind) == "cuda":
        metric = (
            "cuda:::gpu__compute_memory_access_throughput.max.pct_of_peak_sustained_region:device=0"
        )
        func = axpy_gpu
    else:
        pytest.skip(f"Target {target.kind} not supported by this test")
    f = tvm.build(func, target=target)
    a = tvm.nd.array(np.ones(10), device=dev)
    b = tvm.nd.array(np.ones(10), device=dev)
    c = tvm.nd.array(np.zeros(10), device=dev)
    report = tvm.runtime.profiling.profile_function(
        f, dev, [tvm.runtime.profiling.PAPIMetricCollector({dev: [metric]})]
    )(a, b, c)
    assert metric in report.keys()
    assert report[metric].value > 0
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest-based runner.
    tvm.testing.main()
| 8,916 | 32.272388 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_printer_metadata.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import tvm.testing
from tvm.script.parser import ir as I
from tvm.script.parser import tir as T
def test_str_metadata():
    # This test is to check we reuse the existing metadata element for the same tir.StringImm
    # So metadata["tir.StringImm"][0] will occur in the printed script for three times
    str_imm = T.StringImm("aaa\nbbb\n")
    # The same StringImm object is captured three times across two prim_funcs.
    @I.ir_module
    class Module:
        @T.prim_func
        def foo() -> None:
            A = str_imm
            B = str_imm
        @T.prim_func
        def foo1() -> None:
            A = str_imm
    printed_str = Module.script(verbose_expr=True)
    # Exactly one metadata entry should exist, referenced three times.
    assert (
        printed_str.count('metadata["tir.StringImm"][0]') == 3
        and printed_str.count('metadata["tir.StringImm"][1]') == 0
    )
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest-based runner.
    tvm.testing.main()
| 1,624 | 32.854167 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_cache_read_write.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
########## Function before schedule ##########
# Schedule-input workload: C = A * 2 + 1 staged through intermediate buffer B.
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.alloc_buffer((128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Same workload as `elementwise`, but with int64 buffer extents.
@T.prim_func
def elementwise_shape_int64(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (T.int64(128), T.int64(128)))
    B = T.alloc_buffer((T.int64(128), T.int64(128)))
    C = T.match_buffer(c, (T.int64(128), T.int64(128)))
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected output after reindex_cache_read on block "C": B is staged into a
# shared-scope buffer with remapped layout (j, i // 2, i % 2).
@T.prim_func
def elementwise_reindex_cache_read(
    A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
):
    B = T.alloc_buffer((128, 128))
    B_shared = T.alloc_buffer((128, 64, 2), scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(B[vi, vj])
            B[vi, vj] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("B_shared"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi, vj])
            T.writes(B_shared[vj, vi // 2, vi % 2])
            B_shared[vj, vi // 2, vi % 2] = B[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B_shared[vj, vi // 2, vi % 2])
            T.writes(C[vi, vj])
            C[vi, vj] = B_shared[vj, vi // 2, vi % 2] + T.float32(1)
# Expected output after reindex_cache_write on block "B": B's producer writes
# into a shared-scope buffer with transposed layout (j, i), copied back to B.
@T.prim_func
def elementwise_reindex_cache_write(
    A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
):
    B = T.alloc_buffer((128, 128))
    B_shared = T.alloc_buffer((128, 128), scope="shared")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(B_shared[vj, vi])
            B_shared[vj, vi] = A[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("B_shared"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B_shared[vj, vi])
            T.writes(B[vi, vj])
            B[vi, vj] = B_shared[vj, vi]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi, vj])
            T.writes(C[vi, vj])
            C[vi, vj] = B[vi, vj] + T.float32(1)
# Schedule-input workload: two-stage reduction A(4D) -> B(3D) -> C(2D).
@T.prim_func
def reduce(A: T.Buffer((128, 128, 128, 128), "float32"), C: T.Buffer((128, 128), "float32")):
    B = T.alloc_buffer((128, 128, 128), dtype="float32")
    for i, j, k in T.grid(128, 128, 128):
        for l in range(128):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l])
                with T.init():
                    B[vi, vj, vk] = T.float32(0)
                B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl]
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + B[vi, vj, vk]
# Expected output after reindex_cache_write on reduction block "B": the
# intermediate B is written through a shared buffer with layout (j, i, k).
@T.prim_func
def reduce_reindex_cache_write_0(
    A: T.Buffer((128, 128, 128, 128), "float32"), C: T.Buffer((128, 128), "float32")
):
    B = T.alloc_buffer((128, 128, 128))
    B_shared = T.alloc_buffer((128, 128, 128), scope="shared")
    for i, j, k in T.grid(128, 128, 128):
        for l in range(128):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l])
                T.reads(A[vi, vj, vk, vl])
                T.writes(B_shared[vj, vi, vk])
                with T.init():
                    B_shared[vj, vi, vk] = T.float32(0)
                B_shared[vj, vi, vk] = B_shared[vj, vi, vk] + A[vi, vj, vk, vl]
        with T.block("B_shared"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            T.reads(B_shared[vj, vi, vk])
            T.writes(B[vi, vj, vk])
            B[vi, vj, vk] = B_shared[vj, vi, vk]
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            T.reads(B[vi, vj, vk])
            T.writes(C[vi, vj])
            with T.init():
                C[vi, vj] = T.float32(0)
            C[vi, vj] = C[vi, vj] + B[vi, vj, vk]
# Expected output after reindex_cache_write on both "B" and "C": C is also
# accumulated into a transposed shared buffer and copied out afterwards.
@T.prim_func
def reduce_reindex_cache_write_1(
    A: T.Buffer((128, 128, 128, 128), "float32"), C: T.Buffer((128, 128), "float32")
):
    B = T.alloc_buffer((128, 128, 128))
    B_shared = T.alloc_buffer((128, 128, 128), scope="shared")
    C_shared = T.alloc_buffer((128, 128), scope="shared")
    for i, j, k in T.grid(128, 128, 128):
        for l in range(128):
            with T.block("B"):
                vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l])
                T.reads(A[vi, vj, vk, vl])
                T.writes(B_shared[vj, vi, vk])
                with T.init():
                    B_shared[vj, vi, vk] = T.float32(0)
                B_shared[vj, vi, vk] = B_shared[vj, vi, vk] + A[vi, vj, vk, vl]
        with T.block("B_shared"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            T.reads(B_shared[vj, vi, vk])
            T.writes(B[vi, vj, vk])
            B[vi, vj, vk] = B_shared[vj, vi, vk]
        with T.block("C"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            T.reads(B[vi, vj, vk])
            T.writes(C_shared[vj, vi])
            with T.init():
                C_shared[vj, vi] = T.float32(0)
            C_shared[vj, vi] = C_shared[vj, vi] + B[vi, vj, vk]
    for i, j in T.grid(128, 128):
        with T.block("C_shared"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(C_shared[vj, vi])
            T.writes(C[vi, vj])
            C[vi, vj] = C_shared[vj, vi]
# Schedule-input workload with a nested SeqStmt: B is produced by two blocks
# inside the same loop nest, then A is consumed again by C.
@T.prim_func
def func_nested_seq(b: T.handle, c: T.handle) -> None:
    A = T.alloc_buffer((128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            A[vi, vj] = 2.0
    for i, j in T.grid(8, 8):
        for x, y in T.grid(16, 16):
            with T.block("B0"):
                vi = T.axis.S(128, i * 16 + x)
                vj = T.axis.S(128, j * 16 + y)
                B[vi, vj] = 1.0
        for x, y in T.grid(16, 16):
            with T.block("B1"):
                vi = T.axis.S(128, i * 16 + x)
                vj = T.axis.S(128, j * 16 + y)
                B[vi, vj] = A[vi, vj] + B[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = A[vi, vj] * 2.0
# Schedule-input workload where A is produced and consumed inside a nested
# "scope" block, then consumed again at the root scope by C.
@T.prim_func
def access_under_scope(b: T.handle, c: T.handle) -> None:
    A = T.alloc_buffer((128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i0, j0 in T.grid(8, 8):
        with T.block("scope"):
            i, j = T.axis.remap("SS", [i0, j0])
            for x, y in T.grid(16, 16):
                with T.block("A"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    A[vi, vj] = 1.0
            for x, y in T.grid(16, 16):
                with T.block("B"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    B[vi, vj] = A[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = A[vi, vj] * 2.0
# Schedule-input workload with three access styles on A: a plain load/store,
# an opaque tvm_load_matrix_sync via tvm_access_ptr, and a match_buffer view.
@T.prim_func
def opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), dtype="float16")
    B = T.match_buffer(b, (128, 128), dtype="float16")
    C = T.match_buffer(c, (128, 128), dtype="float16")
    D = T.match_buffer(d, (128, 128), dtype="float16")
    for i, j in T.grid(128, 128):
        with T.block("load_store"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(D[vi, vj])
            D[vi, vj] = A[vi, vj]
    for i, j in T.grid(8, 8):
        with T.block("opaque"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.evaluate(
                T.tvm_load_matrix_sync(
                    B.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A.data,
                        vi * 2048 + vj * 16,
                        128,
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
    for i, j in T.grid(8, 8):
        with T.block("match_buffer"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            A0 = T.match_buffer(
                A[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            C0 = T.match_buffer(
                C[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            T.evaluate(
                T.tvm_load_matrix_sync(
                    C0.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A0.data,
                        A0.elem_offset,
                        A0.strides[0],
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
# Schedule-input workload where buffer A has two consumers (B and C).
@T.prim_func
def func_multi_consumer() -> None:
    A = T.alloc_buffer((128))
    B = T.alloc_buffer((128))
    C = T.alloc_buffer((128))
    for i in T.grid(8):
        for j in T.grid(16):
            with T.block("A"):
                vi = T.axis.S(128, i * 16 + j)
                A[vi] = 1.0
        for j in T.grid(16):
            with T.block("B"):
                vi = T.axis.S(128, i * 16 + j)
                B[vi] = A[vi] + 1.0
    for i in T.grid(128):
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = A[vi]
# Expected output after reindex_cache_read on consumer "B" only: A is staged
# into a shared buffer with layout (i // 32, i % 32); consumer "C" still
# reads A directly.
@T.prim_func
def reindex_cache_read_multi_consumer() -> None:
    A = T.alloc_buffer((128,))
    B = T.alloc_buffer((128,))
    C = T.alloc_buffer((128,))
    A_shared = T.alloc_buffer((4, 32), scope="shared")
    for i in range(8):
        for j in range(16):
            with T.block("A"):
                vi = T.axis.spatial(128, i * 16 + j)
                T.reads()
                T.writes(A[vi])
                A[vi] = T.float32(1)
        for j in range(16):
            with T.block("A_shared"):
                vi = T.axis.spatial(128, i * 16 + j)
                T.reads(A[vi])
                T.writes(A_shared[vi // 32, vi % 32])
                A_shared[vi // 32, vi % 32] = A[vi]
        for j in range(16):
            with T.block("B"):
                vi = T.axis.spatial(128, i * 16 + j)
                T.reads(A_shared[vi // 32, vi % 32])
                T.writes(B[vi])
                B[vi] = A_shared[vi // 32, vi % 32] + T.float32(1)
    for i in range(128):
        with T.block("C"):
            vi = T.axis.spatial(128, i)
            T.reads(A[vi])
            T.writes(C[vi])
            C[vi] = A[vi]
# Schedule-input workload where buffer A has two producers (A0 and A1).
@T.prim_func
def func_multi_producer() -> None:
    A = T.alloc_buffer((128))
    B = T.alloc_buffer((128))
    for i in range(128):
        with T.block("A0"):
            vi = T.axis.S(128, i)
            A[vi] = 1.0
    for i in range(128):
        with T.block("A1"):
            vi = T.axis.S(128, i)
            A[vi] = 2.0
    for i in range(128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            B[vi] = A[vi]
# Schedule-input workload whose blocks carry a T.where predicate
# (120 elements tiled as 16 x 8, last tile partially active).
@T.prim_func
def func_with_block_predicate() -> None:
    A = T.alloc_buffer((120))
    B = T.alloc_buffer((120))
    for i, j in T.grid(16, 8):
        with T.block("producer"):
            T.where(i * 8 + j < 120)
            ax = T.axis.S(120, i * 8 + j)
            A[ax] = 0.0
    for i, j in T.grid(16, 8):
        with T.block("consumer"):
            T.where(i * 8 + j < 120)
            ax = T.axis.S(120, i * 8 + j)
            B[ax] = A[ax] + 1.0
# Schedule-input workload: copy-in / opaque in-place extern call / copy-out
# over a 64-element scratch buffer.
@T.prim_func
def inplace_func(data_io: T.Buffer((64), "int32")):
    data_1d = T.alloc_buffer([64], dtype="int32")
    for i0 in T.serial(64):
        with T.block("copy_in"):
            v0 = T.axis.remap("S", [i0])
            data_1d[v0] = data_io[v0]
    for i0 in T.serial(1):
        with T.block("ext_call"):
            T.reads(data_1d[:64])
            T.writes(data_1d[:64])
            T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
    for i0 in T.serial(64):
        with T.block("copy_out"):
            v0 = T.axis.remap("S", [i0])
            data_io[v0] = data_1d[v0]
# Schedule-input workload: a single opaque extern call that reads and writes
# the I/O buffer in place.
@T.prim_func
def inplace_call(data_io: T.Buffer((64), "int32")):
    for i0 in T.serial(1):
        with T.block("ext_call"):
            T.reads(data_io[:64])
            T.writes(data_io[:64])
            T.evaluate(T.call_extern("call_impl", data_io.data, dtype=""))
# Expected output of cache_read applied to func_nested_seq: an A_global copy
# stage is inserted before consumer "C" only; the nested B0/B1 sequence
# still reads A directly.
@T.prim_func
def cache_read_nested_seq_target(
    B: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")
) -> None:
    A = T.alloc_buffer([128, 128], dtype="float32")
    A_global = T.alloc_buffer([128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads()
            T.writes(A[vi, vj])
            A[vi, vj] = T.float32(2)
    for i, j in T.grid(8, 8):
        for x, y in T.grid(16, 16):
            with T.block("B0"):
                vi = T.axis.spatial(128, i * 16 + x)
                vj = T.axis.spatial(128, j * 16 + y)
                T.reads()
                T.writes(B[vi, vj])
                B[vi, vj] = T.float32(1)
        for x, y in T.grid(16, 16):
            with T.block("B1"):
                vi = T.axis.spatial(128, i * 16 + x)
                vj = T.axis.spatial(128, j * 16 + y)
                T.reads(A[vi, vj], B[vi, vj])
                T.writes(B[vi, vj])
                B[vi, vj] = A[vi, vj] + B[vi, vj]
    for ax0, ax1 in T.grid(128, 128):
        with T.block("A_global"):
            v0, v1 = T.axis.remap("SS", [ax0, ax1])
            T.reads(A[v0, v1])
            T.writes(A_global[v0, v1])
            A_global[v0, v1] = A[v0, v1]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A_global[vi, vj])
            T.writes(C[vi, vj])
            C[vi, vj] = A_global[vi, vj] * T.float32(2)
########## Expected function after cache_read ##########
# Expected output of cache_read on `elementwise`: A is read through a global
# copy stage and B through a local one.
@T.prim_func
def cache_read_elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    A_global = T.alloc_buffer((128, 128))
    B_local = T.alloc_buffer((128, 128), scope="local")
    for i, j in T.grid(128, 128):
        with T.block("A_global"):
            vi, vj = T.axis.remap("SS", [i, j])
            A_global[vi, vj] = A[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A_global[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("B_local"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_local[vi, vj] = B[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B_local[vi, vj] + 1.0
# Expected output of cache_read on `access_under_scope`: a 16x16 local cache
# of A inside the "scope" block, plus an A_global stage at root scope for C.
@T.prim_func
def cache_read_under_scope(b: T.handle, c: T.handle) -> None:
    A = T.alloc_buffer((128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    A_global = T.alloc_buffer((128, 128))
    for i0, j0 in T.grid(8, 8):
        with T.block("scope"):
            i, j = T.axis.remap("SS", [i0, j0])
            A_local = T.alloc_buffer((16, 16), scope="local")
            for x, y in T.grid(16, 16):
                with T.block("A"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    A[vi, vj] = 1.0
            for x, y in T.grid(16, 16):
                with T.block("A_local"):
                    vi = T.axis.S(16, x)
                    vj = T.axis.S(16, y)
                    A_local[vi, vj] = A[i * 16 + vi, j * 16 + vj]
            for x, y in T.grid(16, 16):
                with T.block("B"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    B[vi, vj] = A_local[vi - i * 16, vj - j * 16] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("A_global"):
            vi, vj = T.axis.remap("SS", [i, j])
            A_global[vi, vj] = A[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = A_global[vi, vj] * 2.0
# Expected output of cache_read on `opaque_access`: every access to A —
# including the opaque tvm_access_ptr and the match_buffer view — is
# redirected through the A_global copy stage.
@T.prim_func
def cache_read_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), dtype="float16")
    B = T.match_buffer(b, (128, 128), dtype="float16")
    C = T.match_buffer(c, (128, 128), dtype="float16")
    D = T.match_buffer(d, (128, 128), dtype="float16")
    A_global = T.alloc_buffer((128, 128), dtype="float16")
    for i, j in T.grid(128, 128):
        with T.block("A_global"):
            vi, vj = T.axis.remap("SS", [i, j])
            A_global[vi, vj] = A[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("load_store"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A_global[vi, vj])
            T.writes(D[vi, vj])
            D[vi, vj] = A_global[vi, vj]
    for i, j in T.grid(8, 8):
        with T.block("opaque"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.evaluate(
                T.tvm_load_matrix_sync(
                    B.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A_global.data,
                        vi * 2048 + vj * 16,
                        128,
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
    for i, j in T.grid(8, 8):
        with T.block("match_buffer"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            A0 = T.match_buffer(
                A_global[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            C0 = T.match_buffer(
                C[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            T.evaluate(
                T.tvm_load_matrix_sync(
                    C0.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A0.data,
                        A0.elem_offset,
                        A0.strides[0],
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
# Expected output of cache_read on func_multi_consumer when the copy stage is
# placed inside the tiled loop: both consumers read A_global.
@T.prim_func
def cache_read_multi_consumer() -> None:
    A = T.alloc_buffer((128))
    B = T.alloc_buffer((128))
    C = T.alloc_buffer((128))
    A_global = T.alloc_buffer((128))
    for i in T.grid(8):
        for j in T.grid(16):
            with T.block("A"):
                vi = T.axis.S(128, i * 16 + j)
                A[vi] = 1.0
        for j in T.grid(16):
            with T.block("A"):
                vi = T.axis.S(128, i * 16 + j)
                A_global[vi] = A[vi]
        for j in T.grid(16):
            with T.block("B"):
                vi = T.axis.S(128, i * 16 + j)
                B[vi] = A_global[vi] + 1.0
    for i in T.grid(128):
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = A_global[vi]
# Expected IR after cache_read on "B" restricted to consumer_blocks=["C"]:
# only "C" is rewritten to read "A_global"; "B" keeps reading "A" directly.
@T.prim_func
def cache_read_multi_consumer_target() -> None:
    A = T.alloc_buffer((128))
    B = T.alloc_buffer((128))
    C = T.alloc_buffer((128))
    A_global = T.alloc_buffer((128))
    for i in T.grid(8):
        for j in T.grid(16):
            with T.block("A"):
                vi = T.axis.S(128, i * 16 + j)
                A[vi] = 1.0
        for j in T.grid(16):
            with T.block("B"):
                vi = T.axis.S(128, i * 16 + j)
                B[vi] = A[vi] + 1.0
    # Cache fill only needs to precede "C", so it sits at the top level.
    for i in T.grid(128):
        with T.block("A"):
            vi = T.axis.S(128, i)
            A_global[vi] = A[vi]
    for i in T.grid(128):
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = A_global[vi]
# Expected IR after two stacked cache_reads on the input of "C":
# data flows B -> B_shared -> B_local -> C.
@T.prim_func
def continuous_cache_read(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    B_shared = T.alloc_buffer((128, 128), scope="shared")
    B_local = T.alloc_buffer((128, 128), scope="local")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("B_shared"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_shared[vi, vj] = B[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("B_local"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_local[vi, vj] = B_shared[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B_local[vi, vj] + 1.0
# Expected IR after cache_read on the "consumer" block of a function whose
# blocks carry T.where predicates: the predicates remain on producer/consumer,
# while the inserted "A_shared" copy iterates the full (unpredicated) extent.
@T.prim_func
def block_predicate_cache_read() -> None:
    A = T.alloc_buffer([120], dtype="float32")
    B = T.alloc_buffer([120], dtype="float32")
    A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
    for i, j in T.grid(16, 8):
        with T.block("producer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            A[ax] = T.float32(0)
    for ax0 in T.serial(120):
        with T.block("A_shared"):
            v0 = T.axis.spatial(120, ax0)
            A_shared[v0] = A[v0]
    for i, j in T.grid(16, 8):
        with T.block("consumer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            B[ax] = A_shared[ax] + T.float32(1)
# Expected IR after cache_read on "B" when buffer shapes use int64 extents:
# the generated "A_global" block keeps the int64 loop extents.
@T.prim_func
def cache_read_shape_int64(var_A: T.handle, var_C: T.handle) -> None:
    A = T.match_buffer(var_A, (T.int64(128), T.int64(128)), dtype="float32")
    C = T.match_buffer(var_C, (T.int64(128), T.int64(128)), dtype="float32")
    B = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
    A_global = T.alloc_buffer([T.int64(128), T.int64(128)], dtype="float32")
    for ax0, ax1 in T.grid(T.int64(128), T.int64(128)):
        with T.block("A_global"):
            v0, v1 = T.axis.remap("SS", [ax0, ax1])
            T.reads(A[v0, v1])
            T.writes(A_global[v0, v1])
            A_global[v0, v1] = A[v0, v1]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A_global[vi, vj])
            T.writes(B[vi, vj])
            B[vi, vj] = A_global[vi, vj] * T.float32(2)
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(B[vi, vj])
            T.writes(C[vi, vj])
            C[vi, vj] = B[vi, vj] + T.float32(1)
# Expected IR after cache_read on "copy_in" with the block itself as the only
# consumer: data_io is staged into a local buffer before being copied in.
@T.prim_func
def cache_read_inplace(data_io: T.Buffer(64, "int32")) -> None:
    data_1d = T.alloc_buffer([64], dtype="int32")
    data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
    for ax0 in T.serial(64):
        with T.block("data_io_local"):
            v0 = T.axis.spatial(64, ax0)
            T.reads(data_io[v0])
            T.writes(data_io_local[v0])
            data_io_local[v0] = data_io[v0]
    for i0 in T.serial(64):
        with T.block("copy_in"):
            v0 = T.axis.spatial(64, i0)
            T.reads(data_io_local[v0])
            T.writes(data_1d[v0])
            data_1d[v0] = data_io_local[v0]
    # Opaque extern call mutating data_1d in place; regions declared manually.
    for i0 in T.serial(1):
        with T.block("ext_call"):
            T.reads(data_1d[0:64])
            T.writes(data_1d[0:64])
            T.evaluate(T.call_extern("call_impl", data_1d.data, dtype=""))
    for i0 in T.serial(64):
        with T.block("copy_out"):
            v0 = T.axis.spatial(64, i0)
            T.reads(data_1d[v0])
            T.writes(data_io[v0])
            data_io[v0] = data_1d[v0]
# Expected IR after cache_inplace on "ext_call" (plus a cache_read/cache_write
# on the two blocks it returns): the in-place extern call now operates on a
# local copy, bracketed by copy-in and copy-out stages.
@T.prim_func
def cache_inplace_buffer(data_io: T.Buffer(64, "int32")) -> None:
    data_io_local = T.alloc_buffer([64], dtype="int32", scope="local")
    data_io_global = T.alloc_buffer([64], dtype="int32")
    data_io_global_1 = T.alloc_buffer([64], dtype="int32")
    for ax0 in T.serial(64):
        with T.block("data_io_global"):
            v0 = T.axis.spatial(64, ax0)
            T.reads(data_io[v0])
            T.writes(data_io_global[v0])
            data_io_global[v0] = data_io[v0]
    for i0 in T.serial(1):
        # Copy-in stage created by cache_inplace.
        for ax0 in T.serial(64):
            with T.block("data_io_local"):
                v0 = T.axis.spatial(64, ax0)
                T.reads(data_io_global[v0])
                T.writes(data_io_local[v0])
                data_io_local[v0] = data_io_global[v0]
        with T.block("ext_call"):
            T.reads(data_io_local[0:64])
            T.writes(data_io_local[0:64])
            T.evaluate(T.call_extern("call_impl", data_io_local.data, dtype=""))
        # Copy-out stage created by cache_inplace.
        for ax0 in T.serial(64):
            with T.block("data_io_local"):
                v0 = T.axis.spatial(64, ax0)
                T.reads(data_io_local[v0])
                T.writes(data_io_global_1[v0])
                data_io_global_1[v0] = data_io_local[v0]
    for ax0 in T.serial(64):
        with T.block("data_io_global"):
            v0 = T.axis.spatial(64, ax0)
            T.reads(data_io_global_1[v0])
            T.writes(data_io[v0])
            data_io[v0] = data_io_global_1[v0]
########## Expected function after cache_write ##########
# Expected IR after cache_write on "B" (local) and "C" (global): each block
# now writes into a cache buffer, followed by a copy-back block.
@T.prim_func
def cache_write_elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    C = T.match_buffer(c, (128, 128))
    B = T.alloc_buffer((128, 128))
    B_global = T.alloc_buffer((128, 128), scope="local")
    C_local = T.alloc_buffer((128, 128))
    for i, j in T.grid(128, 128):
        with T.block("B_global"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_global[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = B_global[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C_local"):
            vi, vj = T.axis.remap("SS", [i, j])
            C_local[vi, vj] = B[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = C_local[vi, vj]
# Expected IR after cache_write on "A", "B" and the "scope" block of
# `access_under_scope`: caches local to the scope block are allocated inside
# it, while the write-back for the scope's output happens at the top level.
@T.prim_func
def cache_write_under_scope(b: T.handle, c: T.handle) -> None:
    A = T.alloc_buffer((128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    A_global = T.alloc_buffer((128, 128))
    for i0, j0 in T.grid(8, 8):
        with T.block("scope"):
            i, j = T.axis.remap("SS", [i0, j0])
            # Per-scope caches: allocated inside the scope block.
            A_local = T.alloc_buffer((16, 16), scope="local")
            B_global = T.alloc_buffer((16, 16))
            for x, y in T.grid(16, 16):
                with T.block("A_local"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    A_local[vi - i * 16, vj - j * 16] = 1.0
            for x, y in T.grid(16, 16):
                with T.block("A"):
                    vi = T.axis.S(16, x)
                    vj = T.axis.S(16, y)
                    A_global[i * 16 + vi, j * 16 + vj] = A_local[vi, vj]
            for x, y in T.grid(16, 16):
                with T.block("B"):
                    vi = T.axis.S(128, i * 16 + x)
                    vj = T.axis.S(128, j * 16 + y)
                    B_global[vi - i * 16, vj - j * 16] = A_global[vi, vj] + 1.0
            for x, y in T.grid(16, 16):
                with T.block("B_global"):
                    vi = T.axis.S(16, x)
                    vj = T.axis.S(16, y)
                    B[i * 16 + vi, j * 16 + vj] = B_global[vi, vj]
    # Write-back for the scope block's own cache_write.
    for i, j in T.grid(128, 128):
        with T.block("A_global"):
            vi, vj = T.axis.remap("SS", [i, j])
            A[vi, vj] = A_global[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = A[vi, vj] * 2.0
# Expected IR after cache_write on the "load_store", "opaque" and
# "match_buffer" blocks of `opaque_access`: each now writes to a *_global
# cache, with copy-back blocks appended at the end of the function.
@T.prim_func
def cache_write_opaque_access(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), dtype="float16")
    B = T.match_buffer(b, (128, 128), dtype="float16")
    C = T.match_buffer(c, (128, 128), dtype="float16")
    D = T.match_buffer(d, (128, 128), dtype="float16")
    D_global = T.alloc_buffer((128, 128), dtype="float16")
    B_global = T.alloc_buffer((128, 128), dtype="float16")
    C_global = T.alloc_buffer((128, 128), dtype="float16")
    for i, j in T.grid(128, 128):
        with T.block("load_store"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi, vj])
            T.writes(D_global[vi, vj])
            D_global[vi, vj] = A[vi, vj]
    # Opaque access: writes now target B_global instead of B.
    for i, j in T.grid(8, 8):
        with T.block("opaque"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(B_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.evaluate(
                T.tvm_load_matrix_sync(
                    B_global.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A.data,
                        vi * 2048 + vj * 16,
                        128,
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
    # match_buffer consumer: the output view C0 is remapped onto C_global.
    for i, j in T.grid(8, 8):
        with T.block("match_buffer"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            T.writes(C_global[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16])
            A0 = T.match_buffer(
                A[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            C0 = T.match_buffer(
                C_global[
                    vi * 16 : vi * 16 + 16,
                    vj * 16 : vj * 16 + 16,
                ],
                (16, 16),
                "float16",
                strides=[128, 1],
                offset_factor=1,
            )
            T.evaluate(
                T.tvm_load_matrix_sync(
                    C0.data,
                    16,
                    16,
                    16,
                    vi * 8 + vj,
                    T.tvm_access_ptr(
                        T.type_annotation(dtype="float16"),
                        A0.data,
                        A0.elem_offset,
                        A0.strides[0],
                        1,
                        dtype="handle",
                    ),
                    128,
                    "row_major",
                    dtype="handle",
                )
            )
    # Copy-back blocks for the three cache_writes.
    for i, j in T.grid(128, 128):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            D[vi, vj] = D_global[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = B_global[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = C_global[vi, vj]
# Expected IR after cache_write on "A" in `func_multi_consumer` with no
# consumer restriction: "A" writes the cache, and the copy-back is placed so
# that both "B" and "C" read the original buffer.
@T.prim_func
def cache_write_multi_consumer() -> None:
    A = T.alloc_buffer((128))
    B = T.alloc_buffer((128))
    C = T.alloc_buffer((128))
    A_global = T.alloc_buffer((128))
    for i in T.grid(8):
        for j in T.grid(16):
            with T.block("A_global"):
                vi = T.axis.S(128, i * 16 + j)
                A_global[vi] = 1.0
        # Copy-back inside the loop, before the first consumer.
        for j in T.grid(16):
            with T.block("A"):
                vi = T.axis.S(128, i * 16 + j)
                A[vi] = A_global[vi]
        for j in T.grid(16):
            with T.block("B"):
                vi = T.axis.S(128, i * 16 + j)
                B[vi] = A[vi] + 1.0
    for i in T.grid(128):
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = A[vi]
# Expected IR after cache_write on "A" with consumer_blocks=["B"]:
# "B" reads the cache directly; "C" reads the original buffer, filled by a
# top-level copy-back.
@T.prim_func
def cache_write_multi_consumer_B_consume_cache():
    A = T.alloc_buffer([128], dtype="float32")
    B = T.alloc_buffer([128], dtype="float32")
    C = T.alloc_buffer([128], dtype="float32")
    A_global = T.alloc_buffer([128], dtype="float32")
    for i in T.serial(8):
        for j in T.serial(16):
            with T.block("A"):
                vi = T.axis.spatial(128, i * 16 + j)
                A_global[vi] = 1.0
        for j in T.serial(16):
            with T.block("B"):
                vi = T.axis.spatial(128, i * 16 + j)
                B[vi] = A_global[vi] + 1.0
    for ax0 in T.serial(128):
        with T.block("A_global"):
            v0 = T.axis.spatial(128, ax0)
            A[v0] = A_global[v0]
    for i in T.serial(128):
        with T.block("C"):
            vi = T.axis.spatial(128, i)
            C[vi] = A[vi]
# Expected IR after cache_write on "A" with consumer_blocks=["C"]:
# "C" reads the cache directly; "B" reads the original buffer, so the
# copy-back sits inside the loop ahead of "B".
@T.prim_func
def cache_write_multi_consumer_C_consume_cache():
    A = T.alloc_buffer([128], dtype="float32")
    B = T.alloc_buffer([128], dtype="float32")
    C = T.alloc_buffer([128], dtype="float32")
    A_global = T.alloc_buffer([128], dtype="float32")
    for i in T.serial(8):
        for j in T.serial(16):
            with T.block("A"):
                vi = T.axis.spatial(128, i * 16 + j)
                A_global[vi] = T.float32(1)
        for ax0 in T.serial(16):
            with T.block("A_global"):
                v0 = T.axis.spatial(128, i * 16 + ax0)
                A[v0] = A_global[v0]
        for j in T.serial(16):
            with T.block("B"):
                vi = T.axis.spatial(128, i * 16 + j)
                B[vi] = A[vi] + T.float32(1)
    for i in T.serial(128):
        with T.block("C"):
            vi = T.axis.spatial(128, i)
            C[vi] = A_global[vi]
# Expected IR after cache_write on "A" with consumer_blocks=["B", "C"]:
# both consumers read the cache; the copy-back to "A" is appended last.
@T.prim_func
def cache_write_multi_consumer_all_consume_cache():
    A = T.alloc_buffer([128], dtype="float32")
    B = T.alloc_buffer([128], dtype="float32")
    C = T.alloc_buffer([128], dtype="float32")
    A_global = T.alloc_buffer([128], dtype="float32")
    for i in T.serial(8):
        for j in T.serial(16):
            with T.block("A"):
                vi = T.axis.spatial(128, i * 16 + j)
                A_global[vi] = T.float32(1)
        for j in T.serial(16):
            with T.block("B"):
                vi = T.axis.spatial(128, i * 16 + j)
                B[vi] = A_global[vi] + T.float32(1)
    for i in T.serial(128):
        with T.block("C"):
            vi = T.axis.spatial(128, i)
            C[vi] = A_global[vi]
    for ax0 in T.serial(128):
        with T.block("A_global"):
            v0 = T.axis.spatial(128, ax0)
            A[v0] = A_global[v0]
# Expected IR after two stacked cache_writes on "B":
# data flows B_local -> B_shared -> B -> C.
@T.prim_func
def continuous_cache_write(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.alloc_buffer((128, 128))
    C = T.match_buffer(c, (128, 128))
    B_shared = T.alloc_buffer((128, 128), scope="shared")
    B_local = T.alloc_buffer((128, 128), scope="local")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_local[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B_shared[vi, vj] = B_local[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = B_shared[vi, vj]
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Expected IR after cache_write on "producer" (an intermediate buffer) in a
# function with T.where predicates: the producer writes the shared cache and
# the unpredicated copy-back fills "A".
@T.prim_func
def block_predicate_cache_write_intermediate_buf() -> None:
    A = T.alloc_buffer([120], dtype="float32")
    B = T.alloc_buffer([120], dtype="float32")
    A_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
    for i, j in T.grid(16, 8):
        with T.block("producer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            A_shared[ax] = T.float32(0)
    for ax0 in T.serial(120):
        with T.block("A_shared"):
            v0 = T.axis.spatial(120, ax0)
            A[v0] = A_shared[v0]
    for i, j in T.grid(16, 8):
        with T.block("consumer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            B[ax] = A[ax] + 1.0
# Expected IR after cache_write on "consumer" (the output buffer) in a
# function with T.where predicates: the consumer writes the shared cache and
# the unpredicated copy-back fills "B".
@T.prim_func
def block_predicate_cache_write_output_buf() -> None:
    A = T.alloc_buffer([120], dtype="float32")
    B = T.alloc_buffer([120], dtype="float32")
    B_shared = T.alloc_buffer([120], dtype="float32", scope="shared")
    for i, j in T.grid(16, 8):
        with T.block("producer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            A[ax] = T.float32(0)
    for i, j in T.grid(16, 8):
        with T.block("consumer"):
            ax = T.axis.spatial(120, i * 8 + j)
            T.where(i * 8 + j < 120)
            B_shared[ax] = A[ax] + T.float32(1)
    for ax0 in T.serial(120):
        with T.block("B_shared"):
            v0 = T.axis.spatial(120, ax0)
            B[v0] = B_shared[v0]
# Input fixture: a 32x32-blocked matmul whose outer extents depend on the
# symbolic size n (padded up to a multiple of 32).
@T.prim_func
def symbolic_matmul_blocked(var_A: T.handle, var_B: T.handle, var_C: T.handle, n: T.int32):
    A = T.match_buffer(var_A, ((n + 31) // 32 * 32, 4))
    B = T.match_buffer(var_B, (4, (n + 31) // 32 * 32))
    C = T.match_buffer(var_C, ((n + 31) // 32 * 32, (n + 31) // 32 * 32))
    for i0_0, i1_0 in T.grid((n + 31) // 32, (n + 31) // 32):
        with T.block("matmul_o"):
            v_i0_o, v_i1_o = T.axis.remap("SS", [i0_0, i1_0])
            T.reads(
                A[v_i0_o * 32 : v_i0_o * 32 + 32, 0:4],
                B[0:4, v_i1_o * 32 : v_i1_o * 32 + 32],
            )
            T.writes(C[v_i0_o * 32 : v_i0_o * 32 + 32, v_i1_o * 32 : v_i1_o * 32 + 32])
            for i0_1, i1_1, k in T.grid(32, 32, 4):
                with T.block("matmul"):
                    v_i0_i, v_i1_i, v_k_i = T.axis.remap("SSR", [i0_1, i1_1, k])
                    T.reads(A[v_i0_o * 32 + v_i0_i, v_k_i], B[v_k_i, v_i1_o * 32 + v_i1_i])
                    T.writes(C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i])
                    with T.init():
                        C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i] = T.float32(0)
                    C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i] = (
                        C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i]
                        + A[v_i0_o * 32 + v_i0_i, v_k_i] * B[v_k_i, v_i1_o * 32 + v_i1_i]
                    )
# Expected IR after cache_read of A inside the "matmul_o" scope of
# `symbolic_matmul_blocked`: a 32x4 "A_shared" tile is staged per outer block.
@T.prim_func
def symbolic_matmul_blocked_cache_read(
    var_A: T.handle, var_B: T.handle, var_C: T.handle, n: T.int32
):
    A = T.match_buffer(var_A, ((n + 31) // 32 * 32, 4))
    B = T.match_buffer(var_B, (4, (n + 31) // 32 * 32))
    C = T.match_buffer(var_C, ((n + 31) // 32 * 32, (n + 31) // 32 * 32))
    for i0_0, i1_0 in T.grid((n + 31) // 32, (n + 31) // 32):
        with T.block("matmul_o"):
            v_i0_o, v_i1_o = T.axis.remap("SS", [i0_0, i1_0])
            T.reads(
                A[v_i0_o * 32 : v_i0_o * 32 + 32, 0:4],
                B[0:4, v_i1_o * 32 : v_i1_o * 32 + 32],
            )
            T.writes(C[v_i0_o * 32 : v_i0_o * 32 + 32, v_i1_o * 32 : v_i1_o * 32 + 32])
            # Per-block staging tile allocated inside the scope block.
            A_shared = T.alloc_buffer((32, 4), scope="shared")
            for ax0, ax1 in T.grid(32, 4):
                with T.block("A_shared"):
                    v0 = T.axis.spatial(32, ax0)
                    v1 = T.axis.spatial(4, ax1)
                    T.reads(A[v_i0_o * 32 + v0, v1])
                    T.writes(A_shared[v0, v1])
                    A_shared[v0, v1] = A[v_i0_o * 32 + v0, v1]
            for i0_1, i1_1, k in T.grid(32, 32, 4):
                with T.block("matmul"):
                    v_i0_i, v_i1_i, v_k_i = T.axis.remap("SSR", [i0_1, i1_1, k])
                    T.reads(A_shared[v_i0_i, v_k_i], B[v_k_i, v_i1_o * 32 + v_i1_i])
                    T.writes(C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i])
                    with T.init():
                        C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i] = T.float32(0)
                    C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i] = (
                        C[v_i0_o * 32 + v_i0_i, v_i1_o * 32 + v_i1_i]
                        + A_shared[v_i0_i, v_k_i] * B[v_k_i, v_i1_o * 32 + v_i1_i]
                    )
# Expected IR after cache_write of C inside the "matmul_o" scope of
# `symbolic_matmul_blocked`: results accumulate in a 32x32 local tile that is
# copied back to C at the end of each outer block.
@T.prim_func
def symbolic_matmul_blocked_cache_write(
    var_A: T.handle, var_B: T.handle, var_C: T.handle, n: T.int32
):
    A = T.match_buffer(var_A, ((n + 31) // 32 * 32, 4))
    B = T.match_buffer(var_B, (4, (n + 31) // 32 * 32))
    C = T.match_buffer(var_C, ((n + 31) // 32 * 32, (n + 31) // 32 * 32))
    for i0_0, i1_0 in T.grid((n + 31) // 32, (n + 31) // 32):
        with T.block("matmul_o"):
            v_i0_o, v_i1_o = T.axis.remap("SS", [i0_0, i1_0])
            T.reads(
                A[v_i0_o * 32 : v_i0_o * 32 + 32, 0:4],
                B[0:4, v_i1_o * 32 : v_i1_o * 32 + 32],
            )
            T.writes(C[v_i0_o * 32 : v_i0_o * 32 + 32, v_i1_o * 32 : v_i1_o * 32 + 32])
            # Per-block accumulator tile allocated inside the scope block.
            C_pad_local = T.alloc_buffer((32, 32), scope="local")
            for i0_1, i1_1, k in T.grid(32, 32, 4):
                with T.block("matmul"):
                    v_i0_i, v_i1_i, v_k_i = T.axis.remap("SSR", [i0_1, i1_1, k])
                    T.reads(A[v_i0_o * 32 + v_i0_i, v_k_i], B[v_k_i, v_i1_o * 32 + v_i1_i])
                    T.writes(C_pad_local[v_i0_i, v_i1_i])
                    with T.init():
                        C_pad_local[v_i0_i, v_i1_i] = T.float32(0)
                    C_pad_local[v_i0_i, v_i1_i] = (
                        C_pad_local[v_i0_i, v_i1_i]
                        + A[v_i0_o * 32 + v_i0_i, v_k_i] * B[v_k_i, v_i1_o * 32 + v_i1_i]
                    )
            for ax0, ax1 in T.grid(32, 32):
                with T.block("C_pad_local"):
                    v0 = T.axis.spatial(32, ax0)
                    v1 = T.axis.spatial(32, ax1)
                    T.reads(C_pad_local[v0, v1])
                    T.writes(C[v_i0_o * 32 + v0, v_i1_o * 32 + v1])
                    C[v_i0_o * 32 + v0, v_i1_o * 32 + v1] = C_pad_local[v0, v1]
########## Testcases for cache_read ##########
# Parametrize the tests below to exercise both schedule-API addressing modes:
# BlockRV handles ("block_obj") and block-name strings ("block_name").
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def test_cache_read_elementwise(use_block_name):
    """Basic cache_read on the inputs of "B" and "C"; checks the returned
    handles and the resulting IR."""
    s = tir.Schedule(elementwise, debug_mask="all")
    block_b = s.get_block("B")
    block_c = s.get_block("C")
    if use_block_name:
        # Name-based form: the read buffer is identified by its name.
        cached_a = s.cache_read("B", "A", "global")
        cached_b = s.cache_read("C", "B", "local")
    else:
        # Handle-based form: the read buffer is identified by region index.
        cached_a = s.cache_read(block_b, 0, "global")
        cached_b = s.cache_read(block_c, 0, "local")
    for handle, name in (
        (cached_a, "A_global"),
        (cached_b, "B_local"),
        (block_b, "B"),
        (block_c, "C"),
    ):
        assert s.get(handle) == s.get(s.get_block(name))
    tvm.ir.assert_structural_equal(cache_read_elementwise, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise)
def test_cache_read_under_scope(use_block_name):
    """cache_read of blocks nested under a scope block: "B" gets a local
    cache, "C" a global one."""
    s = tir.Schedule(access_under_scope, debug_mask="all")

    def _blk(name):
        # Address by name string or by resolved handle, per parametrization.
        return name if use_block_name else s.get_block(name)

    s.cache_read(_blk("B"), 0, "local")
    s.cache_read(_blk("C"), 0, "global")
    tvm.ir.assert_structural_equal(cache_read_under_scope, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=access_under_scope)
def test_cache_read_opaque_access(use_block_name):
    """cache_read rewrites consumers with opaque (intrinsic) accesses too."""
    s = tir.Schedule(opaque_access, debug_mask="all")
    target = "load_store" if use_block_name else s.get_block("load_store")
    s.cache_read(target, 0, "global")
    tvm.ir.assert_structural_equal(cache_read_opaque_access, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=opaque_access)
def test_cache_read_location(use_block_name):
    """cache_read placement with multiple consumers, with and without an
    explicit `consumer_blocks` restriction."""

    def _run(consumer_names, expected):
        # Each scenario starts from a fresh schedule of the same function.
        s = tir.Schedule(func_multi_consumer, debug_mask="all")
        target = "B" if use_block_name else s.get_block("B")
        kwargs = {}
        if consumer_names is not None:
            kwargs["consumer_blocks"] = [
                n if use_block_name else s.get_block(n) for n in consumer_names
            ]
        s.cache_read(target, 0, "global", **kwargs)
        tvm.ir.assert_structural_equal(expected, s.mod["main"])
        verify_trace_roundtrip(sch=s, mod=func_multi_consumer)

    # No restriction: cache is placed to cover every consumer.
    _run(None, cache_read_multi_consumer)
    # Restricting to "C" rewrites only that consumer.
    _run(["C"], cache_read_multi_consumer_target)
    # Naming all consumers is equivalent to no restriction.
    _run(["B", "C"], cache_read_multi_consumer)
def test_continuous_cache_read(use_block_name):
    """Two stacked cache_reads on the same operand chain shared -> local."""
    s = tir.Schedule(elementwise, debug_mask="all")
    consumer = "C" if use_block_name else s.get_block("C")
    for scope in ("shared", "local"):
        s.cache_read(consumer, 0, scope)
    tvm.ir.assert_structural_equal(continuous_cache_read, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise)
def test_cache_read_with_block_predicate(use_block_name):
    """cache_read keeps T.where predicates on the surrounding blocks."""
    s = tir.Schedule(func_with_block_predicate, debug_mask="all")
    consumer = "consumer" if use_block_name else s.get_block("consumer")
    s.cache_read(consumer, 0, "shared")
    tvm.ir.assert_structural_equal(block_predicate_cache_read, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func_with_block_predicate)
def test_cache_read_non_int32_shape(use_block_name):
    """cache_read preserves int64 extents of the cached buffer."""
    s = tir.Schedule(elementwise_shape_int64, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    s.cache_read(target, 0, "global")
    tvm.ir.assert_structural_equal(cache_read_shape_int64, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise_shape_int64)
def test_cache_read_fail_multi_producer(use_block_name):
    """cache_read rejects a buffer written by more than one producer."""
    s = tir.Schedule(func_multi_producer, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        s.cache_read(target, 0, "global")
def test_cache_read_fail_index_out_of_bound(use_block_name):
    """cache_read rejects a read-region index past the block's read list."""
    s = tir.Schedule(elementwise, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        s.cache_read(target, 1, "global")
def test_cache_read_fail_invalid_storage_scope(use_block_name):
    """cache_read rejects an unknown storage scope string."""
    s = tir.Schedule(elementwise, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        s.cache_read(target, 0, "test_scope")
def test_inplace_cache_read():
    """cache_read may name the target block itself as the sole consumer
    (in-place usage pattern)."""
    s = tvm.tir.Schedule(inplace_func, debug_mask="all")
    copy_in = s.get_block("copy_in")
    s.cache_read(copy_in, 0, "local", [copy_in])
    tvm.ir.assert_structural_equal(cache_read_inplace, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=inplace_func)
def test_cache_inplace():
    """cache_inplace on an extern call, plus cache_read/cache_write on the
    blocks it returns.

    cache_inplace can introduce WAR dependencies, which is expected, so only
    the sref tree is verified instead of the full stage-pipeline property.
    """
    debug_mask = tvm.tir.schedule.state.ScheduleDebugMask.VERIFY_SREF_TREE
    s = tvm.tir.Schedule(inplace_call, debug_mask=debug_mask)
    caches = s.cache_inplace(s.get_block("ext_call"), 0, "local")
    s.cache_read(caches[0], 0, "global", [caches[0]])
    s.cache_write(caches[1], 0, "global")
    tvm.ir.assert_structural_equal(cache_inplace_buffer, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=inplace_call, debug_mask=debug_mask)
def test_cache_read_nested_seq(use_block_name):
    """cache_read with an explicit consumer inside a nested statement sequence."""
    s = tir.Schedule(func_nested_seq, debug_mask="all")
    consumer = "C" if use_block_name else s.get_block("C")
    s.cache_read(consumer, 0, "global", consumer_blocks=[consumer])
    tvm.ir.assert_structural_equal(cache_read_nested_seq_target, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func_nested_seq)
########## Testcases for cache_write ##########
def test_cache_write_elementwise(use_block_name):
    """Basic cache_write on "B" (local) and "C" (global); checks the returned
    handles and the resulting IR."""
    s = tir.Schedule(elementwise, debug_mask="all")
    block_b = s.get_block("B")
    block_c = s.get_block("C")
    cached_b = s.cache_write("B" if use_block_name else block_b, 0, "local")
    cached_c = s.cache_write("C" if use_block_name else block_c, 0, "global")
    # Each returned handle must resolve to the matching block in the new IR.
    for handle, name in (
        (cached_b, "B_local"),
        (cached_c, "C_global"),
        (block_b, "B"),
        (block_c, "C"),
    ):
        assert s.get(handle) == s.get(s.get_block(name))
    tvm.ir.assert_structural_equal(cache_write_elementwise, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise)
def test_cache_write_under_scope(use_block_name):
    """cache_write on blocks nested under a scope block and on the scope
    block itself."""
    s = tir.Schedule(access_under_scope, debug_mask="all")
    block_scope = s.get_block("scope")

    def _blk(name):
        return name if use_block_name else s.get_block(name)

    s.cache_write(_blk("A"), 0, "local")
    s.cache_write(_blk("B"), 0, "global")
    s.cache_write(block_scope, 0, "global")
    tvm.ir.assert_structural_equal(cache_write_under_scope, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=access_under_scope)
def test_cache_write_opaque_access(use_block_name):
    """cache_write rewrites blocks with load/store, opaque-intrinsic, and
    match_buffer style accesses."""
    s = tir.Schedule(opaque_access, debug_mask="all")
    for name in ("load_store", "opaque", "match_buffer"):
        target = name if use_block_name else s.get_block(name)
        s.cache_write(target, 0, "global")
    tvm.ir.assert_structural_equal(cache_write_opaque_access, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=opaque_access)
def test_cache_write_location(use_block_name):
    """cache_write placement with multiple consumers, optionally restricted
    via `consumer_blocks`."""

    def _run(consumer_names, expected):
        # Each scenario starts from a fresh schedule of the same function.
        s = tir.Schedule(func_multi_consumer, debug_mask="all")
        target = "A" if use_block_name else s.get_block("A")
        kwargs = {}
        if consumer_names is not None:
            kwargs["consumer_blocks"] = [
                n if use_block_name else s.get_block(n) for n in consumer_names
            ]
        s.cache_write(target, 0, "global", **kwargs)
        tvm.ir.assert_structural_equal(expected, s.mod["main"])
        verify_trace_roundtrip(sch=s, mod=func_multi_consumer)

    # No restriction: every consumer reads the original buffer after copy-back.
    _run(None, cache_write_multi_consumer)
    # Only "B" reads the cache; "C" reads the original output buffer.
    _run(["B"], cache_write_multi_consumer_B_consume_cache)
    # Only "C" reads the cache; "B" reads the original output buffer.
    _run(["C"], cache_write_multi_consumer_C_consume_cache)
    # Both consumers read the cache.
    _run(["B", "C"], cache_write_multi_consumer_all_consume_cache)
def test_continuous_cache_write(use_block_name):
    """Two stacked cache_writes on "B" chain shared -> local."""
    s = tir.Schedule(elementwise, debug_mask="all")
    producer = "B" if use_block_name else s.get_block("B")
    for scope in ("shared", "local"):
        s.cache_write(producer, 0, scope)
    tvm.ir.assert_structural_equal(continuous_cache_write, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise)
def test_cache_write_with_block_predicate(use_block_name):
    """cache_write keeps T.where predicates for both an intermediate buffer
    ("producer") and an external output buffer ("consumer")."""
    for block_name, expected in (
        ("producer", block_predicate_cache_write_intermediate_buf),
        ("consumer", block_predicate_cache_write_output_buf),
    ):
        s = tir.Schedule(func_with_block_predicate, debug_mask="all")
        target = block_name if use_block_name else s.get_block(block_name)
        s.cache_write(target, 0, "shared")
        tvm.ir.assert_structural_equal(expected, s.mod["main"])
        verify_trace_roundtrip(sch=s, mod=func_with_block_predicate)
def test_cache_write_fail_multi_producer(use_block_name):
    """cache_write rejects buffers that have more than one producer."""
    s = tir.Schedule(func_multi_producer, debug_mask="all")
    for name in ("A0", "A1"):
        target = name if use_block_name else s.get_block(name)
        with pytest.raises(tvm.tir.ScheduleError):
            s.cache_write(target, 0, "global")
def test_cache_write_fail_index_out_of_bound(use_block_name):
    """cache_write rejects a write-region index past the block's write list."""
    s = tir.Schedule(elementwise, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        s.cache_write(target, 1, "global")
def test_cache_write_fail_invalid_storage_scope(use_block_name):
    """cache_write rejects an unknown storage scope string."""
    s = tir.Schedule(elementwise, debug_mask="all")
    target = "B" if use_block_name else s.get_block("B")
    with pytest.raises(tvm.tir.ScheduleError):
        s.cache_write(target, 0, "test_scope")
@pytest.mark.parametrize("use_decl_buffer", [True, False])
def test_cache_write_allocate_const(use_decl_buffer):
    """cache_read/cache_write must preserve T.allocate_const allocations,
    whether the constant is exposed through decl_buffer or a plain Buffer."""

    def apply_decl_buffer(*args, **kwargs):
        # Build the constant's view either as a declared buffer or a bare one.
        if use_decl_buffer:
            return T.decl_buffer(*args, **kwargs)
        else:
            return T.Buffer(*args, **kwargs)

    @T.prim_func
    def before(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float16")):
        B = T.alloc_buffer([128, 128], dtype="float32")
        const1 = T.allocate_const([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7], "float32", [8])
        const1_buf = apply_decl_buffer([8], dtype="float32", data=const1)
        const2 = T.allocate_const([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7], "float32", [8])
        const2_buf = apply_decl_buffer([8], dtype="float32", data=const2)
        for i, j in T.grid(128, 128):
            for x in range(8):
                with T.block("B"):
                    vi, vj, vx = T.axis.remap("SSS", [i, j, x])
                    T.reads(A[vi, vj], const1_buf[vx], const2_buf[vx])
                    T.writes(B[vi, vj])
                    B[vi, vj] = A[vi, vj] * const1_buf[vx] + const2_buf[vx]
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(B[vi, vj])
                T.writes(C[vi, vj])
                C[vi, vj] = B[vi, vj] + 1.0

    # Expected IR: A is staged through A_global, C through C_global, while
    # both allocate_const regions survive untouched.
    @T.prim_func
    def expected(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float16")):
        B = T.alloc_buffer([128, 128], dtype="float32")
        A_global = T.alloc_buffer([128, 128], dtype="float32")
        C_global = T.alloc_buffer([128, 128], dtype="float16")
        const1 = T.allocate_const([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7], "float32", [8])
        const1_buf = apply_decl_buffer([8], dtype="float32", data=const1)
        const2 = T.allocate_const([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7], "float32", [8])
        const2_buf = apply_decl_buffer([8], dtype="float32", data=const2)
        for ax0, ax1 in T.grid(128, 128):
            with T.block("A_global"):
                v0, v1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(A[v0, v1])
                T.writes(A_global[v0, v1])
                A_global[v0, v1] = A[v0, v1]
        for i, j, x in T.grid(128, 128, 8):
            with T.block("B"):
                vi, vj, vx = T.axis.remap("SSS", [i, j, x])
                T.reads(A_global[vi, vj], const1_buf[vx], const2_buf[vx])
                T.writes(B[vi, vj])
                B[vi, vj] = A_global[vi, vj] * const1_buf[vx] + const2_buf[vx]
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                T.reads(B[vi, vj])
                T.writes(C_global[vi, vj])
                C_global[vi, vj] = B[vi, vj] + T.float32(1)
        for ax0, ax1 in T.grid(128, 128):
            with T.block("C_global"):
                v0, v1 = T.axis.remap("SS", [ax0, ax1])
                T.reads(C_global[v0, v1])
                T.writes(C[v0, v1])
                C[v0, v1] = C_global[v0, v1]

    sch = tir.Schedule(before)
    block_b = sch.get_block("B")
    block_c = sch.get_block("C")
    sch.cache_read(block_b, 0, "global")
    sch.cache_write(block_c, 0, "global")
    after = sch.mod["main"]
    tvm.ir.assert_structural_equal(expected, after)
    verify_trace_roundtrip(sch=sch, mod=before)
def test_reindex_cache_read():
    """reindex_cache_read with a rank-changing map (i, j) -> (j, i//2, i%2)."""
    s = tir.Schedule(elementwise, debug_mask="all")
    s.reindex_cache_read("C", 0, "shared", lambda i, j: (j, i // 2, i % 2))
    tvm.ir.assert_structural_equal(elementwise_reindex_cache_read, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=elementwise)
def test_reindex_cache_read_multi_consumer():
    """reindex_cache_read when the cached buffer has multiple consumers."""
    index_map = lambda i: (i // 32, i % 32)
    sch = tir.Schedule(func_multi_consumer)
    sch.reindex_cache_read("B", 0, "shared", index_map)
    tvm.ir.assert_structural_equal(reindex_cache_read_multi_consumer, sch.mod["main"])
    # NOTE(zihao): we do not verify trace roundtrip because of in set analysis issues.
def test_reindex_cache_read_fail_not_match():
    """An index map that does not match the block's iteration space is rejected."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reindex_cache_read("C", 0, "shared", lambda i, j: j * 2)
def test_reindex_cache_read_failed_not_single_point():
    """Reads that are not single-point accesses cannot be reindex-cached."""
    identity_map = lambda i, j: (i, j)
    sch = tir.Schedule(access_under_scope, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reindex_cache_read("scope", 0, "shared", identity_map)
def test_reindex_cache_write():
    """reindex_cache_write with a transposing index map."""
    transpose_map = lambda i, j: (j, i)
    sch = tir.Schedule(elementwise, debug_mask="all")
    sch.reindex_cache_write("B", 0, "shared", transpose_map)
    tvm.ir.assert_structural_equal(elementwise_reindex_cache_write, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=elementwise)
def test_reindex_cache_write_reduce():
    """Two successive reindex_cache_write calls on a reduction workload."""
    first_map = lambda i, j, k, l: (j, i, k)
    second_map = lambda i, j, k: [j, i]
    sch = tir.Schedule(reduce, debug_mask="all")
    sch.reindex_cache_write("B", 0, "shared", first_map)
    tvm.ir.assert_structural_equal(reduce_reindex_cache_write_0, sch.mod["main"])
    sch.reindex_cache_write("C", 0, "shared", second_map)
    tvm.ir.assert_structural_equal(reduce_reindex_cache_write_1, sch.mod["main"])
    verify_trace_roundtrip(sch=sch, mod=reduce)
def test_reindex_cache_write_fail_not_match():
    """An index map that does not match the block's iteration space is rejected."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reindex_cache_write("B", 0, "shared", lambda i, j: i)
def test_reindex_cache_write_fail_not_single_point():
    """Writes that are not single-point accesses cannot be reindex-cached."""
    identity_map = lambda i, j: (i, j)
    sch = tir.Schedule(access_under_scope, debug_mask="all")
    with pytest.raises(tvm.tir.ScheduleError):
        sch.reindex_cache_write("scope", 0, "shared", identity_map)
def test_symbolic_matmul_blocked_cache_read(use_block_name):
    """cache_read works on a matmul whose shapes are symbolic."""
    sch = tir.Schedule(symbolic_matmul_blocked, debug_mask="all")
    if use_block_name:
        block = "matmul"
    else:
        block = sch.get_block("matmul")
    sch.cache_read(block=block, read_buffer_index=0, storage_scope="shared")
    tvm.ir.assert_structural_equal(sch.mod["main"], symbolic_matmul_blocked_cache_read)
    verify_trace_roundtrip(sch=sch, mod=symbolic_matmul_blocked)
def test_symbolic_matmul_blocked_cache_write(use_block_name):
    """cache_write works on a matmul whose shapes are symbolic."""
    sch = tir.Schedule(symbolic_matmul_blocked, debug_mask="all")
    if use_block_name:
        block = "matmul"
    else:
        block = sch.get_block("matmul")
    sch.cache_write(block=block, write_buffer_index=0, storage_scope="local")
    tvm.ir.assert_structural_equal(sch.mod["main"], symbolic_matmul_blocked_cache_write)
    verify_trace_roundtrip(sch=sch, mod=symbolic_matmul_blocked)
if __name__ == "__main__":
    # Discover and run all tests in this file when invoked directly.
    tvm.testing.main()
| 65,425 | 37.713609 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_tvm_testing_before_after.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.script import tir as T, ir_module
class BaseBeforeAfter(tvm.testing.CompareBeforeAfter):
    """Shared base: the transform is the identity, so `before` must equal `expected`."""

    def transform(self):
        def identity(mod):
            return mod

        return identity
class TestBeforeAfterPrimFunc(BaseBeforeAfter):
    """`before` may be given directly as a decorated PrimFunc attribute."""

    @T.prim_func
    def before():
        T.evaluate(0)
    expected = before
class TestBeforeAfterMethod(BaseBeforeAfter):
    """`before` may be a plain method that builds and returns the PrimFunc."""

    def before(self):
        @T.prim_func
        def func():
            T.evaluate(0)
        return func
    expected = before
class TestBeforeAfterFixture(BaseBeforeAfter):
    """`before` may be supplied through a `tvm.testing.fixture`."""

    @tvm.testing.fixture
    def before(self):
        @T.prim_func
        def func():
            T.evaluate(0)
        return func
    expected = before
class TestBeforeAfterDelayedPrimFunc(BaseBeforeAfter):
    """`before` deliberately lacks @T.prim_func; per this module's pattern the
    conversion is applied by CompareBeforeAfter at test time."""

    def before():
        T.evaluate(0)
    expected = before
class TestBeforeAfterParametrizedFixture(BaseBeforeAfter):
    """A fixture-provided `before` may itself depend on test parameters."""

    # The buffer size of the generated PrimFunc; one test case per value.
    n = tvm.testing.parameter(1, 8, 16)
    @tvm.testing.fixture
    def before(self, n):
        @T.prim_func
        def func(A: T.Buffer(n, "float32")):
            for i in T.serial(n):
                A[i] = 0.0
        return func
    expected = before
class TestBeforeAfterIRModule(BaseBeforeAfter):
    """The preferred form for writing TIR unit tests
    All evaluation is done at test-time, with the minimal amount of
    additional lines. The `@tvm.testing.fixture`, `@ir_module`, and
    `@T.prim_func` annotations are handled by
    `tvm.testing.CompareBeforeAfter`.
    """
    # Deliberately undecorated (see class docstring): the decorators are
    # applied by CompareBeforeAfter at test time.
    class before:
        def func_A(A: T.Buffer(16, "float32")):
            for i in T.serial(16):
                A[i] = 0.0
        def func_B(A: T.Buffer(16, "int32")):
            for i in T.serial(16):
                A[i] = 42
    expected = before
class TestBeforeAfterIRModuleExplicitFixture(BaseBeforeAfter):
    """Like TestBeforeAfterIRModule, but with an explicit fixture
    If the IRModule depends on additional fixtures, this form can be
    used.
    """
    @tvm.testing.fixture
    def before(self):
        # Here the @ir_module/@T.prim_func decorators are written out
        # explicitly instead of being applied by CompareBeforeAfter.
        @ir_module
        class mod:
            @T.prim_func
            def func_A(A: T.Buffer(16, "float32")):
                for i in T.serial(16):
                    A[i] = 0.0
            @T.prim_func
            def func_B(A: T.Buffer(16, "int32")):
                for i in T.serial(16):
                    A[i] = 42
        return mod
    expected = before
if __name__ == "__main__":
    # Discover and run all tests in this file when invoked directly.
    tvm.testing.main()
| 3,215 | 23.549618 | 68 | py |
tvm | tvm-main/tests/python/unittest/test_tir_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm.tir import Buffer
import numpy as np
def test_buffer():
    """decl_buffer produces a Buffer with the requested dtype and shape."""
    m, n, l = te.size_var("m"), te.size_var("n"), te.size_var("l")
    buf_a = tvm.tir.decl_buffer((m, n), "float32")
    buf_b = tvm.tir.decl_buffer((n, l), "float32")
    assert isinstance(buf_a, tvm.tir.Buffer)
    assert buf_a.dtype == "float32"
    assert tuple(buf_a.shape) == (m, n)
def test_buffer_access_ptr():
    """access_ptr encodes dtype, extent, and the read/write mask in its args."""
    m = te.size_var("m")
    n = te.size_var("n")
    buf = tvm.tir.decl_buffer((m, n), "float32", strides=[n + 1, 1])
    ptr = buf.access_ptr("rw")
    # args: [dtype hint, data, elem_offset, extent, rw_mask]
    assert tvm.ir.structural_equal(ptr.args[3], buf.strides[0] * m)
    assert ptr.args[0].dtype == buf.dtype
    assert ptr.args[4].value == Buffer.READ | Buffer.WRITE
    ptr = buf.access_ptr("w")
    assert ptr.args[4].value == Buffer.WRITE
def test_buffer_access_ptr_offset():
    """Constant, symbolic, and extern-call offsets are simplified into args[2]."""
    m = te.size_var("m")
    n = te.size_var("n")
    buf = tvm.tir.decl_buffer((m, n), "float32")
    rw_mask = Buffer.READ | Buffer.WRITE
    # Constant offset.
    ptr = buf.access_ptr("rw", offset=100)
    tvm.testing.assert_prim_expr_equal(ptr.args[2], 100)
    assert ptr.args[4].value == rw_mask
    # Symbolic offset: constants are folded (100 + 100 -> 200).
    v = te.size_var("int32")
    ptr = buf.access_ptr("rw", offset=100 + 100 + v)
    tvm.testing.assert_prim_expr_equal(ptr.args[2], 200 + v)
    assert ptr.args[4].value == rw_mask
    # Offset wrapped in an extern call: folding happens inside the call args.
    ptr = buf.access_ptr("rw", offset=tvm.tir.call_extern("int32", "test_call", 100 + 100 + v))
    tvm.testing.assert_prim_expr_equal(
        ptr.args[2], tvm.tir.call_extern("int32", "test_call", 200 + v)
    )
    assert ptr.args[4].value == rw_mask
def test_buffer_access_ptr_extent():
    """The extent arg defaults to the remaining buffer size and can be overridden."""
    m = te.size_var("m")
    n = te.size_var("n")
    compact = tvm.tir.decl_buffer((m, n), "float32")
    assert tvm.ir.structural_equal(compact.access_ptr("rw").args[3], m * n)
    # An offset shrinks the inferred extent accordingly.
    assert tvm.ir.structural_equal(compact.access_ptr("rw", offset=100).args[3], m * n - 100)
    strided = tvm.tir.decl_buffer((m, n), "float32", strides=[n + 1, 1])
    assert tvm.ir.structural_equal(
        strided.access_ptr("rw", offset=100).args[3], strided.strides[0] * m - 100
    )
    # An explicit extent overrides the inferred one.
    assert tvm.ir.structural_equal(strided.access_ptr("rw", extent=200).args[3], 200)
    assert tvm.ir.structural_equal(strided.access_ptr("rw", offset=100, extent=100).args[3], 100)
def test_buffer_vload():
    """vload records the requested multi-dimensional indices."""
    m = te.size_var("m")
    n = te.size_var("n")
    buf = tvm.tir.decl_buffer((m, n), "float32", elem_offset=100)
    tvm.ir.assert_structural_equal(buf.vload([2, 3]).indices, [2, 3])
def test_buffer_offset_of():
    """offset_of folds the elem_offset into the flattened index."""
    m = te.size_var("m")
    n = te.size_var("n")
    buf = tvm.tir.decl_buffer((m, n), "float32", elem_offset=100)
    # Row-major flattening: 2 * n + 3, plus the elem_offset of 100.
    tvm.ir.assert_structural_equal(buf.offset_of([2, 3]), [n * 2 + 103])
def test_buffer_index_merge_mult_mod():
    """Index simplification merges matching div/mod chains back into a flat index.

    Each case feeds offset_of an index expression written in expanded
    div/mod form and checks it simplifies to the same flat index as the
    direct form.
    """
    m = te.size_var("m")
    n = te.size_var("n")
    s = te.size_var("s")
    k0 = te.size_var("k0")
    k1 = te.size_var("k1")
    A = tvm.tir.decl_buffer((m, n), "float32")
    A_stride = tvm.tir.decl_buffer((m, n), "float32", strides=(s, 1))
    def assert_simplified_equal(index_simplified, index_direct):
        # Structural equality implies the simplifier reduced both to one form.
        assert tvm.ir.structural_equal(
            index_simplified, index_direct
        ), "index_simplified=%s, index_direct=%s" % (index_simplified, index_direct)
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    # Test Case1
    index_simplified = A_stride.offset_of(
        (idxd(idxm(k0, k1), s), idxm(idxm(k0, k1), s) + idxd(k0, k1) * k1)
    )
    index_direct = A_stride.offset_of((0, k0))
    assert_simplified_equal(index_simplified, index_direct)
    # Test Case2
    index_simplified = A.offset_of(
        (idxd(idxm(k0, idxd(k1, s)), n), idxm(idxm(k0, idxd(k1, s)), n) + idxm(k0, k1))
    )
    index_direct = A.offset_of((0, idxm(k0, idxd(k1, s)) + idxm(k0, k1)))
    assert_simplified_equal(index_simplified, index_direct)
    # Test Case3
    index_simplified = A.offset_of(
        (
            idxd((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) + idxd(idxm(k0, idxd(k1, s)), n),
            idxm((idxd(k0, idxd(k1, s)) * idxd(k1, s)), n) + idxm(idxm(k0, idxd(k1, s)), n),
        )
    )
    index_direct = A.offset_of((0, k0))
    assert_simplified_equal(index_simplified, index_direct)
    # Test Case4 (not able to simplify)
    index_simplified = A.offset_of(
        (idxd(idxm(k0, idxd(k1, s)), n), idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1))
    )
    index_direct = A.offset_of(
        (0, idxd(idxm(k0, idxd(k1, s)), n) * n + (idxm(idxm(k0, idxd(k1, n)), n) + idxm(k0, k1)))
    )
    assert_simplified_equal(index_simplified, index_direct)
    # Test Case5
    B = tvm.tir.decl_buffer((1, 14, 14, 1024))
    i = te.size_var("i")
    j = te.size_var("j")
    k = te.size_var("k")
    index_simplified1 = B.offset_of(
        (
            idxd(idxd(idxd((i * 50176 + j * 28672 + k), 1024), 14), 14),
            idxm(idxd(idxd((i * 50176 + j * 28672 + k), 1024), 14), 14),
            idxm(idxd((i * 50176 + j * 28672 + k), 1024), 14),
            idxm((i * 50176 + j * 28672 + k), 1024),
        )
    )
    index_simplified2 = B.offset_of(
        (
            idxd(idxd(i * 49 + j * 28 + idxd(k, 1024), 14), 14),
            idxm(idxd(i * 49 + j * 28 + idxd(k, 1024), 14), 14),
            idxm(i * 7 + idxd(k, 1024), 14),
            idxm(k, 1024),
        )
    )
    index_direct = B.offset_of((0, 0, 0, (i * 50176 + j * 28672 + k)))
    assert_simplified_equal(index_simplified1, index_direct)
    assert_simplified_equal(index_simplified2, index_direct)
@tvm.testing.requires_llvm
def test_buffer_broadcast():
    """auto_broadcast buffers let a (2, 1, 1) input broadcast against (2, 4, 3)."""
    m0, m1, m2 = te.size_var("m0"), te.size_var("m1"), te.size_var("m2")
    n0, n1, n2 = te.size_var("n0"), te.size_var("n1"), te.size_var("n2")
    o0, o1, o2 = te.size_var("o0"), te.size_var("o1"), te.size_var("o2")
    A = te.placeholder((m0, m1, m2), name="A")
    B = te.placeholder((n0, n1, n2), name="B")
    C = te.compute((o0, o1, o2), lambda i, j, k: A[i, j, k] + B[i, j, k], name="C")
    Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
    Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
    s = te.create_schedule(C.op)
    fadd = tvm.build(s, [A, B, C], target="llvm", name="bcast_add", binds={A: Ab, B: Bb})
    dev = tvm.cpu(0)
    a = tvm.nd.array(np.random.uniform(size=(2, 4, 3)).astype(A.dtype), dev)
    b = tvm.nd.array(np.random.uniform(size=(2, 1, 1)).astype(B.dtype), dev)
    c = tvm.nd.array(np.zeros((2, 4, 3), dtype=C.dtype), dev)
    fadd(a, b, c)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
@tvm.testing.requires_llvm
def test_buffer_broadcast_expr():
    """auto_broadcast binding works when the output shape is symbolic (o1 // x)."""
    n0, m0, x = te.size_var("n0"), te.size_var("m0"), te.size_var("x")
    n1, m1 = te.size_var("n1"), te.size_var("m1")
    o0, o1 = te.size_var("o0"), te.size_var("o1")
    A = te.placeholder((m0, n0), name="A")
    B = te.placeholder((m1, n1), name="B")
    C = te.compute((o0, o1 // x), lambda i, j: A[i, j] + B[i, j], name="C")
    Ab = tvm.tir.decl_buffer(A.shape, A.dtype, name="Ab", buffer_type="auto_broadcast")
    Bb = tvm.tir.decl_buffer(B.shape, B.dtype, name="Bb", buffer_type="auto_broadcast")
    Cc = tvm.tir.decl_buffer(C.shape, C.dtype, name="Cc", buffer_type="auto_broadcast")
    s = te.create_schedule(C.op)

    def run(a_shape, binds):
        # Build with the given buffer binds and check a + b == c elementwise.
        fadd = tvm.build(s, [A, B, C, o1, x], target="llvm", name="bcast_add", binds=binds)
        dev = tvm.cpu(0)
        a = tvm.nd.array(np.random.uniform(size=a_shape).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=(2, 4)).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros((2, 4), dtype=C.dtype), dev)
        fadd(a, b, c, 4, 1)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())

    explicit_binds = {A: Ab, B: Bb, C: Cc}
    run((2, 4), explicit_binds)  # strided case: a matches the output shape
    run((1, 4), explicit_binds)  # broadcast case: a has a unit leading dim
    run((1, 4), None)  # let tvm.build bind the buffers itself
def test_buffer_flatten():
    """Flattening a 2-d buffer yields a 1-d view over the same backing data."""
    original = tvm.tir.decl_buffer([16, 32])
    flattened = original.get_flattened_buffer()
    assert original.data.same_as(flattened.data)
    tvm.ir.assert_structural_equal(flattened.shape, [16 * 32])
def test_buffer_flatten_preserves_identity():
    """An already-flat (1-d) buffer flattens to itself, not a copy."""
    original = tvm.tir.decl_buffer([16])
    flattened = original.get_flattened_buffer()
    assert original.same_as(flattened)
def test_buffer_flatten_uses_axis_separators():
    """With axis separators, flattening yields an N-d physical shape."""
    original = tvm.tir.decl_buffer([4, 16, 32], axis_separators=[2])
    flattened = original.get_flattened_buffer()
    # The separator after axis 2 splits the result into (4*16, 32).
    tvm.ir.assert_structural_equal(flattened.axis_separators, [1])
    tvm.ir.assert_structural_equal(flattened.shape, [4 * 16, 32])
if __name__ == "__main__":
    # Discover and run all tests in this file when invoked directly.
    tvm.testing.main()
| 10,627 | 36.822064 | 97 | py |
tvm | tvm-main/tests/python/unittest/test_arith_domain_touched.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as T
# Stencil workload for DomainTouched: each iteration writes A[i, j] while
# reading B[i - 1, j + 1] and A[i - 1, j - 1], so the touched read domains
# extend one element beyond the iteration bounds.
@T.prim_func
def scalar_func(a: T.handle, b: T.handle):
    m = T.int32()
    n = T.meta_var(100)
    A = T.match_buffer(a, (n, m))
    B = T.match_buffer(b, (n, m))
    for i, j in T.grid(n, m):
        A[i, j] = B[i - 1, j + 1] + A[i - 1, j - 1]
def test_domain_touched():
    """DomainTouched reports per-axis read/write/combined regions of scalar_func."""
    func = scalar_func
    a, b = [func.buffer_map[var] for var in func.params]
    body = func.body

    def touched(buf, consider_reads, consider_writes):
        return tvm.arith._ffi_api.DomainTouched(body, buf, consider_reads, consider_writes)

    # Reads of A come from A[i - 1, j - 1]: shifted one element down on each axis.
    a_reads = touched(a, True, False)
    assert a_reads[0].min.value == -1
    assert a_reads[0].extent.value == 100
    assert a_reads[1].min.value == -1
    assert a_reads[1].extent.name == "m"
    # Writes of A cover exactly the iteration domain.
    a_writes = touched(a, False, True)
    assert a_writes[0].min.value == 0
    assert a_writes[0].extent.value == 100
    assert a_writes[1].min.value == 0
    assert a_writes[1].extent.name == "m"
    # Union of reads and writes: [-1, 100) on axis 0, [-1, m + 1) on axis 1.
    a_both = touched(a, True, True)
    assert a_both[0].min.value == -1
    assert a_both[0].extent.value == 101
    assert a_both[1].min.value == -1
    assert isinstance(a_both[1].extent, tvm.tir.Add)
    assert a_both[1].extent.a.name == "m"
    assert a_both[1].extent.b.value == 1
    # Reads of B come from B[i - 1, j + 1].
    b_reads = touched(b, True, False)
    assert b_reads
    assert b_reads[0].min.value == -1
    assert b_reads[0].extent.value == 100
    assert b_reads[1].min.value == 1
    assert b_reads[1].extent.name == "m"
    # B is never written, so its write domain is empty.
    b_writes = touched(b, False, True)
    assert isinstance(b_writes, tvm.container.Array)
    assert len(b_writes) == 0
def test_domain_touched_vector():
    """DomainTouched reports the vector extent (128) for strided vector accesses.

    A is both read and written, B is only read; every access touches a
    contiguous 128-wide vector, so each touched region has extent 128.
    """
    m = tvm.runtime.convert(128)
    @T.prim_func
    def func(a: T.handle, b: T.handle):
        n = T.int32()
        A = T.match_buffer(a, (n * m,))
        B = T.match_buffer(b, (n * m,))
        for i in T.serial(n):
            A[i * m : (i + 1) * m : 1] = A[i * m : (i + 1) * m : 1] + B[i * m : (i + 1) * m : 1]
    a, b = [func.buffer_map[var] for var in func.params]
    def touched(buf, consider_reads, consider_writes):
        return tvm.arith._ffi_api.DomainTouched(func.body, buf, consider_reads, consider_writes)
    # The original test repeated the A-read and B-read assertions verbatim
    # (copy-paste) and never checked A's write domain; check each region once.
    assert touched(a, True, False)[0].extent.value == 128  # reads of A
    assert touched(a, False, True)[0].extent.value == 128  # writes of A
    assert touched(a, True, True)[0].extent.value == 128  # reads | writes of A
    assert touched(b, True, False)[0].extent.value == 128  # reads of B
if __name__ == "__main__":
    # Run both tests; the previous entry point only invoked
    # test_domain_touched, silently skipping test_domain_touched_vector.
    test_domain_touched()
    test_domain_touched_vector()
| 3,422 | 35.414894 | 96 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_mlt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring
import tvm.testing
from tvm import meta_schedule as ms
from tvm import target, te
from tvm.meta_schedule.testing import te_workload
from tvm.meta_schedule.testing.space_generation import (
check_sketches,
generate_design_space,
print_sketches,
)
from tvm.script import tir as T
from tvm.target import Target
def test_cpu_matmul():
    """MultiLevelTiling on LLVM produces three SSRSRS sketches for matmul.

    Sketch 0 writes through a cache written back inside the second spatial
    tile loop, sketch 1 writes back at the outermost tile loop, and sketch 2
    has no write cache at all.
    """
    # Sketch 0: C_global written back under the (i0_1, i1_1) tile level.
    @T.prim_func
    def cpu_matmul_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        C: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C_global = T.alloc_buffer([512, 512], dtype="float32")
        for i0_0, i1_0, i0_1, i1_1 in T.grid(1, 8, 8, 1):
            for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(16, 2, 8, 32, 32, 8):
                with T.block("C"):
                    i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
                    j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
                    k = T.axis.reduce(512, i2_0 * 32 + i2_1)
                    T.reads(A[i, k], B[k, j])
                    T.writes(C_global[i, j])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        C_global[i, j] = T.float32(0)
                    C_global[i, j] = C_global[i, j] + A[i, k] * B[k, j]
            for ax0, ax1 in T.grid(64, 64):
                with T.block("C_global"):
                    v0 = T.axis.spatial(512, i0_1 * 64 + ax0)
                    v1 = T.axis.spatial(512, i1_0 * 64 + ax1)
                    T.reads(C_global[v0, v1])
                    T.writes(C[v0, v1])
                    C[v0, v1] = C_global[v0, v1]
    # Sketch 1: C_global written back under the outermost (i0_0, i1_0) level.
    @T.prim_func
    def cpu_matmul_1(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        C: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C_global = T.alloc_buffer([512, 512], dtype="float32")
        for i0_0, i1_0 in T.grid(1, 8):
            for i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(8, 1, 16, 2, 8, 32, 32, 8):
                with T.block("C"):
                    i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
                    j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
                    k = T.axis.reduce(512, i2_0 * 32 + i2_1)
                    T.reads(A[i, k], B[k, j])
                    T.writes(C_global[i, j])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        C_global[i, j] = T.float32(0)
                    C_global[i, j] = C_global[i, j] + A[i, k] * B[k, j]
            for ax0, ax1 in T.grid(512, 64):
                with T.block("C_global"):
                    v0 = T.axis.spatial(512, ax0)
                    v1 = T.axis.spatial(512, i1_0 * 64 + ax1)
                    T.reads(C_global[v0, v1])
                    T.writes(C[v0, v1])
                    C[v0, v1] = C_global[v0, v1]
    # Sketch 2: no write cache; C is accumulated in place.
    @T.prim_func
    def cpu_matmul_2(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        C: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        for i0_0, i1_0, i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(
            1, 8, 8, 1, 16, 2, 8, 32, 32, 8
        ):
            with T.block("C"):
                i = T.axis.spatial(512, i0_0 * 512 + i0_1 * 64 + i0_2 * 32 + i0_3)
                j = T.axis.spatial(512, i1_0 * 64 + i1_1 * 64 + i1_2 * 8 + i1_3)
                k = T.axis.reduce(512, i2_0 * 32 + i2_1)
                T.reads(A[i, k], B[k, j])
                T.writes(C[i, j])
                T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                with T.init():
                    C[i, j] = T.float32(0)
                C[i, j] = C[i, j] + A[i, k] * B[k, j]
    # All three sketches share the same tile-size sampling decisions.
    decision_0 = [
        ("SamplePerfectTile", [1, 8, 2, 32]),
        ("SamplePerfectTile", [8, 1, 8, 8]),
        ("SamplePerfectTile", [16, 32]),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 8, 2, 32]),
        ("SamplePerfectTile", [8, 1, 8, 8]),
        ("SamplePerfectTile", [16, 32]),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 8, 2, 32]),
        ("SamplePerfectTile", [8, 1, 8, 8]),
        ("SamplePerfectTile", [16, 32]),
    ]
    mod = te.create_prim_func(te_workload.matmul(512, 512, 512))
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm"),
        types=ms.schedule_rule.MultiLevelTiling,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cpu_matmul_0, cpu_matmul_1, cpu_matmul_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_matmul_relu():
    """MultiLevelTiling on LLVM for matmul + relu epilogue.

    The three sketches differ in where the relu ("compute") block is placed:
    at the root, fused at the (i0_0, i1_0, i0_1, i1_1) tile level, and fused
    at the outermost (i0_0, i1_0) level.
    """
    # Sketch 0: relu stays at the root as a separate loop nest.
    @T.prim_func
    def cpu_matmul_relu_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        compute: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C = T.alloc_buffer([512, 512], dtype="float32")
        for i0_0, i1_0, i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(
            256, 4, 1, 4, 64, 1, 32, 8, 2, 1
        ):
            with T.block("C"):
                i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
                j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
                k = T.axis.reduce(512, i2_0 * 8 + i2_1)
                T.reads(A[i, k], B[k, j])
                T.writes(C[i, j])
                T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                with T.init():
                    C[i, j] = T.float32(0)
                C[i, j] = C[i, j] + A[i, k] * B[k, j]
        for i0, i1 in T.grid(512, 512):
            with T.block("compute"):
                i0_4, i1_4 = T.axis.remap("SS", [i0, i1])
                T.reads(C[i0_4, i1_4])
                T.writes(compute[i0_4, i1_4])
                compute[i0_4, i1_4] = T.max(C[i0_4, i1_4], T.float32(0))
    # Sketch 1: relu fused at the second spatial tile level.
    @T.prim_func
    def cpu_matmul_relu_1(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        compute: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C = T.alloc_buffer([512, 512], dtype="float32")
        for i0_0, i1_0, i0_1, i1_1 in T.grid(256, 4, 1, 4):
            for i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(64, 1, 32, 8, 2, 1):
                with T.block("C"):
                    i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
                    j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
                    k = T.axis.reduce(512, i2_0 * 8 + i2_1)
                    T.reads(A[i, k], B[k, j])
                    T.writes(C[i, j])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        C[i, j] = T.float32(0)
                    C[i, j] = C[i, j] + A[i, k] * B[k, j]
            for ax0, ax1 in T.grid(2, 32):
                with T.block("compute"):
                    i0 = T.axis.spatial(512, i0_0 * 2 + ax0)
                    i1 = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + ax1)
                    T.reads(C[i0, i1])
                    T.writes(compute[i0, i1])
                    compute[i0, i1] = T.max(C[i0, i1], T.float32(0))
    # Sketch 2: relu fused at the outermost tile level.
    @T.prim_func
    def cpu_matmul_relu_2(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        compute: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C = T.alloc_buffer([512, 512], dtype="float32")
        for i0_0, i1_0 in T.grid(256, 4):
            for i0_1, i1_1, i2_0, i0_2, i1_2, i2_1, i0_3, i1_3 in T.grid(1, 4, 64, 1, 32, 8, 2, 1):
                with T.block("C"):
                    i = T.axis.spatial(512, i0_0 * 2 + i0_1 * 2 + i0_2 * 2 + i0_3)
                    j = T.axis.spatial(512, i1_0 * 128 + i1_1 * 32 + i1_2 + i1_3)
                    k = T.axis.reduce(512, i2_0 * 8 + i2_1)
                    T.reads(A[i, k], B[k, j])
                    T.writes(C[i, j])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        C[i, j] = T.float32(0)
                    C[i, j] = C[i, j] + A[i, k] * B[k, j]
            for ax0, ax1 in T.grid(2, 128):
                with T.block("compute"):
                    i0 = T.axis.spatial(512, i0_0 * 2 + ax0)
                    i1 = T.axis.spatial(512, i1_0 * 128 + ax1)
                    T.reads(C[i0, i1])
                    T.writes(compute[i0, i1])
                    compute[i0, i1] = T.max(C[i0, i1], T.float32(0))
    # All three sketches share the same tile-size sampling decisions.
    decision_0 = [
        ("SamplePerfectTile", [256, 1, 1, 2]),
        ("SamplePerfectTile", [4, 4, 32, 1]),
        ("SamplePerfectTile", [64, 8]),
    ]
    decision_1 = [
        ("SamplePerfectTile", [256, 1, 1, 2]),
        ("SamplePerfectTile", [4, 4, 32, 1]),
        ("SamplePerfectTile", [64, 8]),
    ]
    decision_2 = [
        ("SamplePerfectTile", [256, 1, 1, 2]),
        ("SamplePerfectTile", [4, 4, 32, 1]),
        ("SamplePerfectTile", [64, 8]),
    ]
    mod = te.create_prim_func(te_workload.matmul_relu(512, 512, 512))
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm"),
        types=ms.schedule_rule.MultiLevelTiling,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cpu_matmul_relu_0, cpu_matmul_relu_1, cpu_matmul_relu_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cuda_matmul():
    """MultiLevelTiling on CUDA: SSSRRSRS tiling with shared-memory caches
    for A and B, a local accumulator for C, and block/vthread/thread bindings.
    """
    @T.prim_func
    def cuda_matmul_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        C: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
        A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        for i0_0_i1_0_fused in T.thread_binding(128, thread="blockIdx.x"):
            for i0_1_i1_1_fused in T.thread_binding(8, thread="vthread.x"):
                for i0_2_i1_2_fused in T.thread_binding(4, thread="threadIdx.x"):
                    for i2_0 in T.serial(128):
                        # Cooperative fetch of the A and B tiles into shared memory.
                        for ax0_ax1_fused in T.serial(256):
                            with T.block("A_shared"):
                                v0 = T.axis.spatial(
                                    512, i0_0_i1_0_fused // 16 * 64 + ax0_ax1_fused // 4
                                )
                                v1 = T.axis.spatial(512, i2_0 * 4 + ax0_ax1_fused % 4)
                                T.reads(A[v0, v1])
                                T.writes(A_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                A_shared[v0, v1] = A[v0, v1]
                        for ax0_ax1_fused in T.serial(128):
                            with T.block("B_shared"):
                                v0 = T.axis.spatial(512, i2_0 * 4 + ax0_ax1_fused // 32)
                                v1 = T.axis.spatial(
                                    512, i0_0_i1_0_fused % 16 * 32 + ax0_ax1_fused % 32
                                )
                                T.reads(B[v0, v1])
                                T.writes(B_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 1})
                                B_shared[v0, v1] = B[v0, v1]
                        for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(2, 1, 1, 2, 16, 4):
                            with T.block("C"):
                                i = T.axis.spatial(
                                    512,
                                    i0_0_i1_0_fused // 16 * 64
                                    + i0_1_i1_1_fused // 2 * 16
                                    + i0_3 * 16
                                    + i0_4,
                                )
                                j = T.axis.spatial(
                                    512,
                                    i0_0_i1_0_fused % 16 * 32
                                    + i0_1_i1_1_fused % 2 * 16
                                    + i0_2_i1_2_fused * 4
                                    + i1_3 * 4
                                    + i1_4,
                                )
                                k = T.axis.reduce(512, i2_0 * 4 + i2_1 * 2 + i2_2)
                                T.reads(A_shared[i, k], B_shared[k, j])
                                T.writes(C_local[i, j])
                                T.block_attr(
                                    {
                                        "meta_schedule.thread_extent_high_inclusive": 1024,
                                        "meta_schedule.thread_extent_low_inclusive": 32,
                                        "meta_schedule.tiling_structure": "SSSRRSRS",
                                    }
                                )
                                with T.init():
                                    C_local[i, j] = T.float32(0)
                                C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
                    # Write the local accumulator back to global memory.
                    for ax0, ax1 in T.grid(16, 4):
                        with T.block("C_local"):
                            v0 = T.axis.spatial(
                                512, i0_0_i1_0_fused // 16 * 64 + i0_1_i1_1_fused // 2 * 16 + ax0
                            )
                            v1 = T.axis.spatial(
                                512,
                                i0_0_i1_0_fused % 16 * 32
                                + i0_1_i1_1_fused % 2 * 16
                                + i0_2_i1_2_fused * 4
                                + ax1,
                            )
                            T.reads(C_local[v0, v1])
                            T.writes(C[v0, v1])
                            C[v0, v1] = C_local[v0, v1]
    # Tile sizes plus the two cooperative-fetch vectorization choices.
    decision_0 = [
        ("SamplePerfectTile", [8, 4, 1, 1, 16]),
        ("SamplePerfectTile", [16, 2, 4, 1, 4]),
        ("SamplePerfectTile", [128, 2, 2]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 0),
    ]
    mod = te.create_prim_func(te_workload.matmul(512, 512, 512))
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-2080"),  # disable async trace using sm75
        types=ms.schedule_rule.MultiLevelTiling,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cuda_matmul_0],
        expected_decisions=[decision_0],
    )
def test_cuda_matmul_relu():
    """MultiLevelTiling on CUDA for matmul + relu: same caching/binding
    structure as test_cuda_matmul, with the relu epilogue left at the root.
    """
    @T.prim_func
    def cuda_matmul_relu_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        compute: T.Buffer((512, 512), "float32"),
    ) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        # body
        # with T.block("root")
        C = T.alloc_buffer([512, 512], dtype="float32")
        C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local")
        A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared")
        for i0_0_i1_0_fused in T.thread_binding(64, thread="blockIdx.x"):
            for i0_1_i1_1_fused in T.thread_binding(64, thread="vthread.x"):
                for i0_2_i1_2_fused in T.thread_binding(8, thread="threadIdx.x"):
                    for i2_0 in T.serial(8):
                        # Cooperative fetch of the A and B tiles into shared memory.
                        for ax0_ax1_fused in T.serial(4096):
                            with T.block("A_shared"):
                                v0 = T.axis.spatial(
                                    512, i0_0_i1_0_fused // 8 * 64 + ax0_ax1_fused // 64
                                )
                                v1 = T.axis.spatial(512, i2_0 * 64 + ax0_ax1_fused % 64)
                                T.reads(A[v0, v1])
                                T.writes(A_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                A_shared[v0, v1] = A[v0, v1]
                        for ax0_ax1_fused in T.serial(4096):
                            with T.block("B_shared"):
                                v0 = T.axis.spatial(512, i2_0 * 64 + ax0_ax1_fused // 64)
                                v1 = T.axis.spatial(
                                    512, i0_0_i1_0_fused % 8 * 64 + ax0_ax1_fused % 64
                                )
                                T.reads(B[v0, v1])
                                T.writes(B_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 4})
                                B_shared[v0, v1] = B[v0, v1]
                        for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(8, 2, 1, 8, 2, 2):
                            with T.block("C"):
                                i = T.axis.spatial(
                                    512,
                                    i0_0_i1_0_fused // 8 * 64
                                    + i0_1_i1_1_fused // 8 * 8
                                    + i0_2_i1_2_fused // 4 * 4
                                    + i0_3 * 2
                                    + i0_4,
                                )
                                j = T.axis.spatial(
                                    512,
                                    i0_0_i1_0_fused % 8 * 64
                                    + i0_1_i1_1_fused % 8 * 8
                                    + i0_2_i1_2_fused % 4 * 2
                                    + i1_3 * 2
                                    + i1_4,
                                )
                                k = T.axis.reduce(512, i2_0 * 64 + i2_1 * 8 + i2_2)
                                T.reads(A_shared[i, k], B_shared[k, j])
                                T.writes(C_local[i, j])
                                T.block_attr(
                                    {
                                        "meta_schedule.thread_extent_high_inclusive": 1024,
                                        "meta_schedule.thread_extent_low_inclusive": 32,
                                        "meta_schedule.tiling_structure": "SSSRRSRS",
                                    }
                                )
                                with T.init():
                                    C_local[i, j] = T.float32(0)
                                C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j]
                    # Write the local accumulator back to the intermediate buffer.
                    for ax0, ax1 in T.grid(4, 2):
                        with T.block("C_local"):
                            v0 = T.axis.spatial(
                                512,
                                i0_0_i1_0_fused // 8 * 64
                                + i0_1_i1_1_fused // 8 * 8
                                + i0_2_i1_2_fused // 4 * 4
                                + ax0,
                            )
                            v1 = T.axis.spatial(
                                512,
                                i0_0_i1_0_fused % 8 * 64
                                + i0_1_i1_1_fused % 8 * 8
                                + i0_2_i1_2_fused % 4 * 2
                                + ax1,
                            )
                            T.reads(C_local[v0, v1])
                            T.writes(C[v0, v1])
                            C[v0, v1] = C_local[v0, v1]
        # relu epilogue, left at the root of the function.
        for i0, i1 in T.grid(512, 512):
            with T.block("compute"):
                i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
                T.reads(C[i0_1, i1_1])
                T.writes(compute[i0_1, i1_1])
                compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
    # Tile sizes plus the two cooperative-fetch vectorization choices.
    decision_0 = [
        ("SamplePerfectTile", [8, 8, 2, 2, 2]),
        ("SamplePerfectTile", [8, 8, 4, 1, 2]),
        ("SamplePerfectTile", [8, 8, 8]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 3),
    ]
    mod = te.create_prim_func(te_workload.matmul_relu(512, 512, 512))
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-2080"),  # disable async trace using sm75
        types=ms.schedule_rule.MultiLevelTiling,
    )
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cuda_matmul_relu_0],
        expected_decisions=[decision_0],
    )
def test_cuda_sum_with_trivial_block_iter():
    """MultiLevelTiling must NOT fire on a reduction whose spatial iters are
    trivial (extent 1): the generated trace should simplify to empty."""

    @T.prim_func
    def sum_with_trivial_block_iter(
        A: T.Buffer((1, 64, 768), "float32"),
        B: T.Buffer((1, 64, 1), "float32"),
    ) -> None:
        for i0, i1, i2, i3 in T.grid(1, 64, 1, 768):
            with T.block("sum"):
                ax0, ax1, ax2, k2 = T.axis.remap("SSSR", [i0, i1, i2, i3])
                T.reads(A[ax0, ax1, k2])
                T.writes(B[ax0, ax1, ax2])
                with T.init():
                    B[ax0, ax1, ax2] = T.float32(0)
                B[ax0, ax1, ax2] = B[ax0, ax1, ax2] + A[ax0, ax1, k2]
    # Expect nothing to happen - the rule is not supposed to be applied in this case
    mod = sum_with_trivial_block_iter
    (sch,) = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-3080"),
        types=ms.schedule_rule.MultiLevelTiling,
    )
    assert not sch.trace.simplified(remove_postproc=True).insts
def test_multi_level_tiling_hexagon():
    """MultiLevelTilingWideVector on a conv2d NHWC workload for Hexagon:
    the sketch must match ``cpu_conv2d_nhwc`` with the listed tile decisions."""

    @T.prim_func
    def cpu_conv2d_nhwc(
        inputs: T.Buffer((1, 56, 56, 64), "float16"),
        weight: T.Buffer((3, 3, 64, 64), "float16"),
        conv2d_nhwc: T.Buffer((1, 56, 56, 64), "float16"),
    ) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        PadInput = T.alloc_buffer((1, 58, 58, 64), "float16")
        for i0, i1, i2, i3 in T.grid(1, 58, 58, 64):
            with T.block("PadInput"):
                v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3])
                T.writes(PadInput[v_i0, v_i1, v_i2, v_i3])
                PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(
                    1 <= v_i1 and v_i1 < 57 and 1 <= v_i2 and v_i2 < 57,
                    inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3],
                    T.float16(0),
                )
        for (
            n_0,
            h_0,
            w_0,
            co_0,
            rh_0,
            rw_0,
            rc_0,
            n_1,
            h_1,
            w_1,
            co_1,
            rh_1,
            rw_1,
            rc_1,
            n_2,
            h_2,
            w_2,
            co_2,
        ) in T.grid(1, 1, 2, 1, 3, 3, 16, 1, 14, 2, 1, 1, 1, 4, 1, 4, 14, 64):
            with T.block("conv2d_nhwc"):
                v_n = T.axis.spatial(1, n_0 + n_1 + n_2)
                v_h = T.axis.spatial(56, h_0 * 56 + h_1 * 4 + h_2)
                v_w = T.axis.spatial(56, w_0 * 28 + w_1 * 14 + w_2)
                v_co = T.axis.spatial(64, co_0 * 64 + co_1 * 64 + co_2)
                v_rh = T.axis.reduce(3, rh_0 + rh_1)
                v_rw = T.axis.reduce(3, rw_0 + rw_1)
                v_rc = T.axis.reduce(64, rc_0 * 4 + rc_1)
                T.reads(
                    PadInput[v_n, v_h + v_rh, v_w + v_rw, v_co // 64 * 64 + v_rc],
                    weight[v_rh, v_rw, v_rc, v_co],
                )
                T.writes(conv2d_nhwc[v_n, v_h, v_w, v_co])
                T.block_attr({"meta_schedule.tiling_structure": "SRSRS"})
                with T.init():
                    conv2d_nhwc[v_n, v_h, v_w, v_co] = T.float16(0)
                conv2d_nhwc[v_n, v_h, v_w, v_co] = (
                    conv2d_nhwc[v_n, v_h, v_w, v_co]
                    + PadInput[v_n, v_h + v_rh, v_w + v_rw, v_co // 64 * 64 + v_rc]
                    * weight[v_rh, v_rw, v_rc, v_co]
                )
    target_hexagon = target.hexagon("v69", num_cores=4)
    I = 64
    O = 64
    H = 56
    W = 56
    mod = te.create_prim_func(
        te_workload.conv2d_nhwc(1, H, W, I, O, 3, 1, 1, 1, in_dtype="float16", out_dtype="float16")
    )
    # NOTE(review): kind="cuda" with a Hexagon target looks odd — presumably only
    # the explicit sch_rules below matter for space generation; confirm intent.
    actual = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target(target_hexagon, host=target_hexagon),
        types=None,
        sch_rules=[
            ms.schedule_rule.MultiLevelTilingWideVector(
                structure="SRSRS",
                vector_length_in_bits=1024,
                max_innermost_factor=64,
                reuse_read=None,
                reuse_write=None,
            )
        ],
    )
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1]),
        ("SamplePerfectTile", [1, 14, 4]),
        ("SamplePerfectTile", [2, 2, 14]),
        ("SamplePerfectTile", [3, 1]),
        ("SamplePerfectTile", [3, 1]),
        ("SamplePerfectTile", [16, 4]),
    ]
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cpu_conv2d_nhwc],
        expected_decisions=[decision_0],
    )
def test_cache_read_specify_consumer():
    """Matmul fused with an elementwise add: cache_read must target only the
    matmul consumer, so the trailing T_add block reads the global buffer C."""

    @T.prim_func
    def cache_read_specify_consumer_0(
        A: T.Buffer((512, 512), "float32"),
        B: T.Buffer((512, 512), "float32"),
        T_add: T.Buffer((512, 512), "float32"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        C = T.alloc_buffer((512, 512))
        C_local = T.alloc_buffer((512, 512), scope="local")
        A_shared = T.alloc_buffer((512, 512), scope="shared")
        B_shared = T.alloc_buffer((512, 512), scope="shared")
        for i_0_j_0_fused in T.thread_binding(2, thread="blockIdx.x"):
            for i_1_j_1_fused in T.thread_binding(512, thread="vthread.x"):
                for i_2_j_2_fused in T.thread_binding(16, thread="threadIdx.x"):
                    for k_0 in range(2):
                        for ax0_ax1_fused in range(131072):
                            with T.block("A_shared"):
                                v0 = T.axis.spatial(512, ax0_ax1_fused // 256)
                                v1 = T.axis.spatial(512, k_0 * 256 + ax0_ax1_fused % 256)
                                T.reads(A[v0, v1])
                                T.writes(A_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 2})
                                A_shared[v0, v1] = A[v0, v1]
                        for ax0_ax1_fused in range(65536):
                            with T.block("B_shared"):
                                v0 = T.axis.spatial(512, k_0 * 256 + ax0_ax1_fused // 256)
                                v1 = T.axis.spatial(512, i_0_j_0_fused * 256 + ax0_ax1_fused % 256)
                                T.reads(B[v0, v1])
                                T.writes(B_shared[v0, v1])
                                T.block_attr({"meta_schedule.cooperative_fetch": 3})
                                B_shared[v0, v1] = B[v0, v1]
                        for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(64, 1, 1, 4, 1, 16):
                            with T.block("C"):
                                v_i = T.axis.spatial(
                                    512,
                                    i_1_j_1_fused // 8 * 8 + i_2_j_2_fused // 2 + i_3 + i_4,
                                )
                                v_j = T.axis.spatial(
                                    512,
                                    i_0_j_0_fused * 256
                                    + i_1_j_1_fused % 8 * 32
                                    + i_2_j_2_fused % 2 * 16
                                    + j_3 * 16
                                    + j_4,
                                )
                                v_k = T.axis.reduce(512, k_0 * 256 + k_1 * 4 + k_2)
                                T.reads(A_shared[v_i, v_k], B_shared[v_k, v_j])
                                T.writes(C_local[v_i, v_j])
                                T.block_attr(
                                    {
                                        "meta_schedule.thread_extent_high_inclusive": 1024,
                                        "meta_schedule.thread_extent_low_inclusive": 32,
                                        "meta_schedule.tiling_structure": "SSSRRSRS",
                                    }
                                )
                                with T.init():
                                    C_local[v_i, v_j] = T.float32(0)
                                C_local[v_i, v_j] = (
                                    C_local[v_i, v_j] + A_shared[v_i, v_k] * B_shared[v_k, v_j]
                                )
                        for ax0, ax1 in T.grid(1, 16):
                            with T.block("C_local"):
                                v0 = T.axis.spatial(
                                    512,
                                    i_1_j_1_fused // 8 * 8 + i_2_j_2_fused // 2 + ax0,
                                )
                                v1 = T.axis.spatial(
                                    512,
                                    i_0_j_0_fused * 256
                                    + i_1_j_1_fused % 8 * 32
                                    + i_2_j_2_fused % 2 * 16
                                    + ax1,
                                )
                                T.reads(C_local[v0, v1])
                                T.writes(C[v0, v1])
                                C[v0, v1] = C_local[v0, v1]
        for ax0, ax1 in T.grid(512, 512):
            with T.block("T_add"):
                v_ax0 = T.axis.spatial(512, ax0)
                v_ax1 = T.axis.spatial(512, ax1)
                T.reads(C[v_ax0, v_ax1], A[v_ax0, v_ax1])
                T.writes(T_add[v_ax0, v_ax1])
                T_add[v_ax0, v_ax1] = C[v_ax0, v_ax1] + A[v_ax0, v_ax1]
    decision_0 = [
        ("SamplePerfectTile", [1, 64, 8, 1, 1]),
        ("SamplePerfectTile", [2, 8, 2, 1, 16]),
        ("SamplePerfectTile", [2, 64, 4]),
        ("SampleCategorical", 1),
        ("SampleCategorical", 2),
    ]
    A, B, C = te_workload.matmul(512, 512, 512)
    mod = te.create_prim_func([A, B, C + A])
    space = generate_design_space(
        kind="cuda",
        mod=mod,
        target=Target("nvidia/geforce-rtx-2080"),  # disable async trace using sm75
        types=ms.schedule_rule.MultiLevelTiling,
    )
    check_sketches(
        mod,
        sketches=space,
        expected_mods=[cache_read_specify_consumer_0],
        expected_decisions=[decision_0],
    )
def test_max_pool_blocked():
    """MultiLevelTiling (structure "SRS", must-reuse read+write at global scope)
    on a blocked max-pool layout; only the "pool" block is tiled via filter_fn.

    NOTE(review): the original guards said ``# fmt off`` / ``# fmt on`` — those
    are no-ops for black; fixed to the effective ``# fmt: off`` / ``# fmt: on``.
    """
    # fmt: off
    @T.prim_func
    def pool_blocked_cache_read_write(
        X: T.Buffer((1, 2, 8, 8, 8, 8, 32), "uint8"),
        pool: T.Buffer((1, 2, 4, 4, 8, 8, 32), "uint8"),
    ):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        pool_global = T.alloc_buffer((1, 2, 4, 4, 8, 8, 32), "uint8")
        X_global = T.alloc_buffer((1, 2, 8, 8, 8, 8, 32), "uint8")
        for b_0, c_o_0, h_o_0, w_o_0, h_i_0, w_i_0, c_i_0 in T.grid(1, 2, 4, 1, 8, 1, 4):
            for ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused in range(896):
                with T.block("X_global"):
                    v0 = T.axis.spatial(1, 0)
                    v1 = T.axis.spatial(2, c_o_0)
                    v2 = T.axis.spatial(8, h_o_0 * 2)
                    v3 = T.axis.spatial(8, ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused // 128)
                    v4 = T.axis.spatial(
                        8, h_i_0 % 4 * 2 + ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % 128 // 64
                    )
                    v5 = T.axis.spatial(8, ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % 64 // 8)
                    v6 = T.axis.spatial(32, c_i_0 * 8 + ax0_ax1_ax2_ax3_ax4_ax5_ax6_fused % 8)
                    T.reads(X[v0, v1, v2, v3, v4, v5, v6])
                    T.writes(X_global[v0, v1, v2, v3, v4, v5, v6])
                    X_global[v0, v1, v2, v3, v4, v5, v6] = X[v0, v1, v2, v3, v4, v5, v6]
            for wh, ww, b_1, c_o_1, h_o_1, w_o_1, h_i_1, w_i_1, c_i_1 in T.grid(
                2, 2, 1, 1, 1, 4, 1, 8, 8
            ):
                with T.block("pool"):
                    v_b = T.axis.spatial(1, b_0 + b_1)
                    v_c_o = T.axis.spatial(2, c_o_0 + c_o_1)
                    v_h_o = T.axis.spatial(4, h_o_0 + h_o_1)
                    v_w_o = T.axis.spatial(4, w_o_0 * 4 + w_o_1)
                    v_h_i = T.axis.spatial(8, h_i_0 + h_i_1)
                    v_w_i = T.axis.spatial(8, w_i_0 * 8 + w_i_1)
                    v_c_i = T.axis.spatial(32, c_i_0 * 8 + c_i_1)
                    v_wh, v_ww = T.axis.remap("RR", [wh, ww])
                    T.reads(
                        X_global[
                            v_b,
                            v_c_o,
                            v_h_i // 8 * 2 + v_h_o * 2,
                            v_w_i // 8 * 2 + v_w_o * 2,
                            v_h_i % 4 * 2 + v_wh,
                            v_w_i % 4 * 2 + v_ww,
                            v_c_i,
                        ]
                    )
                    T.writes(pool_global[v_b, v_c_o, v_h_o, v_w_o, v_h_i, v_w_i, v_c_i])
                    T.block_attr({"meta_schedule.tiling_structure": "SRS"})
                    with T.init():
                        pool_global[v_b, v_c_o, v_h_o, v_w_o, v_h_i, v_w_i, v_c_i] = T.uint8(0)
                    pool_global[v_b, v_c_o, v_h_o, v_w_o, v_h_i, v_w_i, v_c_i] = T.max(
                        pool_global[v_b, v_c_o, v_h_o, v_w_o, v_h_i, v_w_i, v_c_i],
                        X_global[
                            v_b,
                            v_c_o,
                            v_h_i // 8 * 2 + v_h_o * 2,
                            v_w_i // 8 * 2 + v_w_o * 2,
                            v_h_i % 4 * 2 + v_wh,
                            v_w_i % 4 * 2 + v_ww,
                            v_c_i,
                        ],
                    )
            for ax0, ax1, ax2, ax3, ax4, ax5, ax6 in T.grid(1, 1, 1, 4, 1, 8, 8):
                with T.block("pool_global"):
                    v0 = T.axis.spatial(1, ax0)
                    v1 = T.axis.spatial(2, c_o_0 + ax1)
                    v2 = T.axis.spatial(4, h_o_0 + ax2)
                    v3 = T.axis.spatial(4, ax3)
                    v4 = T.axis.spatial(8, h_i_0 + ax4)
                    v5 = T.axis.spatial(8, ax5)
                    v6 = T.axis.spatial(32, c_i_0 * 8 + ax6)
                    T.reads(pool_global[v0, v1, v2, v3, v4, v5, v6])
                    T.writes(pool[v0, v1, v2, v3, v4, v5, v6])
                    pool[v0, v1, v2, v3, v4, v5, v6] = pool_global[v0, v1, v2, v3, v4, v5, v6]
    # fmt: on
    def max_pool_blocked_compute(height, width, channel):
        # 7-D blocked layout: (batch, C//32, H//8, W//8, 8, 8, 32)
        ishape = (1, channel // 32, height // 8, width // 8, 8, 8, 32)
        oshape = (1, channel // 32, height // 8 // 2, width // 8 // 2, 8, 8, 32)
        X = te.placeholder(ishape, name="X", dtype="uint8")
        window_h = te.reduce_axis((0, 2), name="wh")
        window_w = te.reduce_axis((0, 2), name="ww")
        out = te.compute(
            oshape,
            lambda b, c_o, h_o, w_o, h_i, w_i, c_i: te.max(
                X[
                    b,
                    c_o,
                    (h_o * 8 + h_i) // 8 * 2,
                    (w_o * 8 + w_i) // 8 * 2,
                    (h_o * 8 + h_i) % 4 * 2 + window_h,
                    (w_o * 8 + w_i) % 4 * 2 + window_w,
                    c_i,
                ],
                axis=[window_h, window_w],
            ),
            name="pool",
        )
        return [X, out]
    height = width = 64
    channel = 64
    mod = te.create_prim_func(max_pool_blocked_compute(height, width, channel))
    actual = generate_design_space(
        kind="llvm",
        mod=mod,
        target=Target("llvm"),
        types=None,
        sch_rules=[
            ms.schedule_rule.MultiLevelTiling(
                structure="SRS",
                tile_binds=None,
                max_innermost_factor=64,
                vector_load_lens=None,
                reuse_read=ms.schedule_rule.ReuseType(
                    req="must",
                    levels=[1],
                    scope="global",
                ),
                reuse_write=ms.schedule_rule.ReuseType(req="must", levels=[1], scope="global"),
                filter_fn=lambda sch, block_rv: sch.get(block_rv).name_hint == "pool",
            )
        ],
    )
    decision = [
        ("SamplePerfectTile", [1, 1]),
        ("SamplePerfectTile", [2, 1]),
        ("SamplePerfectTile", [4, 1]),
        ("SamplePerfectTile", [1, 4]),
        ("SamplePerfectTile", [8, 1]),
        ("SamplePerfectTile", [1, 8]),
        ("SamplePerfectTile", [4, 8]),
    ]
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[pool_blocked_cache_read_write],
        expected_decisions=[decision],
    )
# Entry point: run all tests in this file through TVM's pytest wrapper.
if __name__ == "__main__":
    tvm.testing.main()
| 38,854 | 42.706412 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_tvmscript_meta_programming.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.script import tir as T
def test_meta_programming_matmul():
    """A TVMScript prim_func generated from closure-captured Python values
    (M, N, K, dtype) is structurally equal to the hand-specialized version."""

    def matmul_generator(M: int, N: int, K: int, dtype: str):
        @T.prim_func
        def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, [M, K], dtype=dtype)
            B = T.match_buffer(b, [N, K], dtype=dtype)
            C = T.match_buffer(c, [M, N], dtype=dtype)
            for i, j, k in T.grid(M, N, K):
                with T.block():
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    with T.init():
                        # Init is float32 in BOTH versions (even for fp16
                        # buffers), so structural equality still holds.
                        C[vi, vj] = T.float32(0)
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
        return matmul
    @T.prim_func
    def matmul_128_128_128_fp16(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128], dtype="float16")
        B = T.match_buffer(b, [128, 128], dtype="float16")
        C = T.match_buffer(c, [128, 128], dtype="float16")
        for i, j, k in T.grid(128, 128, 128):
            with T.block():
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
    f = matmul_generator(128, 128, 128, "float16")
    tvm.ir.assert_structural_equal(f, matmul_128_128_128_fp16)
def test_meta_programming_uncaptured_var():
    """A dtype passed as a plain Python string (not a TVMScript var) is baked
    into the generated prim_func; both specializations must match the
    hand-written fp16/fp32 references."""

    def generate_erf(dtype):
        @T.prim_func
        def main(A: T.Buffer((1,), dtype), C: T.Buffer((1,), dtype)):
            for i in range(1):
                with T.block("C"):
                    C[i] = T.erf(A[i])
        return main
    @T.prim_func
    def fp32(A: T.Buffer((1,), "float32"), C: T.Buffer((1,), "float32")):
        for i in range(1):
            with T.block("C"):
                C[i] = T.erf(A[i])
    @T.prim_func
    def fp16(A: T.Buffer((1,), "float16"), C: T.Buffer((1,), "float16")):
        for i in range(1):
            with T.block("C"):
                C[i] = T.erf(A[i])
    tvm.ir.assert_structural_equal(fp16, generate_erf("float16"))
    tvm.ir.assert_structural_equal(fp32, generate_erf("float32"))
# Entry point: run both meta-programming tests directly as a script.
if __name__ == "__main__":
    test_meta_programming_matmul()
    test_meta_programming_uncaptured_var()
| 3,061 | 35.023529 | 79 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_pad_einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
def test_pad_matmul():
    """pad_einsum on a dynamic-N matmul: pads B and the output to the next
    multiple of 32 and inserts B_pad / C_pad copy blocks."""
    # pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
    @T.prim_func
    def matmul_before(
        a: T.handle,
        b: T.handle,
        c: T.handle,
    ) -> None:
        n = T.int32()
        A = T.match_buffer(a, (128, 128), "float32")
        B = T.match_buffer(b, (n, 128), "float32")
        C = T.match_buffer(c, (128, n), "float32")
        for i0, i1, i2 in T.grid(128, n, 128):
            with T.block("C"):
                i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                with T.init():
                    C[i, j] = T.float32(0)
                C[i, j] = C[i, j] + A[i, k] * B[j, k]
    @T.prim_func
    def matmul_after(
        a: T.handle,
        b: T.handle,
        c: T.handle,
    ):
        n = T.int32()
        A = T.match_buffer(a, (128, 128), "float32")
        B = T.match_buffer(b, (n, 128), "float32")
        C = T.match_buffer(c, (128, n), "float32")
        B_pad = T.alloc_buffer(((n + 31) // 32 * 32, 128))
        C_pad = T.alloc_buffer((128, (n + 31) // 32 * 32))
        for i0, i1 in T.grid((n + 31) // 32 * 32, 128):
            with T.block("B_pad"):
                v0, v1 = T.axis.remap("SS", [i0, i1])
                B_pad[v0, v1] = T.if_then_else(v0 < n, B[v0, v1], T.float32(0))
        for i0, i1, i2 in T.grid(128, (n + 31) // 32 * 32, 128):
            with T.block("C"):
                i, j, k = T.axis.remap("SSR", [i0, i1, i2])
                T.reads(A[i, k], B_pad[j, k])
                T.writes(C_pad[i, j])
                with T.init():
                    C_pad[i, j] = T.float32(0)
                C_pad[i, j] = C_pad[i, j] + A[i, k] * B_pad[j, k]
        for i0, i1 in T.grid(128, n):
            with T.block("C_pad"):
                v0, v1 = T.axis.remap("SS", [i0, i1])
                C[v0, v1] = C_pad[v0, v1]
    sch = tir.Schedule(matmul_before, debug_mask="all")
    C = sch.get_block("C")
    sch.pad_einsum(C, [32, 32, 32])
    tvm.ir.assert_structural_equal(matmul_after, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=matmul_before)
def test_pad_matmul_2():
    """pad_einsum when the padded producer output is consumed by a later
    elementwise block: only the einsum region is padded; the D block keeps
    reading the unpadded C buffer."""

    @T.prim_func
    def before(
        a: T.handle,
        b: T.handle,
        m: T.handle,
        d: T.handle,
    ):
        T.func_attr({"tir.noalias": T.bool(True)})
        n = T.int32()
        A = T.match_buffer(a, (1, n, 4096))
        B = T.match_buffer(b, (11008, 4096))
        M = T.match_buffer(m, (1, n, 11008))
        D = T.match_buffer(d, (1, n, 11008))
        C = T.alloc_buffer((1, n, 11008))
        for i0, i1, i2, k in T.grid(1, n, 11008, 4096):
            with T.block("C"):
                v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k])
                T.reads(A[v_i0, v_i1, v_k], B[v_i2, v_k])
                T.writes(C[v_i0, v_i1, v_i2])
                with T.init():
                    C[v_i0, v_i1, v_i2] = T.float32(0)
                C[v_i0, v_i1, v_i2] = C[v_i0, v_i1, v_i2] + A[v_i0, v_i1, v_k] * B[v_i2, v_k]
        for ax0, ax1, ax2 in T.grid(1, n, 11008):
            with T.block("D"):
                v_ax0, v_ax1, v_ax2 = T.axis.remap("SSS", [ax0, ax1, ax2])
                D[v_ax0, v_ax1, v_ax2] = M[v_ax0, v_ax1, v_ax2] * C[v_ax0, v_ax1, v_ax2]
    @T.prim_func
    def after(a: T.handle, b: T.handle, m: T.handle, d: T.handle):
        T.func_attr({"tir.noalias": T.bool(True)})
        n = T.int32()
        A = T.match_buffer(a, (1, n, 4096))
        B = T.match_buffer(b, (11008, 4096))
        M = T.match_buffer(m, (1, n, 11008))
        D = T.match_buffer(d, (1, n, 11008))
        # with T.block("root"):
        C = T.alloc_buffer((1, n, 11008))
        A_pad = T.alloc_buffer((1, (n + 31) // 32 * 32, 4096))
        C_pad = T.alloc_buffer((1, (n + 31) // 32 * 32, 11008))
        for i0, i1, i2 in T.grid(1, (n + 31) // 32 * 32, 4096):
            with T.block("A_pad"):
                v0, v1, v2 = T.axis.remap("SSS", [i0, i1, i2])
                A_pad[v0, v1, v2] = T.if_then_else(v1 < n, A[v0, v1, v2], T.float32(0))
        for i0, i1, i2, k in T.grid(1, (n + 31) // 32 * 32, 11008, 4096):
            with T.block("C"):
                v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k])
                T.reads(A_pad[v_i0, v_i1, v_k], B[v_i2, v_k])
                T.writes(C_pad[v_i0, v_i1, v_i2])
                with T.init():
                    C_pad[v_i0, v_i1, v_i2] = T.float32(0)
                C_pad[v_i0, v_i1, v_i2] = (
                    C_pad[v_i0, v_i1, v_i2] + A_pad[v_i0, v_i1, v_k] * B[v_i2, v_k]
                )
        for i0, i1, i2 in T.grid(1, n, 11008):
            with T.block("C_pad"):
                v0, v1, v2 = T.axis.remap("SSS", [i0, i1, i2])
                C[v0, v1, v2] = C_pad[v0, v1, v2]
        for ax0, ax1, ax2 in T.grid(1, n, 11008):
            with T.block("D"):
                v_ax0, v_ax1, v_ax2 = T.axis.remap("SSS", [ax0, ax1, ax2])
                D[v_ax0, v_ax1, v_ax2] = M[v_ax0, v_ax1, v_ax2] * C[v_ax0, v_ax1, v_ax2]
    sch = tir.Schedule(before, debug_mask="all")
    C = sch.get_block("C")
    sch.pad_einsum(C, [1, 32, 32, 32])
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=before)
def test_pad_rms():
    """pad_einsum on the square-sum reduction of an RMS-norm: pads A and S on
    the dynamic axis; the normalization block R stays on the unpadded buffers."""

    @T.prim_func
    def before(
        a: T.handle,
        w: T.handle,
        r: T.handle,
    ):
        T.func_attr({"tir.noalias": T.bool(True)})
        n = T.int32()
        A = T.match_buffer(a, (1, n, 4096))
        W = T.match_buffer(w, (4096,), "float32")
        R = T.match_buffer(r, (1, n, 4096), "float32")
        S = T.alloc_buffer((1, n), "float32")
        for bsz, i, k in T.grid(1, n, 4096):
            with T.block("S"):
                v_bsz, v_i, v_k = T.axis.remap("SSR", [bsz, i, k])
                T.reads(A[v_bsz, v_i, v_k])
                T.writes(S[v_bsz, v_i])
                with T.init():
                    S[v_bsz, v_i] = T.float32(0)
                S[v_bsz, v_i] = S[v_bsz, v_i] + A[v_bsz, v_i, v_k] * A[v_bsz, v_i, v_k]
        for bsz, i, k in T.grid(1, n, 4096):
            with T.block("R"):
                v_bsz, v_i, v_k = T.axis.remap("SSS", [bsz, i, k])
                R[v_bsz, v_i, v_k] = W[v_k] * (
                    A[v_bsz, v_i, v_k]
                    / T.sqrt(S[v_bsz, v_i] * T.float32(0.000244140625) + T.float32(1e-6))
                )
    @T.prim_func
    def after(a: T.handle, w: T.handle, r: T.handle):
        T.func_attr({"tir.noalias": T.bool(True)})
        n = T.int32()
        A = T.match_buffer(a, (1, n, 4096))
        W = T.match_buffer(w, (4096,), "float32")
        R = T.match_buffer(r, (1, n, 4096))
        S = T.alloc_buffer((1, n))
        A_pad = T.alloc_buffer((1, (n + 31) // 32 * 32, 4096))
        S_pad = T.alloc_buffer((1, (n + 31) // 32 * 32))
        for i0, i1, i2 in T.grid(1, (n + 31) // 32 * 32, 4096):
            with T.block("A_pad"):
                v0, v1, v2 = T.axis.remap("SSS", [i0, i1, i2])
                A_pad[v0, v1, v2] = T.if_then_else(v1 < n, A[v0, v1, v2], T.float32(0))
        for bsz, i, k in T.grid(1, (n + 31) // 32 * 32, 4096):
            with T.block("S"):
                v_bsz, v_i, v_k = T.axis.remap("SSR", [bsz, i, k])
                T.reads(A_pad[v_bsz, v_i, v_k])
                T.writes(S_pad[v_bsz, v_i])
                with T.init():
                    S_pad[v_bsz, v_i] = T.float32(0)
                S_pad[v_bsz, v_i] = (
                    S_pad[v_bsz, v_i] + A_pad[v_bsz, v_i, v_k] * A_pad[v_bsz, v_i, v_k]
                )
        for i0, i1 in T.grid(1, n):
            with T.block("S_pad"):
                v0, v1 = T.axis.remap("SS", [i0, i1])
                S[v0, v1] = S_pad[v0, v1]
        for bsz, i, k in T.grid(1, n, 4096):
            with T.block("R"):
                v_bsz, v_i, v_k = T.axis.remap("SSS", [bsz, i, k])
                R[v_bsz, v_i, v_k] = W[v_k] * (
                    A[v_bsz, v_i, v_k]
                    / T.sqrt(S[v_bsz, v_i] * T.float32(0.000244140625) + T.float32(1e-6))
                )
    sch = tir.Schedule(before, debug_mask="all")
    C = sch.get_block("S")
    sch.pad_einsum(C, [1, 32, 1])
    tvm.ir.assert_structural_equal(after, sch.mod["main"])
    verify_trace_roundtrip(sch, mod=before)
# Entry point: run the three pad_einsum tests directly as a script.
if __name__ == "__main__":
    test_pad_matmul()
    test_pad_matmul_2()
    test_pad_rms()
| 9,327 | 39.912281 | 93 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
@tvm.testing.requires_package("torch")
def test_from_dlpack_shape_one():
    """A (1, 16) torch tensor imported via DLPack is usable as a kernel input."""
    # A test case for the issue https://github.com/pytorch/pytorch/issues/99803
    import torch
    from torch.utils.dlpack import to_dlpack
    tgt = tvm.target.Target(target="llvm", host="llvm")
    rows = 1
    # Import a torch tensor zero-copy through the DLPack protocol.
    a = tvm.runtime.ndarray.from_dlpack(to_dlpack(torch.randn(rows, 16)))
    A = te.placeholder((rows, 16), name="A")
    B = te.placeholder((rows, 16), name="B")
    C = te.compute(A.shape, lambda i, j: A[i, j] + B[i, j], name="C")
    s = te.create_schedule(C.op)
    fadd = tvm.build(s, [A, B, C], tgt)
    dev = tvm.device(tgt.kind.name, 0)
    b = tvm.nd.array(np.random.uniform(size=(rows, 16)).astype(B.dtype), dev)
    c = tvm.nd.array(np.zeros((rows, 16), dtype=C.dtype), dev)
    fadd(a, b, c)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
@tvm.testing.requires_package("torch")
def test_from_dlpack_strided():
    """A DLPack-imported tensor supports a zero-copy reshape via _create_view."""
    import torch
    from torch.utils.dlpack import to_dlpack

    num_rows = 1
    source = torch.randn(num_rows, 16)
    imported = tvm.runtime.ndarray.from_dlpack(to_dlpack(source))
    reshaped = imported._create_view((2, 8))
    expected = source.numpy().reshape(2, 8)
    np.testing.assert_equal(expected, reshaped.numpy())
# Entry point: run all DLPack tests through TVM's pytest wrapper.
if __name__ == "__main__":
    tvm.testing.main()
| 2,111 | 31 | 79 | py |
tvm | tvm-main/tests/python/unittest/test_tir_ir_builder.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
import tvm.testing
from tvm.topi.math import cast
def test_for():
    """Nested for_range emits Allocate -> For -> SeqStmt whose second entry
    is the inner For."""
    builder = tvm.tir.ir_builder.create()
    extent = te.size_var("n")
    A = builder.allocate("float32", extent, name="A", scope="global")
    with builder.for_range(0, extent, name="i") as i:
        A[i] = A[i] + 1
        with builder.for_range(0, 10, name="j") as j:
            A[j] = A[j] + 2
    # Walk down the statement tree, checking the node type at each layer.
    node = builder.get()
    assert isinstance(node, tvm.tir.Allocate)
    node = node.body
    assert isinstance(node, tvm.tir.For)
    node = node.body
    assert isinstance(node, tvm.tir.SeqStmt)
    assert isinstance(node[1], tvm.tir.For)
def test_if():
    """if_scope/else_scope emit an IfThenElse with the expected condition and
    branch buffer indices."""
    ib = tvm.tir.ir_builder.create()
    n = te.size_var("n")
    A = ib.pointer("float32", name="A")
    tmod = tvm.tir.truncmod
    with ib.for_range(0, n, name="i") as i:
        with ib.if_scope(tmod(i, 2) == 0):
            A[i] = A[i] + 1
        with ib.else_scope():
            A[0] = A[i] + 2
    body = ib.get()
    # NOTE(review): trivially true — presumably a smoke check that the
    # buffer-var equality operator works; confirm before removing.
    assert A == A
    assert isinstance(body, tvm.tir.For)
    body = body.body
    assert isinstance(body, tvm.tir.IfThenElse)
    assert isinstance(body.condition, tvm.tir.EQ)
    # then-branch stores at the loop variable; else-branch stores at index 0.
    assert isinstance(body.then_case.indices[0], tvm.tir.Var)
    assert list(body.else_case.indices) == [0]
def test_prefetch():
    """An emitted Prefetch node keeps the requested region bounds."""
    buf = tvm.tir.decl_buffer((10, 20), name="A")
    builder = tvm.tir.ir_builder.create()
    extent = te.size_var("n")
    with builder.for_range(0, extent, name="i") as i:
        region = [
            tvm.ir.Range.from_min_extent(i + 1, 2),
            tvm.ir.Range.from_min_extent(0, 20),
        ]
        builder.emit(tvm.tir.Prefetch(buf, region))
    stmt = builder.get()
    # First dimension of the prefetched region must span 2 elements.
    assert stmt.body.bounds[0].extent.value == 2
def test_cpu():
    """Build a CPU vector-add from hand-written IRBuilder IR and compare the
    result against numpy.

    Fix: removed the unused ``max_threads`` local in the IR generator — it was
    never referenced here (only the GPU variant of this test needs it).
    """
    n = 1024
    dtype = "float32"
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")

    def test_device_ir(A, B, C):
        # Emit: for i in [0, n): C[i] = A[i] + B[i]
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        with ib.for_range(0, n, name="i") as i:
            Cptr[i] = Aptr[i] + Bptr[i]
        body = ib.get()
        return body

    C = te.extern(
        A.shape,
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vector_add",
        dtype=dtype,
    )
    s = te.create_schedule(C.op)

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            return
        # Build, then launch with random inputs and verify elementwise.
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())

    check_target("llvm")
@tvm.testing.requires_gpu
def test_gpu():
    """Build a GPU vector-add from IRBuilder IR with explicit block/thread
    binding and a tail guard, then verify on OpenCL and CUDA."""
    n = te.size_var("n")
    dtype = "float32"
    A = te.placeholder((n,), name="A")
    B = te.placeholder((n,), name="B")
    idxd = tvm.tir.indexdiv
    def test_device_ir(A, B, C):
        n = A.shape[0]
        max_threads = 32
        ib = tvm.tir.ir_builder.create()
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        # ceil(n / max_threads) blocks of max_threads threads each.
        ib.scope_attr(bx, "thread_extent", idxd(n + max_threads - 1, max_threads))
        ib.scope_attr(tx, "thread_extent", max_threads)
        idx = bx.var * max_threads + tx.var
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        # Guard the tail: idx can exceed n in the last thread block.
        with ib.if_scope(ib.likely(idx < n)):
            Cptr[idx] = Aptr[idx] + Bptr[idx]
        body = ib.get()
        return body
    C = te.extern(
        A.shape,
        [A, B],
        lambda ins, outs: test_device_ir(ins[0], ins[1], outs[0]),
        name="vector_add",
        dtype=dtype,
    )
    s = te.create_schedule(C.op)
    # NOTE(review): `bounds`/`stmt` are unused below — presumably a smoke check
    # that bound inference/lowering succeeds; confirm before removing.
    bounds = tvm.te.schedule.InferBound(s)
    stmt = tvm.te.schedule.ScheduleOps(s, bounds)
    def check_target(target):
        n = 1024
        if not tvm.testing.device_enabled(target):
            return
        # build and invoke the kernel.
        fadd = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        fadd(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
    check_target("opencl")
    check_target("cuda")
def test_while_vectorize():
    """Test while loop + vectorized inner loop"""
    n = 64
    num_iter = 10
    def test_ir(A, B, C):
        # Emit: zero C, then num_iter times do C[j] += A[j] + B[j] vectorized.
        ib = tvm.tir.ir_builder.create()
        n = C.shape[0]
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)
        C = ib.buffer_ptr(C)
        i = ib.allocate("int32", (1,), name="i", scope="local")
        i[0] = 0
        with ib.for_range(0, n) as j:
            C[j] = 0.0
        with ib.while_loop(i[0] < num_iter):
            with ib.for_range(0, n, kind="vectorize") as j:
                C[j] += A[j] + B[j]
            i[0] += 1
        return ib.get()
    def check_target(target, ir):
        dtype = "float32"
        A = te.placeholder((n,), name="A", dtype=dtype)
        B = te.placeholder((n,), name="B", dtype=dtype)
        C = te.extern(
            (n,),
            [A, B],
            lambda ins, outs: ir(ins[0], ins[1], outs[0]),
            name="while_vectorize",
            dtype=dtype,
        )
        s = te.create_schedule(C.op)
        with tvm.transform.PassContext(opt_level=3):
            func = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        a_np = np.random.uniform(size=n).astype(A.dtype)
        b_np = np.random.uniform(size=n).astype(B.dtype)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        func(a, b, c)
        # After num_iter accumulations the result is num_iter * (a + b).
        ref = num_iter * (a_np + b_np)
        tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)
    check_target("llvm", test_ir)
def test_while_collatz():
    """Test while loop + if"""
    def collatz_ref(n):
        # Pure-Python reference: number of Collatz steps from n down to 1.
        a = n
        i = 0
        while a > 1:
            if a % 2 == 1:
                a = 3 * a + 1
            else:
                a = a >> 1
            i += 1
        return i
    def collatz(ib, n, C):
        # IRBuilder version of collatz_ref: C[n] = step count for n.
        i = ib.allocate("int32", (1,), name="i", scope="local")
        a = ib.allocate("int32", (1,), name="a", scope="local")
        i[0] = 0
        a[0] = n
        with ib.while_loop(a[0] > 1):
            with ib.if_scope(tvm.tir.floormod(a[0], 2) == 1):
                a[0] = 3 * a[0] + 1
            with ib.else_scope():
                a[0] = a[0] >> 1
            i[0] += 1
        C[n] = i[0]
    def collatz_ir_cpu(C):
        ib = tvm.tir.ir_builder.create()
        n = C.shape[0]
        C = ib.buffer_ptr(C)
        with ib.for_range(0, n, name="i", kind="parallel") as i:
            collatz(ib, i, C)
        body = ib.get()
        return body
    n = 30
    def check_target(target, ir):
        C = te.extern(
            (n,),
            [],
            lambda ins, outs: ir(outs[0]),
            name="collatz",
            dtype="int32",
        )
        s = te.create_schedule(C.op)
        with tvm.transform.PassContext(opt_level=3):
            func = tvm.build(s, [C], target)
        dev = tvm.device(target, 0)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        func(c)
        ref = np.array([collatz_ref(i) for i in range(n)])
        tvm.testing.assert_allclose(c.numpy(), ref)
    check_target("llvm", collatz_ir_cpu)
def test_while_mandel():
    """Render a Mandelbrot frame with IRBuilder while-loops on CPU and GPU
    backends and compare against a numpy reference.

    Fix: the NVPTX backend name was misspelled as "npvtx", so that variant was
    never exercised; corrected to "nvptx".
    """
    n = 160
    shape = (n * 2, n)
    t = 300

    def mandel_ref():
        # Numpy reference: escape-time coloring with at most 50 iterations.
        def complex_sqr(z):
            return np.array([z[0] ** 2 - z[1] ** 2, z[1] * z[0] * 2])

        pixels = np.zeros(shape)
        for i in range(pixels.shape[0]):
            for j in range(pixels.shape[1]):
                c = np.array([-0.8, np.cos(t) * 0.2])
                z = np.array([i / n - 1, j / n - 0.5]) * 2
                iterations = 0
                while np.linalg.norm(z) < 20 and iterations < 50:
                    z = complex_sqr(z) + c
                    iterations += 1
                pixels[i, j] = 1 - iterations * 0.02
        return pixels

    def mandel(ib, i, j, pixels):
        # Per-pixel escape-time loop, emitted with IRBuilder while_loop.
        z = ib.allocate("float32", (2,), name="z", scope="local")
        tmp = ib.allocate("float32", (1,), name="tmp", scope="local")
        iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
        z[0] = (i / float(n) - 1) * 2
        z[1] = (j / float(n) - 0.5) * 2
        iterations[0] = 0
        c = [-0.8, float(np.cos(t)) * 0.2]

        def norm(z):
            return tvm.tir.sqrt(z[0] * z[0] + z[1] * z[1])

        with ib.while_loop(tvm.tir.all(norm(z) < 20, iterations[0] < 50)):
            tmp[0] = z[0]
            z[0] = z[0] * z[0] - z[1] * z[1] + c[0]
            z[1] = z[1] * tmp[0] * 2 + c[1]
            iterations[0] += 1
        pixels[i, j] = 1 - iterations[0] * 0.02

    def mandel_ir_cpu(C):
        ib = tvm.tir.ir_builder.create()
        ny = C.shape[0]
        nx = C.shape[1]
        C = ib.buffer_ptr(C)
        with ib.for_range(0, ny, name="i", kind="parallel") as i:
            with ib.for_range(0, nx, name="j") as j:
                mandel(ib, i, j, C)
        body = ib.get()
        return body

    def mandel_ir_gpu(C):
        ib = tvm.tir.ir_builder.create()
        ny = C.shape[0]
        nx = C.shape[1]
        C = ib.buffer_ptr(C)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        by = te.thread_axis("blockIdx.y")
        ty = te.thread_axis("threadIdx.y")
        max_threads = 16
        # 2-D grid of 16x16 thread blocks covering (ny, nx), with tail guard.
        ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(nx + max_threads - 1, max_threads))
        ib.scope_attr(tx, "thread_extent", max_threads)
        ib.scope_attr(by, "thread_extent", tvm.tir.indexdiv(ny + max_threads - 1, max_threads))
        ib.scope_attr(ty, "thread_extent", max_threads)
        tidx = bx * max_threads + tx
        tidy = by * max_threads + ty
        with ib.if_scope(tvm.tir.all(tidx < nx, tidy < ny)):
            mandel(ib, tidy, tidx, C)
        body = ib.get()
        return body

    ref = mandel_ref()

    def check_target(target, ir):
        if not tvm.testing.device_enabled(target):
            return
        C = te.extern(
            shape,
            [],
            lambda ins, outs: ir(outs[0]),
            name="mandel_ir",
            dtype="float32",
        )
        s = te.create_schedule(C.op)
        with tvm.transform.PassContext(opt_level=3):
            func = tvm.build(s, [C], target)
        dev = tvm.device(target, 0)
        c = tvm.nd.array(np.zeros(shape, dtype=C.dtype), dev)
        func(c)
        tvm.testing.assert_allclose(c.numpy(), ref, rtol=1e-5, atol=1e-5)

    check_target("llvm", mandel_ir_cpu)
    check_target("nvptx", mandel_ir_gpu)  # was "npvtx" (typo): variant never ran
    check_target("cuda", mandel_ir_gpu)
    check_target("vulkan", mandel_ir_gpu)
def test_while_binary_search():
    """Lower a data-dependent tir.While loop: per-element lower-bound binary
    search over a sorted array, checked against np.searchsorted."""
    def binary_search(ib, n, i, Aptr, Bptr, Cptr):
        # Emit IR computing Cptr[i] = first index in sorted Aptr[0:n] with
        # Aptr[idx] >= Bptr[i] (np.searchsorted's default "left" side).
        lo = ib.allocate("int32", (1,), name="lo", scope="local")
        hi = ib.allocate("int32", (1,), name="hi", scope="local")
        lo[0] = 0
        hi[0] = n
        v = Bptr[i]
        with ib.while_loop(lo[0] < hi[0]):
            # `-` binds tighter than `>>`, so this is lo + (hi - lo)/2 -- the
            # overflow-safe midpoint form.
            mid = lo[0] + (hi[0] - lo[0] >> 1)
            with ib.if_scope(Aptr[mid] < v):
                lo[0] = mid + 1
            with ib.else_scope():
                hi[0] = mid
        Cptr[i] = lo[0]
    def searchsorted_ir_cpu(A, B, C, n):
        # CPU variant: one parallel loop, one search per element of B.
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        with ib.for_range(0, n, name="i", kind="parallel") as i:
            binary_search(ib, n, i, Aptr, Bptr, Cptr)
        body = ib.get()
        return body
    def searchsorted_ir_gpu(A, B, C, n):
        # GPU variant: one thread per element, grid sized to cover n.
        ib = tvm.tir.ir_builder.create()
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        Cptr = ib.buffer_ptr(C)
        bx = te.thread_axis("blockIdx.x")
        tx = te.thread_axis("threadIdx.x")
        max_threads = 32
        ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
        ib.scope_attr(tx, "thread_extent", max_threads)
        tid = bx * max_threads + tx
        # Guard: the last block may have threads past the end of the arrays.
        with ib.if_scope(tid < n):
            binary_search(ib, n, tid, Aptr, Bptr, Cptr)
        body = ib.get()
        return body
    n = 1024
    dtype = "float32"
    A = te.placeholder((n,), name="A", dtype=dtype)
    B = te.placeholder((n,), name="B", dtype=dtype)
    def check_target(target, ir):
        # Build the extern op for `target` and compare with np.searchsorted.
        if not tvm.testing.device_enabled(target):
            return
        C = te.extern(
            A.shape,
            [A, B],
            lambda ins, outs: ir(ins[0], ins[1], outs[0], n),
            name="searchsorted_ir",
            dtype="int32",
        )
        s = te.create_schedule(C.op)
        with tvm.transform.PassContext(opt_level=3):
            func = tvm.build(s, [A, B, C], target)
        dev = tvm.device(target, 0)
        a_np = np.random.uniform(size=n).astype(A.dtype)
        b_np = np.random.uniform(size=n).astype(B.dtype)
        a_np = np.sort(a_np)  # A must be sorted for the binary search to be valid
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
        func(a, b, c)
        ref = np.searchsorted(a_np, b_np)
        tvm.testing.assert_allclose(c.numpy(), ref)
    check_target("llvm", searchsorted_ir_cpu)
    check_target("cuda", searchsorted_ir_gpu)
    check_target("nvptx", searchsorted_ir_gpu)
    check_target("vulkan", searchsorted_ir_gpu)
@tvm.testing.requires_gpu
def test_dyn_shared():
    """Tree reduction in dynamically-sized shared memory ("shared.dyn") whose
    extent is the symbolic size var ``n``; one kernel serves multiple sizes."""
    n = te.size_var("n")
    dtype = "float32"
    A = te.placeholder((n,), name="A")
    def test_device_ir(A, B):
        # Power-of-two tree reduction: each round halves the active threads
        # and adds the upper half of `temp` into the lower half.
        n = A.shape[0]
        ib = tvm.tir.ir_builder.create()
        tx = te.thread_axis("threadIdx.x")
        ib.scope_attr(tx, "thread_extent", n)
        temp = ib.allocate(dtype, (n,), scope="shared.dyn")  # n is symbolic size
        Aptr = ib.buffer_ptr(A)
        Bptr = ib.buffer_ptr(B)
        temp[tx] = Aptr[tx]
        # ceil(log2(n)) rounds. NOTE(review): `cast` is a free name here,
        # presumably imported at the top of this file -- confirm.
        depth = tvm.tir.log2(cast(n, "float32"))
        with ib.for_range(0, cast(tvm.tir.ceil(depth), n.dtype)) as i:
            # Barrier before each round so every partial sum is visible.
            ib.emit(tvm.tir.Call(None, "tir.tvm_storage_sync", tvm.runtime.convert(["shared"])))
            d = n >> (i + 1)
            with ib.if_scope(tx < d):
                temp[tx] += temp[tx + d]
        Bptr[0] = temp[0]
        return ib.get()
    B = te.extern(
        (1,),
        [A],
        lambda ins, outs: test_device_ir(ins[0], outs[0]),
        name="reduce",
        dtype=dtype,
    )
    s = te.create_schedule(B.op)
    def check_target(target):
        if not tvm.testing.device_enabled(target):
            return
        freduce = tvm.build(s, [A, B], target)
        dev = tvm.device(target, 0)
        # Run the same symbolically-shaped kernel at two concrete sizes.
        for n in [512, 1024]:
            a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
            b = tvm.nd.array(np.zeros(1, dtype=B.dtype), dev)
            freduce(a, b)
            tvm.testing.assert_allclose(b.numpy()[0], np.sum(a.numpy()), 1e-4, 1e-4)
    for target in ["cuda", "nvptx"]:
        check_target(target)
if __name__ == "__main__":
    # Run every test in this file directly (without pytest); the functions
    # before test_while_binary_search are defined earlier in the file.
    test_prefetch()
    test_if()
    test_for()
    test_cpu()
    test_gpu()
    test_while_vectorize()
    test_while_collatz()
    test_while_mandel()
    test_while_binary_search()
    test_dyn_shared()
# (dataset metadata row removed: 16,582 chars, avg line 28.3, max line 96, py)
# File: tvm-main/tests/python/unittest/test_tvmscript_parser_source.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unittests for tvm.script.parser.core"""
import pytest
import inspect
import tvm.testing
from tvm.script.parser.core.diagnostics import Source
from tvm.script.parser.core import doc_core as doc
from tvm.script import tir as T
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # Parsing fixture: deliberately handed *undecorated* to Source() by the
    # tests below, which assert on its exact source text and AST shape
    # (exactly three Assigns followed by one For). Do not add a docstring or
    # extra statement here -- test_source_ast counts len(body) == 4.
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
def test_source_base():
    """Source() should capture the file name, position, and both the function
    snippet and the full module source."""
    src = Source(matmul)
    assert src.source_name == inspect.getsourcefile(matmul)
    assert src.start_line is not None
    assert src.start_column == 0
    assert src.source == inspect.getsource(matmul)
    assert src.full_source == inspect.getsource(inspect.getmodule(matmul))
def test_source_ast():
    """Source.as_ast() should yield a doc AST mirroring matmul's structure."""
    ast_mod = Source(matmul).as_ast()
    assert isinstance(ast_mod, doc.Module)

    fn = ast_mod.body[0]
    assert isinstance(fn, doc.FunctionDef)
    assert fn.name == "matmul"

    # Signature: exactly the three buffer handles a, b, c.
    assert [arg.arg for arg in fn.args.args] == ["a", "b", "c"]

    # Body: three buffer-match assignments followed by the grid loop.
    body = fn.body
    assert len(body) == 4
    for stmt, expected in zip(body[:3], ["A", "B", "C"]):
        assert isinstance(stmt, doc.Assign)
        assert stmt.targets[0].id == expected

    loop = body[3]
    assert [elt.id for elt in loop.target.elts] == ["i", "j", "k"]

    # Loop body: a single `with T.block(...)` holding two statements.
    assert len(loop.body) == 1
    block = loop.body[0]
    assert isinstance(block, doc.With)
    assert len(block.body) == 2
def test_nesting_parsing():
    """@ir_module parsing should succeed for a class nested inside a function
    scope -- here additionally after a sibling class and inside a for loop.
    The test passes if the decorator raises no exception."""
    class dummy:
        pass
    for i in range(1):
        @tvm.script.ir_module
        class Module:
            @T.prim_func
            def impl(
                A: T.Buffer((12, 196, 64), "float32"),
            ) -> None:
                T.evaluate(0)
if __name__ == "__main__":
    # Allow running this file as a script via tvm.testing's entry point.
    tvm.testing.main()
# (dataset metadata row removed: 3,297 chars, avg line 31.3, max line 78, py)
# (dataset-viewer residue removed: "Subsets and Splits" panel text)