repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
tvm
tvm-main/tests/python/unittest/test_tir_schedule_storage_align.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing  # NOTE(review): added — `tvm.testing.parameter` is used below
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip

# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name


@T.prim_func
def element_wise(a: T.handle, c: T.handle) -> None:
    """Baseline workload: B = A * 2 followed by C = B + 1 on 128x128 buffers."""
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    # body
    with T.block("root"):
        T.reads([])
        T.writes([])
        B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
        for i0 in T.serial(0, 128):
            for ax1 in T.serial(0, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i0, ax1])
                    T.reads([A[vi, vj]])
                    T.writes([B[vi, vj]])
                    B[vi, vj] = (A[vi, vj]*T.float32(2))
            for i1 in T.serial(0, 128):
                with T.block("C"):
                    vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
                    T.reads([B[vi_1, vj_1]])
                    T.writes([C[vi_1, vj_1]])
                    C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))


@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
    """Expected result: same as `element_wise`, with block "B" carrying the
    buffer_dim_align annotation that storage_align(..., factor=128, offset=127) adds."""
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    # body
    with T.block("root"):
        T.reads([])
        T.writes([])
        B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
        for i0 in T.serial(0, 128):
            for ax1 in T.serial(0, 128):
                with T.block("B"):
                    vi, vj = T.axis.remap("SS", [i0, ax1])
                    T.reads([A[vi, vj]])
                    T.writes([B[vi, vj]])
                    T.block_attr({"buffer_dim_align":[[0, 0, 128, 127]]})
                    B[vi, vj] = (A[vi, vj]*T.float32(2))
            for i1 in T.serial(0, 128):
                with T.block("C"):
                    vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
                    T.reads([B[vi_1, vj_1]])
                    T.writes([C[vi_1, vj_1]])
                    C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))


@T.prim_func
def element_wise_invalid_annotation(a: T.handle, c: T.handle) -> None:
    """Workload whose block "B" carries a malformed buffer_dim_align annotation
    (a flat list instead of a list of [buffer_idx, axis, factor, offset] tuples)."""
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    # body
    with T.block("root"):
        T.reads([])
        T.writes([])
        B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
        for i0 in T.serial(0, 128):
            for ax1 in T.serial(0, 128):
                with T.block("B"):
                    T.block_attr({"buffer_dim_align": [0]})
                    vi, vj = T.axis.remap("SS", [i0, ax1])
                    T.reads([A[vi, vj]])
                    T.writes([B[vi, vj]])
                    B[vi, vj] = (A[vi, vj]*T.float32(2))
            for i1 in T.serial(0, 128):
                with T.block("C"):
                    vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
                    T.reads([B[vi_1, vj_1]])
                    T.writes([C[vi_1, vj_1]])
                    C[vi_1, vj_1] = (B[vi_1, vj_1] + T.float32(1))


# Run block-addressing tests both with a block object and with a block name string.
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})


def test_storage_align(use_block_name):
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = 'B' if use_block_name else s.get_block("B")
    s.storage_align(B, 0, axis=0, factor=128, offset=127)
    tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func)


def test_storage_align_update():
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    # A second storage_align on the same (buffer, axis) pair overrides the first.
    s.storage_align(B, 0, axis=0, factor=128, offset=0)
    s.storage_align(B, 0, axis=0, factor=128, offset=127)
    tvm.ir.assert_structural_equal(element_wise_storage_align, s.mod["main"])
    verify_trace_roundtrip(sch=s, mod=func)


def test_storage_align_invalid_factor1():
    # factor must be positive; zero is rejected.
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(B, 0, axis=0, factor=0, offset=127)


def test_storage_align_invalid_factor2():
    # factor must be positive; negative values are rejected.
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(B, 0, axis=0, factor=-1, offset=127)


def test_storage_align_invalid_buffer():
    # Block "C" writes the function output C, not an intermediate allocation,
    # so it cannot be storage-aligned.
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    C = s.get_block("C")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(C, 0, axis=0, factor=128, offset=127)


def test_storage_align_invalid_buffer_index():
    # Block "B" has only one write buffer, so index 2 is out of range.
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(B, 2, axis=0, factor=128, offset=127)


def test_storage_align_invalid_axis():
    # Buffer B is 2-D, so axis 2 is out of range.
    func = element_wise
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(B, 0, axis=2, factor=128, offset=127)


def test_storage_align_invalid_annotation():
    # A pre-existing malformed buffer_dim_align annotation must be rejected.
    func = element_wise_invalid_annotation
    s = tir.Schedule(func, debug_mask='all')
    B = s.get_block("B")
    with pytest.raises(tir.ScheduleError):
        s.storage_align(B, 0, axis=2, factor=128, offset=127)


if __name__ == "__main__":
    # NOTE(review): `test_storage_align` requires its `use_block_name` argument
    # when invoked outside pytest; the original called it with no argument,
    # which raises TypeError. Run both parameter values explicitly instead.
    for _use_block_name in (False, True):
        test_storage_align(_use_block_name)
    test_storage_align_update()
    test_storage_align_invalid_factor1()
    test_storage_align_invalid_factor2()
    test_storage_align_invalid_buffer()
    test_storage_align_invalid_buffer_index()
    test_storage_align_invalid_axis()
    test_storage_align_invalid_annotation()
6,850
37.273743
91
py
tvm
tvm-main/tests/python/unittest/test_arith_canonical_simplify.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te


class CanonicalChecker:
    """Helper asserting that `canonical_simplify(data)` is structurally `expected`."""

    def __init__(self):
        self.analyzer = tvm.arith.Analyzer()

    def verify(self, data, expected):
        res = self.analyzer.canonical_simplify(data)
        expected = tvm.runtime.convert(expected)
        assert tvm.ir.structural_equal(res, expected), "\ndata={}\nres={}\nexpected={}".format(
            data, res, expected
        )


def test_mul_sum_simplify():
    ck = CanonicalChecker()
    x, y, z = te.var("x"), te.var("y"), te.var("z")
    ck.verify(2 + (3 * x + z + y + 1) * 4 + x, x * 13 + z * 4 + y * 4 + 6)
    ck.verify(x * 3 - 4 * x + 1, 1 - x)
    ck.verify(y + x * 3 - 5 * x + 1 + y, y * 2 + 1 - x * 2)
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod
    # truncdiv
    ck.verify(tdiv(x + y + x + y * 3, 2), y * 2 + x)
    ck.verify(tmod(x + y + x + y * 3, 2), 0)
    # floordiv
    fld = tvm.te.floordiv
    flm = tvm.te.floormod
    ck.verify(flm(x + x + y * 3, 2), flm(y * 3, 2))
    ck.verify(fld(x + y + x + y * 3, 2), y * 2 + x)
    ck.verify(flm(x + y + x + y * 3, 2), 0)
    ck.verify(fld(x + x + y * 3, 2), fld(y * 3, 2) + x)


def test_split_index_simplify():
    ck = CanonicalChecker()
    x, y, z = te.var("x"), te.var("y"), te.var("z")
    # truncdiv
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod
    # split div const
    ck.verify(tdiv(x, 3) * 3 + tmod(x, 3), x)
    ck.verify(tdiv(x, 6) * 6 + tmod(tdiv(x, 3), 2) * 3 + tmod(x, 3), x)
    ck.verify(tdiv(tdiv(tmod(x, 16), 2) * 2, 4), tdiv(tmod(x, 16), 4))
    ck.verify(tdiv(tmod(x, 2), 8), 0)
    ck.verify(tdiv(tmod(x, 2), 7), 0)
    ck.verify(tdiv(tdiv(tmod(x, 16), 2) * 2, 6), tdiv(tmod(x, 16), 6))
    # split mod const
    ck.verify(tmod((x * 8), 16), tmod(x, 2) * 8)
    ck.verify(tmod(x * 8, 2), 0)
    # simplify then fold
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 1000))
    ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1000))
    ck.verify(tdiv(x * 4 + y, 2) * 2 + tmod(x * 4 + y, 2), x * 4 + y)
    # complex fold
    ck.verify(tdiv(z * 9 + y, 2) * 2 + tmod(z * 9 + y, 2), z * 9 + y)
    ck.analyzer.update(x, tvm.arith.ConstIntBound(-100, 1000), True)
    ck.analyzer.update(y, tvm.arith.ConstIntBound(-100, 1000), True)
    ck.verify(tdiv(x * 4 + y, 2) * 2 + tmod(x * 4 + y, 2), x * 4 + y)
    # floordiv
    fld = tvm.te.floordiv
    flm = tvm.te.floormod
    ck.verify(fld(x * 5, 2), fld(x * 5, 2))
    ck.verify(fld(x, 3) * 3 + flm(x, 3), x)
    ck.verify(fld(x, 6) * 6 + flm(fld(x, 3), 2) * 3 + flm(x, 3), x)
    ck.verify(fld(fld(flm(x, 16), 2) * 2, 4), fld(flm(x, 16), 4))
    ck.verify(fld(flm(x, 2), 8), 0)
    ck.verify(fld(flm(x, 2), 7), 0)
    ck.verify(fld(fld(flm(x, 16), 2) * 2, 6), fld(flm(x, 16), 6))
    # cannot simplify mixed case, unless we canonicalize into one mode.
    ck.verify(tdiv(x, 6) * 2 + tmod(fld(x, 3), 2), tdiv(x, 6) * 2 + tmod(fld(x, 3), 2))
    ck.verify(tmod(-x, 2), tmod(x, -2) * -1)


def test_div_simplify():
    ck = CanonicalChecker()
    x = te.var("x")
    tdiv = tvm.tir.truncdiv
    # trunc div
    ck.verify(tdiv(16 + 48 * x, 16), x * 3 + 1)
    # (17+48*x)/16 is not simplifiable for arbitrary x because when 17+48*x<0
    # (17+48*x)/16 != 1+3*x
    ck.verify(tdiv(17 + 48 * x, 16), tdiv(x * 48 + 17, 16))
    # However, when x >= 0, then 17+48*x >= 0 and (17+48*x)/16 can be simplified
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 10))
    ck.verify(tdiv(17 + 48 * x, 16), x * 3 + 1)
    # Trying expressions that are not simplifiable for any values of the variables
    ck.verify(tdiv(17 + 47 * x, 16), tdiv(x * 47 + 17, 16))
    # floordiv
    fld = tvm.te.floordiv
    ck.analyzer.update(x, tvm.arith.ConstIntBound(-1000, 10000), True)
    ck.verify(fld(16 + 48 * x, 16), x * 3 + 1)
    ck.verify(fld(17 + 48 * x, 16), x * 3 + 1)
    ck.verify(fld(17 + 47 * x, 16), fld(x * 47 + 17, 16))


def test_fp16_const_fold():
    ck = CanonicalChecker()
    zero = tvm.tir.const(0, "float16")
    one = tvm.tir.const(1, "float16")
    half = tvm.tir.const(0.5, "float16")
    ck.verify(zero + half, half)
    ck.verify(half - zero, half)
    ck.verify(zero * half, zero)
    ck.verify(half * one, half)
    ck.verify(half / one, half)
    ck.verify(zero / half, zero)


def test_floormod_simplify():
    ck = CanonicalChecker()
    flm = tvm.te.floormod
    x, y = te.var("x"), te.var("y")
    ck.verify(flm(flm((x * 4) + y - 466036, 24528) - 24512, 16), flm((x * 4) + y + 12, 16))
    ck.verify(flm(flm((x * 4), 16), 8), flm(x, 2) * 4)
    ck.verify(flm(-x, 2), flm(x, -2) * -1)


def test_canonical_mixed():
    ck = CanonicalChecker()
    x = te.var("x")
    z = tvm.tir.const(3, "int32")
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod
    ck.verify(tdiv(x, (z * z)) - tdiv(x, (z * z)), 0)
    ck.verify(tdiv(x, (z + z)) - tdiv(x, (z + z)), 0)
    ck.verify(x - 2 < 3, x < 5)
    ck.verify(tvm.te.max(x, 1) - tvm.te.max(x, 1), 0)
    ck.verify(tvm.te.min(x, 1) - tvm.te.min(x, 1), 0)
    ck.verify(x * x - x * x, 0)
    ck.verify(tmod(tdiv(tmod(x, 20), 2) * 2, 4), tdiv(tmod(x, 4), 2) * 2)
    fld = tvm.te.floordiv
    ck.verify(fld(x, (z * z)) - fld(x, (z * z)), 0)
    ck.verify(fld(x, (z + z)) - fld(x, (z + z)), 0)


def test_reduce_combiner_simplify():
    ck = CanonicalChecker()
    dummy = te.var("dummy")
    comm_reducer = te.comm_reducer
    prod = comm_reducer(lambda x, y: x * y, lambda t0: tvm.tir.const(1, t0))
    sum_or_prod = comm_reducer(
        lambda x, y: tvm.tir.Select(dummy < 0, x + y, x * y),
        lambda t0: tvm.tir.Select(dummy < 0, tvm.tir.const(0, t0), tvm.tir.const(1, t0)),
    )
    sum_and_prod = comm_reducer(
        lambda x, y: (x[0] + y[0], x[1] * y[1]),
        lambda t0, t1: (tvm.tir.const(0, t0), tvm.tir.const(5, t1) - tvm.tir.const(4, t1)),
    )
    some_reducer1 = comm_reducer(
        lambda x, y: (
            x[0] + y[0],
            x[0] + y[0] + x[1] + y[1],
            x[0] * y[2] + y[0] * x[2],
            x[1] + y[2],
            4.0,
        ),
        lambda t0, t1, t2, t3, t4: (
            tvm.tir.const(0, t0),
            tvm.tir.const(1, t1),
            tvm.tir.const(2, t2),
            tvm.tir.const(3, t3),
            tvm.tir.const(4, t4),
        ),
    )
    k = te.reduce_axis((0, 10), name="k")
    A = te.placeholder((10,), name="A")
    # Test that SimplifyCombiner makes use of vranges
    ck.analyzer.update(dummy, tvm.arith.ConstIntBound(-10, -4))
    ck.verify(sum_or_prod(A[k], k), te.sum(A[k], k))
    ck.verify(sum_or_prod(A[k], k, init=1), te.sum(A[k], k, init=1))
    ck.analyzer.update(dummy, tvm.arith.ConstIntBound(5, 9), True)
    ck.verify(sum_or_prod(A[k], k), prod(A[k], k))
    ck.verify(sum_or_prod(A[k], k, init=1), prod(A[k], k, init=1))
    ck.analyzer.update(dummy, tvm.arith.ConstIntBound(-10, 100), True)
    ck.verify(sum_and_prod((A[k], A[10 - k]), k)[0], te.sum(A[k], k))
    ck.verify(sum_and_prod((A[k], A[10 - k]), k)[1], prod(A[10 - k], k))
    reference_simplified_sources = [
        [A[0]],
        [A[0], A[1]],
        [A[0], A[2]],
        [A[0], A[1], A[2], A[3]],
        [A[4]],
    ]
    for j in range(5):
        # Here we use the j-th component of the result, so only it and the components it
        # depends on are left.
        simplified = ck.analyzer.canonical_simplify(
            some_reducer1((A[0], A[1], A[2], A[3], A[4]), k)[j]
        )
        # Check that the remaining components are the expected ones.
        for lhs, rhs in zip(simplified.source, reference_simplified_sources[j]):
            assert tvm.ir.structural_equal(lhs, rhs)
    # Test that components with side effects are not removed
    dummy = tvm.ir.GlobalVar("dummy")
    side_effect = lambda *xs: tvm.tir.Call("int32", dummy, xs)
    ck.verify(
        sum_and_prod((A[k], side_effect(A[10 - k])), k)[0],
        sum_and_prod((A[k], side_effect(A[10 - k])), k)[0],
    )
    ck.verify(sum_and_prod((side_effect(A[k]), A[10 - k]), k)[0], te.sum(side_effect(A[k]), k))


def test_reduce_simplify():
    ck = CanonicalChecker()
    k = te.reduce_axis((0, 10), name="k")
    j = te.reduce_axis((-5, 3), name="j")
    A = te.placeholder((10,), name="A")
    ck.verify(te.sum(tvm.tir.Select(k + j < 12, k + j, 0), [k, j]), te.sum(k + j, [k, j]))
    ck.verify(te.sum(A[3], []), A[3])
    ck.verify(te.sum(A[3], [], where=k > 12, init=1.0), tvm.tir.const(1.0, dtype="float32"))
    # The rule below is not typical, removed for now
    ck.verify(te.sum(te.div(k, 10), k), te.sum(tvm.tir.const(0, "int32"), k))


def test_simplify_if_then_else():
    ck = CanonicalChecker()
    x = te.var("x")
    y = te.var("y")
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod
    # simplification that takes condition into account.
    res = tvm.tir.if_then_else(
        (x * 4 + y) >= 466036,
        tvm.tir.if_then_else(
            24512 <= tmod(((x * 4) + y) - 466036, 24528),
            tmod(tmod(((x * 4) + y) - 466036, 24528) - 24512, 16),
            x,
        ),
        y,
    )
    res2 = tvm.tir.if_then_else(
        (x * 4) >= 466036 - y,
        tvm.tir.if_then_else(
            24512 <= tmod(((x * 4) + y) - 466036, 24528),
            tmod(tmod(((x * 4) + y) - 466036, 24528) - 24512, 16),
            x,
        ),
        y,
    )
    expected = tvm.tir.if_then_else(
        tvm.tir.LE(466036, (x * 4 + y)),
        tvm.tir.if_then_else(
            tvm.tir.LE(24512, tmod(((x * 4) + y) - 4, 24528)), tmod(((x * 4) + y) - 4, 16), x
        ),
        y,
    )
    ck.verify(res, expected)
    ck.verify(res2, expected)
    # can only simplify if condition
    res = tvm.tir.Select(tvm.tir.all(x >= -1, y >= 0), tmod(x + y + 100, 3), tmod(x + 100, 3))
    expected = tvm.tir.Select(tvm.tir.all(x >= -1, y >= 0), tmod(x + y + 1, 3), tmod(x + 100, 3))
    ck.verify(res, ck.analyzer.canonical_simplify(expected))
    res = tvm.tir.Select(x >= 10, tvm.tir.if_then_else(tdiv(x, 3) > 2, x, 0), 0)
    expected = tvm.tir.Select(x >= 10, x, 0)
    ck.verify(res, ck.analyzer.canonical_simplify(expected))
    res = tvm.tir.Select(x >= 10, tvm.tir.if_then_else(tdiv(x, 3) < 2, x, 0), 0)
    ck.verify(res, 0)


def test_complex_cases():
    ck = CanonicalChecker()
    x = te.var("x")
    y = te.var("y")
    tdiv = tvm.tir.truncdiv
    tmod = tvm.tir.truncmod
    res2 = (
        tdiv(tdiv(tmod(x * 128 + y, 1296), 36) * 2 + 1, 2) * 36
        + tdiv(tmod((x * 128) + y, 36) * 2 + 1, 2)
        - tmod((x * 128) + y, 1296)
        + 1
    )
    ck.analyzer.update(x, tvm.arith.ConstIntBound(0, 5))
    ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 127))
    ck.verify(res2, 1)
    ck.analyzer.update(y, tvm.arith.ConstIntBound(0, 1024), True)
    res3 = (
        tdiv(x * 1024 + y, 65536)
        + tdiv(tmod(x * 1024 + y, 65536), 256)
        + tdiv(tmod(x * 1024 + y, 256), 16)
        + tmod(x * 1024 + y, 16)
        - tdiv(y, 256)
        - tdiv(tmod(y, 256), 16)
        - tmod(y, 16)
        - (x * 4)
    )
    ck.verify(res3, tdiv((x * 1024) + y, 256) - tdiv(y, 256) - (x * 4))


def test_simplify_cast():
    ck = CanonicalChecker()
    tcast = tvm.tir.Cast
    fld = tvm.te.floordiv
    flm = tvm.te.floormod
    # cast(i64, i + j + 1) - cast(i64, i)
    i = te.var("i", dtype="int32")
    j = te.var("j", dtype="int32")
    res = tcast("int64", i + j + 1) - tcast("int64", i)
    ck.verify(res, tcast("int64", j) + tvm.tir.const(1, "int64"))
    # cast(i32, i + j + 1) - cast(i32, i)
    i = te.var("i", dtype="int64")
    j = te.var("j", dtype="int64")
    ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 10))
    ck.analyzer.update(j, tvm.arith.ConstIntBound(0, 10))
    res = tcast("int32", i + j + 1) - tcast("int32", i)
    ck.verify(res, tcast("int32", j) + 1)
    # cast(i32, i + j - 100)
    i = te.var("i", dtype="int64")
    j = te.var("j", dtype="int64")
    ck.analyzer.update(i, tvm.arith.ConstIntBound(0, 2**31 - 1))
    ck.analyzer.update(j, tvm.arith.ConstIntBound(0, 10))
    res = tcast("int32", i + j - 100)
    ck.verify(res, res)
    # cast(i32, flm(axis, 7i64) * 2i64 + 1i64) + 1i32
    # - cast(i32, flm(axis, 7i64) * 2i64)
    axis = te.var("axis", dtype="int64")
    ck.analyzer.update(axis, tvm.arith.ConstIntBound(0, 42))
    res = (
        tcast(
            "int32",
            flm(axis, tvm.tir.const(7, "int64")) * tvm.tir.const(2, "int64")
            + tvm.tir.const(1, "int64"),
        )
        + tvm.tir.const(1, "int32")
        - tcast("int32", flm(axis, tvm.tir.const(7, "int64")) * tvm.tir.const(2, "int64"))
    )
    ck.verify(res, 2)


def test_simplify_normalize_min_value_expr():
    ck = CanonicalChecker()
    x = te.var("x", "int32")
    ck.verify(te.min_value("int32") - x == 0, x == te.min_value("int32"))
    ck.verify(te.min_value("int32") + x == 0, False)
    ck.verify(0 == te.min_value("int32") - x, x == te.min_value("int32"))
    ck.verify(0 == te.min_value("int32") + x, False)
    ck.verify(-x + te.min_value("int32") == 0, x == te.min_value("int32"))
    ck.verify(x + te.min_value("int32") == 0, False)
    ck.verify(0 == -x + te.min_value("int32"), x == te.min_value("int32"))
    ck.verify(0 == x + te.min_value("int32"), False)


def test_proddiv_simplify():
    ck = CanonicalChecker()
    flm = tvm.te.floormod
    fld = tvm.te.floordiv
    tdiv = tvm.te.truncdiv
    tmod = tvm.te.truncmod
    # NOTE(review): `z` was originally created as `te.var("y")`; renamed the
    # display name to "z" for clarity (Vars compare by identity, so behavior
    # is unchanged).
    x, y, z = te.var("x"), te.var("y"), te.var("z")
    ck.verify(flm(x * 32 * x, x), 0)
    ck.verify(flm(z * x * 32 * x * y, x * z), 0)
    ck.verify(flm(z * x * 32 * x * y, x * z * y * 8 * x), 0)
    ck.verify(flm(z * x * 32 * (x * y), 6 * x * z), flm(x * y * 16, 3) * (x * z * 2))
    ck.verify(flm(x * 32 * x, x * z), flm(x * 32, z) * x)
    ck.verify(tmod(x * 32 * x, x), 0)
    ck.verify(tmod(z * x * 32 * x * y, x * z), 0)
    ck.verify(tmod(z * x * 32 * (x * y), 6 * x * z), tmod(x * y * 16, 3) * (x * z * 2))
    ck.verify(tmod(x * 32 * x, x * z), tmod(x * 32, z) * x)
    ck.verify(fld(x * 2 * x * z, 4 * x * x * x), fld(z, x * 2))
    ck.verify(fld(x * (2 * y) * 3, 3 * y), x * 2)
    ck.verify(fld(x * (2 * y) * 3, 3 * y * z), fld(x * 2, z))
    ck.verify(tdiv(x * 2 * x * z, 4 * x * x * x), tdiv(z, x * 2))
    ck.verify(tdiv(x * (2 * y) * 3, 3 * y), x * 2)
    ck.verify(tdiv(x * (2 * y) * 3, 3 * y * z), tdiv(x * 2, z))


def test_floormod_two():
    ck = CanonicalChecker()
    flm = tvm.te.floormod
    x, y = te.var("x"), te.var("y")
    ck.verify(flm(x * 10 + 1 + y * 2 + 2, 2), 1)


if __name__ == "__main__":
    tvm.testing.main()
15,246
34.70726
97
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_rolling_buffer.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import numpy as np import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip import pytest def check_rolling_buffer( sch: tir.Schedule, origin: tir.PrimFunc, expected: tir.PrimFunc, check_run=False ): scheduled = sch.mod["main"] tvm.ir.assert_structural_equal(scheduled, expected) verify_trace_roundtrip(sch, origin) if check_run: in_buffer = origin.buffer_map[origin.params[0]] out_buffer = origin.buffer_map[origin.params[1]] in_shape = [int(_) for _ in in_buffer.shape] out_shape = [int(_) for _ in out_buffer.shape] x = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buffer.dtype)) y0 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype)) y1 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype)) f_origin = tvm.build(origin) f_scheduled = tvm.build(scheduled) f_origin(x, y0) f_scheduled(x, y1) tvm.testing.assert_allclose(y0.numpy(), y1.numpy()) def _tile_nd(s, tile, block_name): outer_indices = [] inner_indices = [] block = s.get_block(block_name) loops = s.get_loops(block) for i, size in enumerate(tile): outer, inner = 
s.split(loops[i], [None, size]) outer_indices.append(outer) inner_indices.append(inner) s.reorder(*outer_indices, *inner_indices) return outer_indices, inner_indices def test_1d_rolling_buffer(): @T.prim_func def before(A: T.Buffer((4, 12), "int32"), C: T.Buffer((4, 8), "int32")): B = T.alloc_buffer((4, 10), "int32") for c in T.serial(4): for i in T.serial(0, 10): for k in T.serial(3): with T.block("B"): cc, vi, vk = T.axis.remap("SSR", [c, i, k]) with T.init(): B[cc, vi] = 0 B[cc, vi] = B[cc, vi] + A[cc, vi + vk] for i in T.serial(0, 8): for k in T.serial(3): with T.block("C"): cc, vi, vk = T.axis.remap("SSR", [c, i, k]) with T.init(): C[cc, vi] = 0 C[cc, vi] = C[cc, vi] + B[cc, vi + vk] @T.prim_func def expected(A: T.Buffer((4, 12), "int32"), C: T.Buffer((4, 8), "int32")): B = T.alloc_buffer([4, 6], dtype="int32") for c, i_0 in T.grid(4, 2): for ax0, ax1 in T.grid(6, 3): with T.block("B"): T.where(i_0 < 1 or 2 <= ax0) cc = T.axis.spatial(4, c) vi = T.axis.opaque(10, i_0 * 4 + ax0) vk = T.axis.reduce(3, ax1) T.reads(A[cc, vi + vk]) T.writes(B[cc, vi % 6]) with T.init(): B[cc, vi % 6] = 0 B[cc, vi % 6] = B[cc, vi % 6] + A[cc, vi + vk] for i_1, k in T.grid(4, 3): with T.block("C"): cc = T.axis.spatial(4, c) vi = T.axis.opaque(8, i_0 * 4 + i_1) vk = T.axis.reduce(3, k) T.reads(B[cc, (vi + vk) % 6]) T.writes(C[cc, vi]) with T.init(): C[cc, vi] = 0 C[cc, vi] = C[cc, vi] + B[cc, (vi + vk) % 6] sch = tir.Schedule(before, debug_mask="all") _, i, _ = sch.get_loops(sch.get_block("C")) io, _ = sch.split(i, [2, 4]) sch.compute_at(sch.get_block("B"), io) sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, before, expected, check_run=True) @T.prim_func def cascade_2_max_pool2d(A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")): B = T.alloc_buffer([1, 10, 10, 16], dtype="int8") for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3): with T.block("B"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with 
T.init(): B[ax0, ax1, ax2, ax3] = T.int8(-128) B[ax0, ax1, ax2, ax3] = T.max(B[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3]) for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3): with T.block("C"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max(C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3]) @T.prim_func def cascade_3_max_pool2d_with_stride( A: T.Buffer((1, 24, 24, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8") ): B_0 = T.alloc_buffer([1, 22, 22, 16], dtype="int8") B_1 = T.alloc_buffer([1, 10, 10, 16], dtype="int8") for i0, i1, i2, i3, i4, i5 in T.grid(1, 22, 22, 16, 3, 3): with T.block("B_0"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): B_0[ax0, ax1, ax2, ax3] = T.int8(-128) B_0[ax0, ax1, ax2, ax3] = T.max( B_0[ax0, ax1, ax2, ax3], A[ax0, ax1 + rv0, ax2 + rv1, ax3] ) for i0, i1, i2, i3, i4, i5 in T.grid(1, 10, 10, 16, 3, 3): with T.block("B_1"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): B_1[ax0, ax1, ax2, ax3] = T.int8(-128) B_1[ax0, ax1, ax2, ax3] = T.max( B_1[ax0, ax1, ax2, ax3], B_0[ax0, ax1 * 2 + rv0, ax2 * 2 + rv1, ax3] ) for i0, i1, i2, i3, i4, i5 in T.grid(1, 8, 8, 16, 3, 3): with T.block("C"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B_1[ax0, ax1 + rv0, ax2 + rv1, ax3] ) def test_cascade_max_pool2d_w_tiled(): @T.prim_func def expected(A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")): B = T.alloc_buffer([1, 10, 6, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 1, 2, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(10, 6, 16, 3, 3): with T.block("B"): T.where(i2_0 < 1 or 2 <= ax1) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(10, ax0) ax2_1 
= T.axis.opaque(10, i2_0 * 4 + ax1) ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1, ax2_1 % 6, ax3_1]) with T.init(): B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.int8(-128) B[ax0_1, ax1_1, ax2_1 % 6, ax3_1] = T.max( B[ax0_1, ax1_1, ax2_1 % 6, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 8, 4, 16, 3, 3): with T.block("C"): ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.spatial(8, i1_0 * 8 + i1_1) ax2 = T.axis.opaque(8, i2_0 * 4 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, (ax2 + rv1) % 6, ax3] ) sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all") oi, _ = _tile_nd(sch, [1, 8, 4, 16], "C") sch.compute_at(sch.get_block("B"), oi[-1]) sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True) def test_cascade_max_pool2d_h_tiled(): @T.prim_func def expected(A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")): B = T.alloc_buffer([1, 6, 10, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 1, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 10, 16, 3, 3): with T.block("B"): T.where(i1_0 < 1 or 2 <= ax0) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0) ax2_1 = T.axis.spatial(10, ax1) ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1]) with T.init(): B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max( B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 8, 16, 3, 3): with 
T.block("C"): ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1) ax2 = T.axis.spatial(8, i2_0 * 8 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3] ) sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all") io, _ = _tile_nd(sch, [1, 4, 8, 16], "C") sch.compute_at(sch.get_block("B"), io[-1]) sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True) def test_cascade_max_pool2d_h_w_c_tiled(): @T.prim_func def expected(A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")): B = T.alloc_buffer([1, 6, 10, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 2): for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 8, 3, 3): with T.block("B"): T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1)) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.opaque(10, i1_0 * 4 + ax0) ax2_1 = T.axis.spatial(10, i2_0 * 4 + ax1) ax3_1 = T.axis.spatial(16, i3_0 * 8 + ax2) rv0, rv1 = T.axis.remap("RR", [ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1 % 6, ax2_1, ax3_1]) with T.init(): B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1 % 6, ax2_1, ax3_1] = T.max( B[ax0_1, ax1_1 % 6, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 8, 3, 3): with T.block("C"): ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.opaque(8, i1_0 * 4 + i1_1) ax2 = T.axis.spatial(8, i2_0 * 4 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 8 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = 
T.max( C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 6, ax2 + rv1, ax3] ) sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all") io, _ = _tile_nd(sch, [1, 4, 4, 8], "C") sch.compute_at(sch.get_block("B"), io[-1]) sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, cascade_2_max_pool2d, expected, check_run=True) def test_cascade_max_pool2d_non_perfect_tiled(): @T.prim_func def expected(A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")) -> None: B = T.alloc_buffer([1, 8, 10, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(8, 8, 16, 3, 3): with T.block("B"): T.where( i1_0 * 6 + ax0 < 10 and i2_0 * 6 + ax1 < 10 and (i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1) ) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.opaque(10, i1_0 * 6 + ax0) ax2_1 = T.axis.spatial(10, i2_0 * 6 + ax1) ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1 % 8, ax2_1, ax3_1]) with T.init(): B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1 % 8, ax2_1, ax3_1] = T.max( B[ax0_1, ax1_1 % 8, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 6, 6, 16, 3, 3): with T.block("C"): T.where(i1_0 * 6 + i1_1 < 8 and i2_0 * 6 + i2_1 < 8) ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.opaque(8, i1_0 * 6 + i1_1) ax2 = T.axis.spatial(8, i2_0 * 6 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, (ax1 + rv0) % 8, ax2 + rv1, ax3] ) sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all") io, _ = _tile_nd(sch, [1, 6, 6, 16], "C") sch.compute_at(sch.get_block("B"), io[-1]) sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, 
cascade_2_max_pool2d, expected, check_run=True) def test_cascade_3_max_pool2d_with_stride(): @T.prim_func def expected(A: T.Buffer((1, 24, 24, 16), "int8"), C: T.Buffer((1, 8, 8, 16), "int8")) -> None: B_0 = T.alloc_buffer([1, 13, 22, 16], dtype="int8") B_1 = T.alloc_buffer([1, 6, 10, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 2, 2, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(13, 13, 16, 3, 3): with T.block("B_0"): T.where((i1_0 < 1 or 5 <= ax0) and (i2_0 < 1 or 5 <= ax1)) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.opaque(22, i1_0 * 8 + ax0) ax2_1 = T.axis.spatial(22, i2_0 * 8 + ax1) ax3_1, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1]) with T.init(): B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.int8(-128) B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1] = T.max( B_0[ax0_1, ax1_1 % 13, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1], ) for ax0, ax1, ax2, ax3, ax4 in T.grid(6, 6, 16, 3, 3): with T.block("B_1"): T.where((i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1)) ax0_2 = T.axis.spatial(1, 0) ax1_2 = T.axis.opaque(10, i1_0 * 4 + ax0) ax2_2 = T.axis.spatial(10, i2_0 * 4 + ax1) ax3_2, rv0, rv1 = T.axis.remap("SRR", [ax2, ax3, ax4]) T.reads(B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2]) T.writes(B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2]) with T.init(): B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.int8(-128) B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2] = T.max( B_1[ax0_2, ax1_2 % 6, ax2_2, ax3_2], B_0[ax0_2, (ax1_2 * 2 + rv0) % 13, ax2_2 * 2 + rv1, ax3_2], ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 3, 3): with T.block("C"): ax0_3 = T.axis.spatial(1, i0_0 + i0_1) ax1_3 = T.axis.opaque(8, i1_0 * 4 + i1_1) ax2_3 = T.axis.spatial(8, i2_0 * 4 + i2_1) ax3_3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3]) T.writes(C[ax0_3, ax1_3, ax2_3, ax3_3]) with T.init(): C[ax0_3, 
ax1_3, ax2_3, ax3_3] = T.int8(-128) C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max( C[ax0_3, ax1_3, ax2_3, ax3_3], B_1[ax0_3, (ax1_3 + rv0) % 6, ax2_3 + rv1, ax3_3], ) sch = tir.Schedule(cascade_3_max_pool2d_with_stride, debug_mask="all") io, _ = _tile_nd(sch, [1, 4, 4, 16], "C") sch.compute_at(sch.get_block("B_1"), io[-1]) sch.compute_at(sch.get_block("B_0"), io[-1]) sch.rolling_buffer(sch.get_block("B_0"), 0) sch.rolling_buffer(sch.get_block("B_1"), 0) check_rolling_buffer(sch, cascade_3_max_pool2d_with_stride, expected, check_run=True) def test_upscale(): @T.prim_func def before(A: T.Buffer((1, 16, 16, 16), "int8"), C: T.Buffer((1, 24, 24, 16), "int8")) -> None: B = T.alloc_buffer([1, 14, 14, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3): with T.block("B"): T.where(i1_0 * 5 // 2 + ax0 < 14 and i2_0 * 5 // 2 + ax1 < 14) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(14, i1_0 * 5 // 2 + ax0) ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1) ax3_1 = T.axis.spatial(16, ax2) rv0, rv1 = T.axis.remap("RR", [ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1]) with T.init(): B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1, ax2_1, ax3_1] = T.max( B[ax0_1, ax1_1, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3): with T.block("C"): T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24) ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.spatial(24, i1_0 * 5 + i1_1) ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, ax1 // 2 + rv0, ax2 // 2 + rv1, ax3] ) @T.prim_func def expected( A: T.Buffer((1, 16, 16, 
16), "int8"), C: T.Buffer((1, 24, 24, 16), "int8") ) -> None: B = T.alloc_buffer([1, 5, 14, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 5, 5, 1): for ax0, ax1, ax2, ax3, ax4 in T.grid(5, 5, 16, 3, 3): with T.block("B"): T.where( i1_0 * 5 // 2 + ax0 < 14 and i2_0 * 5 // 2 + ax1 < 14 and (i1_0 < 1 or 2 <= ax0) and (i2_0 < 1 or 2 <= ax1) ) ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.opaque(14, i1_0 * 5 // 2 + ax0) ax2_1 = T.axis.spatial(14, i2_0 * 5 // 2 + ax1) ax3_1 = T.axis.spatial(16, ax2) rv0, rv1 = T.axis.remap("RR", [ax3, ax4]) T.reads(A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1]) T.writes(B[ax0_1, ax1_1 % 5, ax2_1, ax3_1]) with T.init(): B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1 % 5, ax2_1, ax3_1] = T.max( B[ax0_1, ax1_1 % 5, ax2_1, ax3_1], A[ax0_1, ax1_1 + rv0, ax2_1 + rv1, ax3_1] ) for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 5, 5, 16, 3, 3): with T.block("C"): T.where(i1_0 * 5 + i1_1 < 24 and i2_0 * 5 + i2_1 < 24) ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.opaque(24, i1_0 * 5 + i1_1) ax2 = T.axis.spatial(24, i2_0 * 5 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, (ax1 // 2 + rv0) % 5, ax2 // 2 + rv1, ax3] ) sch = tir.Schedule(before, debug_mask="all") sch.rolling_buffer(sch.get_block("B"), 0) check_rolling_buffer(sch, before, expected, check_run=True) def test_fail_rolling_buffer_multi_writers(): @T.prim_func def func_multi_writers( A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 12, 12, 16), "int8") ): B = T.alloc_buffer([1, 12, 12, 16], dtype="int8") for i0, i1, i2, i3 in T.grid(1, 3, 3, 1): for ax0, ax1, ax2 in T.grid(6, 6, 16): with T.block("B_writer_0"): ax0_1 = T.axis.spatial(1, i0) ax1_1 = T.axis.spatial(12, i1 * 4 + ax0) ax2_1 = T.axis.spatial(12, i2 * 4 + 
ax1) ax3_1 = T.axis.spatial(16, ax2) with T.init(): B[ax0_1, ax1_1, ax2_1, ax3_1] = T.int8(-128) B[ax0_1, ax1_1, ax2_1, ax3_1] = A[ax0_1, ax1_1, ax2_1, ax3_1] + T.int8(1) for ax0, ax1, ax2 in T.grid(6, 6, 16): with T.block("B_writer_1"): ax0_2 = T.axis.spatial(1, i0) ax1_2 = T.axis.spatial(12, i1 * 4 + ax0) ax2_2 = T.axis.spatial(12, i2 * 4 + ax1) ax3_2 = T.axis.spatial(16, ax2) with T.init(): B[ax0_2, ax1_2, ax2_2, ax3_2] = T.int8(-128) B[ax0_2, ax1_2, ax2_2, ax3_2] = B[ax0_2, ax1_2, ax2_2, ax3_2] + A[ ax0_2, ax1_2, ax2_2, ax3_2 ] * T.int8(2) for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 4, 4, 16, 3, 3): with T.block("C"): ax0_3 = T.axis.spatial(1, i0 + ax0) ax1_3 = T.axis.spatial(12, i1 * 4 + ax1) ax2_3 = T.axis.spatial(12, i2 * 4 + ax2) ax3_3 = T.axis.spatial(16, i3 * 16 + ax3) rv0, rv1 = T.axis.remap("RR", [ax4, ax5]) with T.init(): C[ax0_3, ax1_3, ax2_3, ax3_3] = T.int8(-128) C[ax0_3, ax1_3, ax2_3, ax3_3] = T.max( C[ax0_3, ax1_3, ax2_3, ax3_3], B[ax0_3, ax1_3 + rv0, ax2_3 + rv1, ax3_3] ) sch = tir.Schedule(func_multi_writers, debug_mask="all") with pytest.raises(tvm.tir.ScheduleError): sch.rolling_buffer(sch.get_block("B_writer_0"), 0) def test_fail_rolling_buffer_not_match(): @T.prim_func def func_non_overlap( A: T.Buffer((1, 12, 12, 16), "int8"), C: T.Buffer((1, 12, 12, 16), "int8") ): B = T.alloc_buffer([1, 12, 12, 16], dtype="int8") for i0_0, i1_0, i2_0, i3_0 in T.grid(1, 3, 3, 1): for ax0, ax1, ax2 in T.grid(4, 4, 16): with T.block("B"): ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(12, i1_0 * 4 + ax0) ax2_1 = T.axis.spatial(12, i2_0 * 4 + ax1) ax3 = T.axis.spatial(16, ax2) T.reads(A[ax0_1, ax1_1, ax2_1, ax3]) T.writes(B[ax0_1, ax1_1, ax2_1, ax3]) with T.init(): B[ax0_1, ax1_1, ax2_1, ax3] = T.int8(-128) B[ax0_1, ax1_1, ax2_1, ax3] = A[ax0_1, ax1_1, ax2_1, ax3] for i0_1, i1_1, i2_1, i3_1, i4, i5 in T.grid(1, 4, 4, 16, 1, 1): with T.block("C"): ax0 = T.axis.spatial(1, i0_0 + i0_1) ax1 = T.axis.spatial(12, i1_0 * 4 + i1_1) ax2 = T.axis.spatial(12, 
i2_0 * 4 + i2_1) ax3 = T.axis.spatial(16, i3_0 * 16 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(B[ax0, ax1 + rv0, ax2 + rv1, ax3]) T.writes(C[ax0, ax1, ax2, ax3]) with T.init(): C[ax0, ax1, ax2, ax3] = T.int8(-128) C[ax0, ax1, ax2, ax3] = T.max( C[ax0, ax1, ax2, ax3], B[ax0, ax1 + rv0, ax2 + rv1, ax3] ) sch = tir.Schedule(func_non_overlap, debug_mask="all") with pytest.raises(tvm.tir.ScheduleError): sch.rolling_buffer(sch.get_block("B"), 0) def test_fail_rolling_buffer_injection_invalid(): sch = tir.Schedule(cascade_2_max_pool2d, debug_mask="all") # Block B is not compute_at to Block C, so rolling_buffer injection is invalid. _, _ = _tile_nd(sch, [1, 4, 8, 16], "C") _, _ = _tile_nd(sch, [1, 4, 8, 16], "B") with pytest.raises(tvm.tir.ScheduleError): sch.rolling_buffer(sch.get_block("B"), 0) if __name__ == "__main__": tvm.testing.main()
28,359
48.407666
100
py
tvm
tvm-main/tests/python/unittest/test_tir_ops.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def check_throws(f): try: f() except tvm.error.TVMError: pass else: raise AssertionError("Should have raised an exception but didn't.") def test_const_fold(): def check(f, *args): x = f(*[tvm.tir.const(x, "int32") for x in args]) y = f(*args) if not isinstance(x, (tvm.tir.IntImm,)) or x.value != int(y): raise ValueError("check error: %s vs %s " % (x, y)) tmod = tvm.tir.truncmod check(lambda x, y: x + y, 3, 4) check(lambda x, y: x * y, 3, 12) check(lambda x, y: x * y - 10, 3, 12) check(lambda x, y: x - tmod(y, 10), 3, 12) check(lambda x, y: x // y + 10, 100, 12) check(lambda x, y: x & y + 10, 112, 128) check(lambda x, y: x > y, 112, 128) check(lambda x, y: x < y, 112, 128) check(lambda x, y: x <= y, 112, 128) check(lambda x, y: x >= y, 112, 128) check(lambda x, y: (x | y) ^ 10, 112, 128) def test_const_fold2(): x = te.var("x") tmod = tvm.tir.truncmod tdiv = tvm.tir.truncdiv assert (x + 0).same_as(x) assert (0 + x).same_as(x) assert (x - 0).same_as(x) assert tmod(x, 1).value == 0 assert (x * 1).same_as(x) assert (1 * x).same_as(x) assert isinstance(tdiv(1, x), tvm.tir.Div) def test_const_fold3(): # Test that using ints with logic operations is forbidden x = te.var("x") for 
val in [0, 1]: for func in [tvm.tir.all, tvm.tir.any]: check_throws(lambda: func(tvm.tir.const(val, "uint1"), x)) check_throws(lambda: func(x, tvm.tir.const(val, "uint1"))) # Test const folding when both arguments are const for tvm_func, py_func in [ (tvm.tir.all, lambda a, b: a and b), (tvm.tir.any, lambda a, b: a or b), ]: for v1 in [0, 1]: for v2 in [0, 1]: assert tvm.ir.structural_equal( tvm_func(tvm.tir.const(v1, "uint1"), tvm.tir.const(v2, "uint1")), tvm.tir.const(py_func(v1, v2), "uint1"), ) x = te.var("x", "uint1") true = tvm.tir.const(1, "uint1") false = tvm.tir.const(0, "uint1") assert tvm.tir.all(x, true).same_as(x) assert tvm.tir.all(true, x).same_as(x) assert tvm.tir.any(x, false).same_as(x) assert tvm.tir.any(false, x).same_as(x) assert tvm.tir.all(x, false).same_as(false) assert tvm.tir.all(false, x).same_as(false) assert tvm.tir.any(x, true).same_as(true) assert tvm.tir.any(true, x).same_as(true) def test_const_fold4(): x1 = tvm.tir.const(4, "int32") x2 = x1 + 5 tdiv = tvm.tir.truncdiv assert isinstance(x2, tvm.tir.IntImm) and x2.value == 9 x3 = tdiv(x2, 3) assert isinstance(x3, tvm.tir.IntImm) and x3.value == 3 x4 = x3 + 0.55 assert isinstance(x4, tvm.tir.FloatImm) and abs(x4.value - 3.55) < 1e-6 x5 = te.ceil(x4) assert isinstance(x5, tvm.tir.FloatImm) and x5.value == 4 x6 = x5.astype("int") assert isinstance(x6, tvm.tir.IntImm) and x6.value == 4, "x6={}".format(x6) y = (te.round((tvm.tir.const(6.5, "float32") - 1) / 1.5) + 2).astype("int") assert isinstance(y, tvm.tir.IntImm) and y.value == 6 def test_binary_dtype_match(): def verify_general_dtype_support(f, is_conditional=False): rules = [ [("bool", "int32"), "int32"], [("int32", "float32"), "float32"], [("int32", "int64"), "int64"], [("uint32", "int8"), "uint32"], [("uint32", "int32"), "uint32"], ] for (lhs_dtype, rhs_dtype), out_dtype in rules: lhs = te.var("lhs", dtype=lhs_dtype) rhs = te.var("rhs", dtype=rhs_dtype) out = f(lhs, rhs) if not is_conditional: assert out.dtype == out_dtype else: 
assert out.dtype == "bool" if hasattr(out, "a"): assert out.a.dtype == out_dtype assert out.b.dtype == out_dtype elif hasattr(out, "args"): # CallOp assert out.args[0].dtype == out_dtype assert out.args[1].dtype == out_dtype else: raise ValueError("Unknown binary op format!") def verify_callop_float_only(f): for lhs_dtype in ["int32", "float32", "float64"]: for rhs_dtype in ["int32", "float32", "float64"]: lhs = te.var("lhs", dtype=lhs_dtype) rhs = te.var("rhs", dtype=rhs_dtype) if "float" not in lhs_dtype and "float" not in rhs_dtype: check_throws(lambda: f(lhs, rhs)) elif "float" in lhs_dtype: out = f(lhs, rhs) # Upcasting for floating point types dtypes = [lhs_dtype, rhs_dtype] if "float64" in dtypes: target_dtype = "float64" elif "float32" in dtypes: target_dtype = "float32" else: target_dtype = "int32" assert out.dtype == target_dtype # Final inputs are the right type assert out.args[0].dtype == target_dtype assert out.args[1].dtype == target_dtype else: out = f(lhs, rhs) assert out.dtype == rhs_dtype assert out.args[0].dtype == rhs_dtype assert out.args[1].dtype == rhs_dtype verify_general_dtype_support(lambda a, b: a + b) verify_general_dtype_support(lambda a, b: a * b) verify_general_dtype_support(lambda a, b: a >= b, is_conditional=True) verify_general_dtype_support(lambda a, b: a <= b, is_conditional=True) verify_callop_float_only(lambda a, b: te.power(a, b)) # verify bool & int32 constant folding assert tvm.tir.const(1) == tvm.tir.const(True) assert tvm.tir.const(2) != tvm.tir.const(True) def test_if_then_else(): cases = [ [(te.var("cond", dtype="bool"), "bool", "int32"), "int32"], [(True, "int32", "float32"), "float32"], [(False, "int32", "int64"), "int64"], [(te.var("cond", dtype="bool"), "uint32", "int32"), "uint32"], [(te.var("cond", dtype="int32"), "uint32", "int32"), "uint32"], ] for (cond, lhs_dtype, rhs_dtype), out_dtype in cases: lhs = te.var("lhs", dtype=lhs_dtype) rhs = te.var("rhs", dtype=rhs_dtype) if cond is True or cond is False: out = 
tvm.tir.if_then_else(cond, lhs, rhs) out2 = tvm.tir.if_then_else(not cond, rhs, lhs) out3 = tvm.tir.if_then_else(not cond, lhs, rhs) assert tvm.ir.structural_equal(out, out2) == 1 if cond: assert tvm.ir.structural_equal(out, lhs.astype(out_dtype)) == 1 assert tvm.ir.structural_equal(out3, rhs.astype(out_dtype)) == 1 else: assert tvm.ir.structural_equal(out, rhs.astype(out_dtype)) == 1 assert tvm.ir.structural_equal(out3, lhs.astype(out_dtype)) == 1 elif cond.dtype == "bool": out = tvm.tir.if_then_else(cond, lhs, rhs) assert out.dtype == out_dtype assert out.args[1].dtype == out_dtype assert out.args[2].dtype == out_dtype elif cond.dtype != "bool": check_throws(lambda: tvm.tir.if_then_else(cond, lhs, rhs)) else: raise ValueError("Unknown combinations") if __name__ == "__main__": test_const_fold() test_const_fold2() test_const_fold3() test_const_fold4() test_binary_dtype_match() test_if_then_else()
8,515
37.188341
85
py
tvm
tvm-main/tests/python/unittest/test_slice_tir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T import pytest # --------------------------------------------------------------------------------------------------- # ABOUT THIS FILE: # --------------------------------------------------------------------------------------------------- # We (cconvey / OctoML) are working on a sequence of PRs to allow a single TIR primfunc's # AST to be sliced into multiple partitiones, where each partition will be converted into # a new TIR primfunc. (See https://en.wikipedia.org/wiki/Program_slicing). # # The unit tests below provide a roadmap for that sequence of PRs; each PR should allow # one more of these tests to pass. # # NOTE: These unit tests may change as work progresses. They aren't meant to # indicate hard requirements. # NOTE! The `tvm.testing.CompareBeforeAfter` class provides TWO useful mechanisms for # these tests: # # (a) It lets us specify code snippets which are valid Python, but which aren't YET # recognized as valid TVMScript. This allows unit tests for new constructs, # e.g. 'call_tir(...)' to simply be disabled rather than fully commented out. # # (b) It lets us structurally compare the TIR bodies of two primfuncs. 
# # Note that some of the tests below will require the structural comparison of # two entire IRModules, not just primfuncs. This will require adding functionality # to the `CompareBeforeAfter` class, or implementing that level of comparison within # the individual unit tests. # # Some of the unit tests below which require whole-IRModule comparison. For expedience # we simply comment out the (early draft) bodies of those unit tests, rather than # hacking their structure to get the benefits of (a). # --------------------------------------------------------------------------------------------------- # 'CALL_TIR' (AND RELATED) CAVEATS: # --------------------------------------------------------------------------------------------------- # (c) "call_tir" is a placeholder name. # The TVM "Relax" effort also defines a node named "call_tir", which is likely # become something different from what we're calling "call_tir" here. So # we may rename *this* "call_tir" during implementation. # # (d) For "call_tir" calls, the syntax/semantics for passing buffer regions is still # an active area of development. So that detail of these unit tests is likely # to change. # # (e) The specific string "extract_as_subroutine" used to annotate some IR Blocks, # i.e., `T.annotate("extract_as_subroutine", ...)`, may change as work progresses. # --------------------------------------------------------------------------------------------------- # step 1: Simply passes Python / TVMScript parsing. # --------------------------------------------------------------------------------------------------- # # The only requirement for this test is that the TVMScript parser # doesn't raise an error when encountering `T.call_tir(foo)`, # where "foo" is a syntactically valid TVMScript function name. # # NOTE! The role of this unit test should evolve as follows: # 1) Initially the test should fail, because we haven't yet changed the TVMScript # parser to support 'call_tir'. 
# # 2) Initial TVMScript support for 'call_tir' will be minimal, essentially ignoring # it. This test should pass once that change is made. # # 3) As support for 'call_tir' becomes more complete, this test should once again # fail, because the specified callee doesn't exist. This test should be updated # to once again expect failure. @pytest.mark.xfail(reason="Awaiting TVMScript support for 'call_tir' token.", strict=True) class TestParseCallTIR(tvm.testing.CompareBeforeAfter): """ Simply confirm that the TIR node `call_tir` doesn't interfere with the successful parsing of the TVMScript. """ def before(): T.call_tir(add_one) T.evalute(0) def expected(): T.evaluate(0) # Provide a trivial 'transform' pass to satisfy the requirements of # tvm.testing.CompareBeforeAfter. transform = tvm.tir.transform.prim_func_pass(lambda func, _mod, _ctx: func, 0) # --------------------------------------------------------------------------------------------------- # step 2: transform annotated block ==> separate primfuncs + call_tir # # NOTE: This early-draft version of the unit test contains pseudocode to compare entire IRModule # objects, analogously to how tvm.testing.CompareBeforeAfter compares two primfuncs. # TVM's testing infrastructure currently has no such functionality, and it will need to be added # (or approximated) to make this unit test useable. # --------------------------------------------------------------------------------------------------- @pytest.mark.xfail( reason="Awaiting TVMScript support for 'call_tir' and T.annotation(\"extract_as_subroutine\").", strict=True, ) class TestAnnotateAndSliceTIR(tvm.testing.CompareBeforeAfter): # def test_annotate_and_slice(): # @tvm.script.ir_module # class irmod_before: # @T.prim_func # def main(A: T.Buffer((1,), "int8"): # #A = T.match_buffer(a, (1,), "int8") # A[0] = 0 # with T.block("block_foo"): # optional: give this block a name, perhaps for testing? 
# # NOTE: nice to have: human control over name used for the generated callee # T.annotate("extract_as_subroutine", "add_one") # A[0] += 1 # return 42 # # @tvm.script.ir_module # class irmod_after: # @T.prim_func # def main(): # A = T.buffer[[1], "int8"] # A[0] = 0 # with T.block("block_foo"): # call_tir(add_one, A) # # @T.prim_func # def add_one(X: T.buffer[[1], "int8"]): # X[0] += 1 pass # --------------------------------------------------------------------------------------------------- # step 3: transform call_tir ==> packed call # --------------------------------------------------------------------------------------------------- @pytest.mark.xfail( reason="Awaiting TVMScript support for lowering of 'T.call_tir' to 'T.call_packed'.", strict=True, ) class TestLowerCallTir(tvm.testing.CompareBeforeAfter): # @tvm.script.ir_module # class test_lower_before: # @T.prim_func # def main(): # A = T.buffer[[1], "int8"] # A[0] = 0 # with T.block(): # call_tir(add_one, A) # # @T.prim_func # def add_one(X: T.buffer[[1], "int8"]): # X[0] += 1 # # @tvm.script.ir_module # class test_lower_after: # @T.prim_func # def main(): # A = T.buffer[[1], "int8"] # A[0] = 0 # with T.block(): # # TODO: figure out the right TVMScript thing to do here # call_packed(add_one, A) # not sure about this function / interface # # @T.prim_func # def add_one(X: T.buffer[[1], "int8"]): # X[0] += 1 # # TODO(cconvey): additional test logic needed. # NOTE(lunderberg): Will also need a `transform` defined here. # I think we'll want it to occur in `tvm.tir.transform.MakePackedAPI`. 
pass # --------------------------------------------------------------------------------------------------- # step 4: end-to-end functionality # --------------------------------------------------------------------------------------------------- @pytest.mark.xfail(reason="Awaiting end-to-end support for Primfunc slicing.", strict=True) class TestPrimfuncSlicingEndToEnd(tvm.testing.CompareBeforeAfter): # @tvm.script.ir_module # class test_annotate_before: # @T.prim_func # def main(): # A = T.buffer[[1], "int8"] # A[0] = 0 # with T.block(): # optional: give this block a name, perhaps for testing? # # NOTE: nice to have: human control over name used for the generated callee # T.annotate("extract_as_subroutine", "add_one") # A[0] += 1 # assert(A[0] == 1) # # TODO(cconvey): additional test logic needed: # Starting with the IRModule shown above, end up with a running test that # module actually increments A[0] on Hexagon and x86-64 Linux. # # NOTE(lunderberg): We can use the function calls currently generated by `SplitHostDevice` as a template # (see https://github.com/apache/tvm/blob/9a673faa74ed7cd715a4e011716bcce3fd2158b6/src/tir/transforms/split_host_device.cc#L336). # Overall, we'll want to output a Call node with the operation builtin::tvm_call_packed(). pass
9,623
43.35023
137
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_lower_intrin.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te import numpy as np def lower_intrin(params, stmt): """wrapper to call transformation in stmt""" lower_expr = isinstance(stmt, tvm.tir.PrimExpr) stmt = tvm.tir.Evaluate(stmt) if lower_expr else stmt mod = tvm.IRModule.from_expr( tvm.tir.PrimFunc(params, stmt).with_attr("target", tvm.target.Target("llvm")) ) mod = tvm.transform.Sequential([tvm.tir.transform.Simplify(), tvm.tir.transform.LowerIntrin()])( mod ) func = mod["main"] stmt = func.body return stmt.value if lower_expr else stmt.body def check_value(expr, vx, vy, data, fref): n = len(data) A = te.placeholder((n,), name="A", dtype=expr.dtype) B = te.placeholder((n,), name="B", dtype=expr.dtype) def make_binds(i): x = expr x = tvm.tir.Let(vx, A[i], x) x = tvm.tir.Let(vy, B[i], x) return x C = te.compute((n,), make_binds) s = te.create_schedule([C.op]) f = tvm.build(s, [A, B, C], "llvm") a = tvm.nd.array(np.array([x for x, y in data], dtype=expr.dtype)) b = tvm.nd.array(np.array([y for x, y in data], dtype=expr.dtype)) c = tvm.nd.array(np.zeros(len(data), dtype=expr.dtype)) f(a, b, c) cref = np.array([fref(x, y) for x, y in data]) np.testing.assert_equal(c.numpy(), cref) def get_ref_data(): """Get 
reference data for every pairs""" import itertools x = range(-10, 10) y = list(range(-10, 10)) y.remove(0) return list(itertools.product(x, y)) @tvm.testing.requires_llvm def test_lower_floordiv(): data = get_ref_data() for dtype in ["int32", "int64", "int16"]: x = te.var("x", dtype=dtype) y = te.var("y", dtype=dtype) zero = tvm.tir.const(0, dtype) # no constraints res = lower_intrin([x, y], tvm.te.floordiv(x, y)) check_value(res, x, y, data, lambda a, b: a // b) # rhs >= 0 res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floordiv(x, y), zero)) check_value(res, x, y, data, lambda a, b: a // b if b > 0 else 0) # involves max res = lower_intrin( [x, y], tvm.tir.Select(y >= 0, tvm.te.max(tvm.te.floordiv(x, y), zero), zero) ) check_value(res, x, y, data, lambda a, b: max(a // b, 0) if b > 0 else 0) # lhs >= 0 res = lower_intrin( [x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floordiv(x, y), zero) ) check_value(res, x, y, data, lambda a, b: a // b if b > 0 and a >= 0 else 0) # const power of two res = lower_intrin([x, y], tvm.te.floordiv(x, tvm.tir.const(8, dtype=dtype))) check_value(res, x, y, [(a, b) for a, b in data if b == 8], lambda a, b: a // b) @tvm.testing.requires_llvm def test_lower_floormod(): data = get_ref_data() for dtype in ["int32", "int64", "int16"]: x = te.var("x", dtype=dtype) y = te.var("y", dtype=dtype) zero = tvm.tir.const(0, dtype) # no constraints res = lower_intrin([x, y], tvm.te.floormod(x, y)) check_value(res, x, y, data, lambda a, b: a % b) # rhs >= 0 res = lower_intrin([x, y], tvm.tir.Select(y >= 0, tvm.te.floormod(x, y), zero)) check_value(res, x, y, data, lambda a, b: a % b if b > 0 else 0) # lhs >= 0 res = lower_intrin( [x, y], tvm.tir.Select(tvm.tir.all(y >= 0, x >= 0), tvm.te.floormod(x, y), zero) ) check_value(res, x, y, data, lambda a, b: a % b if b > 0 and a >= 0 else 0) # const power of two res = lower_intrin([x, y], tvm.te.floormod(x, tvm.tir.const(8, dtype=dtype))) check_value(res, x, y, [(a, b) for a, b in 
data if b == 8], lambda a, b: a % b) if __name__ == "__main__": test_lower_floordiv() test_lower_floormod()
4,634
36.08
100
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_tensorize_ldmatrix_mma.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-docstring import numpy as np import pytest import tvm import tvm.testing from tvm import te from tvm.testing.tir import mma_schedule from tvm.tir.tensor_intrin.cuda import ( LDMATRIX_16x16_A_INTRIN, LDMATRIX_16x16_B_INTRIN, LDMATRIX_16x16_B_TRANS_INTRIN, LDMATRIX_16x32_A_INTRIN, LDMATRIX_16x32_B_TRANS_INTRIN, LDMATRIX_32x16_B_INTRIN, MMA_f16f16f16_INTRIN, MMA_f16f16f16_TRANS_INTRIN, MMA_f16f16f32_INTRIN, MMA_f16f16f32_TRANS_INTRIN, MMA_fill_16x16_f16_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_fill_16x16_i32_INTRIN, MMA_i8i8i32_INTRIN, MMA_i8i8i32_TRANS_INTRIN, MMA_store_16x16_f16_global_INTRIN, MMA_store_16x16_f32_global_INTRIN, MMA_store_16x16_i32_global_INTRIN, shared_16x16_to_ldmatrix_32x8_layout, shared_16x32_to_ldmatrix_32x16_layout, shared_32x16_to_ldmatrix_32x16_layout, ) M = 4096 N = 4096 K = 4096 measure_perf = False gflops = (N * M * K) * 2 / 1e9 def matmul(m, n, k, in_dtype, out_dtype, b_transposed): b_shape = (n, k) if b_transposed else (k, n) a = te.placeholder((m, k), name="A", dtype=in_dtype) b = te.placeholder(b_shape, name="B", dtype=in_dtype) k = te.reduce_axis((0, k), name="k") def maybe_cast(v): if in_dtype != out_dtype: return tvm.tir.Cast(out_dtype, v) 
return v def maybe_swap(i, j): if b_transposed: return j, i return i, j c = te.compute( (m, n), lambda i, j: te.sum(maybe_cast(a[i, k]) * maybe_cast(b[maybe_swap(k, j)]), axis=[k]), name="C", ) return (a, b, c) def run_test( k_inner, in_dtype, out_dtype, b_transposed, i_factors, j_factors, k_factors, index_map_A, index_map_B, index_map_C, ldmatrix_a_intrin, ldmatrix_b_intrin, mma_intrin, mma_fill_intrin, mma_store_intrin, ): sch = mma_schedule( te.create_prim_func(matmul(M, N, K, in_dtype, out_dtype, b_transposed)), k_inner, in_dtype, b_transposed, i_factors, j_factors, k_factors, index_map_A, index_map_B, index_map_C, ldmatrix_a_intrin, ldmatrix_b_intrin, mma_intrin, mma_fill_intrin, mma_store_intrin, ) f = tvm.build(sch.mod["main"], target="cuda", name="dense") dev = tvm.device("cuda", 0) if in_dtype == "float16": a_np = np.random.uniform(size=(M, K)).astype("float16") if b_transposed: b_np = np.random.uniform(size=(N, K)).astype("float16") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype( out_dtype ) else: b_np = np.random.uniform(size=(K, N)).astype("float16") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype(out_dtype) else: a_np = np.random.randint(-128, 128, (M, K)).astype("int8") if b_transposed: b_np = np.random.randint(-128, 128, (N, K)).astype("int8") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32").transpose()).astype( "int32" ) else: b_np = np.random.randint(-128, 128, (K, N)).astype("int8") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")).astype("int32") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) c = tvm.nd.array(np.zeros((M, N), dtype=out_dtype), dev) f(a, b, c) if out_dtype != "float16": # The numpy reference is computed with fp32 precision (otherwise too slow). # So there is non-trivial accuracy difference if TVM result is computed with fp16 accumulation. 
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) return lambda: f.time_evaluator(f.entry_name, dev, number=500)(a, b, c) @tvm.testing.requires_cuda_compute_version(8) def test_f16f16f32_m16n16k16(): def index_map(i, j): return ( i // 16, j // 16, *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16), ) k_inner = 16 in_dtype = "float16" out_dtype = "float32" i_factors, j_factors, k_factors = [4, 8, 2, 4, 1], [1, 64, 2, 1, 2], [128, 2, 1] timer = run_test( k_inner, in_dtype, out_dtype, False, # b_transposed i_factors, j_factors, k_factors, index_map, index_map, index_map, LDMATRIX_16x16_A_INTRIN, LDMATRIX_16x16_B_INTRIN, MMA_f16f16f32_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, ) if measure_perf and timer: print("f16f16f32_m16n16k16: %f GFLOPS" % (gflops / (timer().mean))) timer = run_test( k_inner, in_dtype, out_dtype, True, # b_transposed i_factors, j_factors, k_factors, index_map, index_map, index_map, LDMATRIX_16x16_A_INTRIN, LDMATRIX_16x16_B_TRANS_INTRIN, MMA_f16f16f32_TRANS_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, ) if measure_perf and timer: print("f16f16f32_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean))) @tvm.testing.requires_cuda_compute_version(8) def test_f16f16f16_m16n16k16(): def index_map(i, j): return ( i // 16, j // 16, *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16), ) k_inner = 16 in_dtype = "float16" out_dtype = "float16" i_factors, j_factors, k_factors = [16, 2, 1, 4, 2], [16, 2, 2, 1, 4], [128, 2, 1] timer = run_test( k_inner, in_dtype, out_dtype, False, # b_transposed i_factors, j_factors, k_factors, index_map, index_map, index_map, LDMATRIX_16x16_A_INTRIN, LDMATRIX_16x16_B_INTRIN, MMA_f16f16f16_INTRIN, MMA_fill_16x16_f16_INTRIN, MMA_store_16x16_f16_global_INTRIN, ) if measure_perf and timer: print("f16f16f16_m16n16k16: %f GFLOPS" % (gflops / (timer().mean))) timer = run_test( k_inner, in_dtype, out_dtype, True, # b_transposed i_factors, j_factors, k_factors, index_map, 
index_map, index_map, LDMATRIX_16x16_A_INTRIN, LDMATRIX_16x16_B_TRANS_INTRIN, MMA_f16f16f16_TRANS_INTRIN, MMA_fill_16x16_f16_INTRIN, MMA_store_16x16_f16_global_INTRIN, ) if measure_perf and timer: print("f16f16f16_m16n16k16_trans: %f GFLOPS" % (gflops / (timer().mean))) @tvm.testing.requires_cuda_compute_version(8) def test_i8i8i32_m16n16k32(): def index_map_A(i, j): return ( i // 16, j // 32, *shared_16x32_to_ldmatrix_32x16_layout(i % 16, j % 32), ) def index_map_B(i, j): return ( i // 32, j // 16, *shared_32x16_to_ldmatrix_32x16_layout(i % 32, j % 16), ) def index_map_C(i, j): return ( i // 16, j // 16, *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16), ) k_inner = 32 in_dtype = "int8" out_dtype = "int32" i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [8, 4, 4, 2, 1], [32, 2, 2] timer = run_test( k_inner, in_dtype, out_dtype, False, # b_transposed i_factors, j_factors, k_factors, index_map_A, index_map_B, index_map_C, LDMATRIX_16x32_A_INTRIN, LDMATRIX_32x16_B_INTRIN, MMA_i8i8i32_INTRIN, MMA_fill_16x16_i32_INTRIN, MMA_store_16x16_i32_global_INTRIN, ) if measure_perf and timer: print("i8i8i32_m16n16k32: %f GOPS" % (gflops / (timer().mean))) timer = run_test( k_inner, in_dtype, out_dtype, True, # b_transposed i_factors, j_factors, k_factors, index_map_A, index_map_A, index_map_C, LDMATRIX_16x32_A_INTRIN, LDMATRIX_16x32_B_TRANS_INTRIN, MMA_i8i8i32_TRANS_INTRIN, MMA_fill_16x16_i32_INTRIN, MMA_store_16x16_i32_global_INTRIN, ) if measure_perf and timer: print("i8i8i32_m16n16k32_trans: %f GOPS" % (gflops / (timer().mean))) if __name__ == "__main__": tvm.testing.main()
9,362
26.377193
103
py
tvm
tvm-main/tests/python/unittest/test_tir_ptx_mma.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import pytest import tvm from tvm.script import tir as T import numpy as np import tvm.testing @T.prim_func def gemm_mma_m8n8k4_row_col_fp64pf64fp64(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [8, 4], dtype="float64") B = T.match_buffer(b, [8, 4], dtype="float64") C = T.match_buffer(c, [8, 8], dtype="float64") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([1], "float64", scope="local") MultiB = T.decl_buffer([1], "float64", scope="local") Accum = T.decl_buffer([2], "float64", scope="local") for i in range(2): Accum[i] = T.float64(0) MultiA[0] = A[(tx % 32) // 4, (tx % 32) % 4] MultiB[0] = B[(tx % 32) // 4, (tx % 32) % 4] T.evaluate( T.ptx_mma( "m8n8k4", "row", "col", "fp64", "fp64", "fp64", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float64", ) ) for mma_accum_c_id in range(2): C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def 
test_gemm_mma_m8n8k4_row_col_fp64pf64fp64(): sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_col_fp64pf64fp64) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-1, 1, [8, 4]).astype("float64") B_np = np.random.uniform(-1, 1, [8, 4]).astype("float64") C_np = np.zeros([8, 8]).astype("float64") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float64"), B_np.astype("float64").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m8n8k4_row_row_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 4], dtype="float16") B = T.match_buffer(b, [4, 16], dtype="float16") C = T.match_buffer(c, [16, 16], dtype="float16") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([4], "float16", scope="local") MultiB = T.decl_buffer([4], "float16", scope="local") Accum = T.decl_buffer([8], "float16", scope="local") for i in range(8): Accum[i] = T.float32(0) for mma_multi_a_col in T.vectorized(4): MultiA[mma_multi_a_col] = A[ ((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)), mma_multi_a_col, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) % 4, mma_multi_b_col + (4 * ((tx % 32) // 8)), ] T.evaluate( T.ptx_mma( "m8n8k4", "row", "row", "fp16", "fp16", "fp16", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float16", ) ) for mma_accum_c_id in range(8): C[ ((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)), mma_accum_c_id % 4 + (4 * ((tx % 32) % 16 // 8)) + mma_accum_c_id // 4 * 8, ] = Accum[mma_accum_c_id] 
@tvm.testing.requires_cuda_compute_version(7) def test_gemm_mma_m8n8k4_row_row_fp16fp16fp16(): sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp16) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16") B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16") C_np = np.zeros([16, 16]).astype("float16") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float16"), B_np.astype("float16")) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m8n8k4_row_row_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 4], dtype="float16") B = T.match_buffer(b, [4, 16], dtype="float16") C = T.match_buffer(c, [16, 16], dtype="float32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([4], "float16", scope="local") MultiB = T.decl_buffer([4], "float16", scope="local") Accum = T.decl_buffer([8], "float32", scope="local") for i in range(8): Accum[i] = T.float32(0) for mma_multi_a_col in T.vectorized(4): MultiA[mma_multi_a_col] = A[ ((tx % 32) % 4) + (4 * ((((tx % 32) // 16 + (tx % 32) % 16 // 4 * 2)) % 4)), mma_multi_a_col, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) % 4, mma_multi_b_col + (4 * ((tx % 32) // 8)), ] T.evaluate( T.ptx_mma( "m8n8k4", "row", "row", "fp16", "fp16", "fp32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float32", ) ) for mma_accum_c_id in range(8): C[ ((tx % 32) % 2) + ((mma_accum_c_id // 2 % 2) * 2) + 4 * ((tx % 32) // 16) + ((tx % 32) % 16 // 4) % 2 * 8, (tx % 32) % 4 // 2 * 2 + (tx % 32) % 16 // 8 * 4 + 
mma_accum_c_id % 2 + mma_accum_c_id // 4 * 8, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(7) def test_gemm_mma_m8n8k4_row_row_fp16fp16fp32(): sch = tvm.tir.Schedule(gemm_mma_m8n8k4_row_row_fp16fp16fp32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-1, 1, [16, 4]).astype("float16") B_np = np.random.uniform(-1, 1, [4, 16]).astype("float16") C_np = np.zeros([16, 16]).astype("float32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float32"), B_np.astype("float32")) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m8n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [8, 16], dtype="int8") B = T.match_buffer(b, [8, 16], dtype="int8") C = T.match_buffer(c, [8, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([4], "int8", scope="local") MultiB = T.decl_buffer([4], "int8", scope="local") Accum = T.decl_buffer([2], "int32", scope="local") for i in range(2): Accum[i] = T.int32(0) for mma_multi_a_col in T.vectorized(4): MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 4] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 4] T.evaluate( T.ptx_mma( "m8n8k16", "row", "col", "int8", "int8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(2): C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id] # This test uses mma instructions that are not available on NVCC 10.1. 
# Failure occurs during the external call to nvcc, when attempting to # generate the .fatbin file. @tvm.testing.requires_nvcc_version(11) @tvm.testing.requires_cuda_compute_version(7, 5) def test_gemm_mma_m8n8k16_row_col_s8s8s32(): sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8s8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 16]).astype("int8") C_np = np.zeros([8, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m8n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [8, 16], dtype="int8") B = T.match_buffer(b, [8, 16], dtype="uint8") C = T.match_buffer(c, [8, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([4], "int8", scope="local") MultiB = T.decl_buffer([4], "uint8", scope="local") Accum = T.decl_buffer([2], "int32", scope="local") for i in range(2): Accum[i] = T.int32(0) for mma_multi_a_col in T.vectorized(4): MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 4] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 4] T.evaluate( T.ptx_mma( "m8n8k16", "row", "col", "int8", "uint8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(2): C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id] # This test uses mma instructions that are 
not available on NVCC 10.1. # Failure occurs during the external call to nvcc, when attempting to # generate the .fatbin file. @tvm.testing.requires_nvcc_version(11) @tvm.testing.requires_cuda_compute_version(7, 5) def test_gemm_mma_m8n8k16_row_col_s8u8s32(): sch = tvm.tir.Schedule(gemm_mma_m8n8k16_row_col_s8u8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [8, 16]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8") C_np = np.zeros([8, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m8n8k32_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [8, 32], dtype="int4") B = T.match_buffer(b, [8, 32], dtype="int4") C = T.match_buffer(c, [8, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "int4", scope="local") MultiB = T.decl_buffer([8], "int4", scope="local") Accum = T.decl_buffer([2], "int32", scope="local") for i in range(2): Accum[i] = T.int32(0) for mma_multi_a_col in T.vectorized(8): MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 8] for mma_multi_b_col in T.vectorized(8): MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 8] T.evaluate( T.ptx_mma( "m8n8k32", "row", "col", "int4", "int4", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(2): C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id] # This test uses 
mma instructions that are not available on NVCC 10.1. # Failure occurs during the external call to nvcc, when attempting to # generate the .fatbin file. @tvm.testing.requires_nvcc_version(11) @tvm.testing.requires_cuda_compute_version(7, 5) def test_gemm_mma_m8n8k32_row_col_s4s4s32(): sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4s4s32) cuda_mod = tvm.build(sch.mod, target="cuda") ctx = tvm.cuda() A_tvm = tvm.nd.empty([8, 32], "int4", ctx) B_tvm = tvm.nd.empty([8, 32], "int4", ctx) C_tvm = tvm.nd.empty([8, 8], "int32", ctx) cuda_mod(A_tvm, B_tvm, C_tvm) # Currently the correctness is not checked. # TODO: add correctness checking here. @T.prim_func def gemm_mma_m8n8k32_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [8, 32], dtype="int4") B = T.match_buffer(b, [8, 32], dtype="uint4") C = T.match_buffer(c, [8, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "int4", scope="local") MultiB = T.decl_buffer([8], "uint4", scope="local") Accum = T.decl_buffer([2], "int32", scope="local") for i in range(2): Accum[i] = T.int32(0) for mma_multi_a_col in T.vectorized(8): MultiA[mma_multi_a_col] = A[(tx % 32) // 4, mma_multi_a_col + (tx % 32) % 4 * 8] for mma_multi_b_col in T.vectorized(8): MultiB[mma_multi_b_col] = B[(tx % 32) // 4, mma_multi_b_col + (tx % 32) % 4 * 8] T.evaluate( T.ptx_mma( "m8n8k32", "row", "col", "int4", "uint4", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(2): C[(tx % 32) // 4, (tx % 32) % 4 * 2 + mma_accum_c_id] = Accum[mma_accum_c_id] # This test uses mma instructions that are not available on NVCC 10.1. # Failure occurs during the external call to nvcc, when attempting to # generate the .fatbin file. 
@tvm.testing.requires_nvcc_version(11) @tvm.testing.requires_cuda_compute_version(7, 5) def test_gemm_mma_m8n8k32_row_col_s4u4s32(): sch = tvm.tir.Schedule(gemm_mma_m8n8k32_row_col_s4u4s32) cuda_mod = tvm.build(sch.mod, target="cuda") ctx = tvm.cuda() A_tvm = tvm.nd.empty([8, 32], "int4", ctx) B_tvm = tvm.nd.empty([8, 32], "uint4", ctx) C_tvm = tvm.nd.empty([8, 8], "int32", ctx) cuda_mod(A_tvm, B_tvm, C_tvm) # Currently the correctness is not checked. # TODO: add correctness checking here. @T.prim_func def gemm_mma_m16n8k8_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 8], dtype="float16") B = T.match_buffer(b, [8, 8], dtype="float16") C = T.match_buffer(c, [16, 8], dtype="float32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([4], "float16", scope="local") MultiB = T.decl_buffer([2], "float16", scope="local") Accum = T.decl_buffer([4], "float32", scope="local") for i in range(4): Accum[i] = T.float32(0) for mma_multi_a_col in T.vectorized(4): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col // 2 * 8, (tx % 32) % 4 * 2 + mma_multi_a_col % 2 ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4 + mma_multi_b_col // 2 * 8, (tx % 32) % 4 * 2 + mma_multi_b_col % 2 ] T.evaluate( T.ptx_mma( "m16n8k8", "row", "col", "fp16", "fp16", "fp32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float32", ) ) for mma_accum_c_id in range(4): C[(tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2] = Accum[ mma_accum_c_id ] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k8_row_col_fp16fp16fp32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k8_row_col_fp16fp16fp32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = 
np.random.uniform(-1, 1, [16, 8]).astype("float16") B_np = np.random.uniform(-1, 1, [8, 8]).astype("float16") C_np = np.zeros([16, 8]).astype("float32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k16_row_col_fp16fp16fp16(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 16], dtype="float16") B = T.match_buffer(b, [8, 16], dtype="float16") C = T.match_buffer(c, [16, 8], dtype="float16") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "float16", scope="local") MultiB = T.decl_buffer([4], "float16", scope="local") Accum = T.decl_buffer([4], "float16", scope="local") for i in range(4): Accum[i] = T.float32(0) for mma_multi_a_col in range(8): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8, (tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8, ] T.evaluate( T.ptx_mma( "m16n8k16", "row", "col", "fp16", "fp16", "fp16", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float16", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k16_row_col_fp16fp16fp16(): sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp16) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = 
np.random.uniform(-1, 1, [16, 16]).astype("float16") B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16") C_np = np.zeros([16, 8]).astype("float16") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float16"), B_np.astype("float16").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k16_row_col_fp16fp16fp32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 16], dtype="float16") B = T.match_buffer(b, [8, 16], dtype="float16") C = T.match_buffer(c, [16, 8], dtype="float32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "float16", scope="local") MultiB = T.decl_buffer([4], "float16", scope="local") Accum = T.decl_buffer([4], "float32", scope="local") for i in range(4): Accum[i] = T.float32(0) for mma_multi_a_col in range(8): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 4 // 2 * 8, (tx % 32) % 4 * 2 + mma_multi_a_col % 2 + mma_multi_a_col // 4 * 8, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 2 + mma_multi_b_col % 2 + mma_multi_b_col // 2 * 8, ] T.evaluate( T.ptx_mma( "m16n8k16", "row", "col", "fp16", "fp16", "fp32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="float32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k16_row_col_fp16fp16fp32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_fp16fp16fp32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = 
np.random.uniform(-1, 1, [16, 16]).astype("float16") B_np = np.random.uniform(-1, 1, [8, 16]).astype("float16") C_np = np.zeros([16, 8]).astype("float32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("float32"), B_np.astype("float32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k16_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 16], dtype="int8") B = T.match_buffer(b, [8, 16], dtype="int8") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "int8", scope="local") MultiB = T.decl_buffer([4], "int8", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(8): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col // 4 * 8, (tx % 32) % 4 * 4 + mma_multi_a_col % 4, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col, ] T.evaluate( T.ptx_mma( "m16n8k16", "row", "col", "int8", "int8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k16_row_col_s8s8s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8s8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 
16]).astype("int8") C_np = np.zeros([16, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k16_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 16], dtype="int8") B = T.match_buffer(b, [8, 16], dtype="uint8") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([8], "int8", scope="local") MultiB = T.decl_buffer([4], "uint8", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(8): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col // 4 * 8, (tx % 32) % 4 * 4 + mma_multi_a_col % 4, ] for mma_multi_b_col in T.vectorized(4): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col, ] T.evaluate( T.ptx_mma( "m16n8k16", "row", "col", "int8", "uint8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k16_row_col_s8u8s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k16_row_col_s8u8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [16, 16]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 16]).astype("uint8") C_np = np.zeros([16, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) 
B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k32_row_col_s8s8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 32], dtype="int8") B = T.match_buffer(b, [8, 32], dtype="int8") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([16], "int8", scope="local") MultiB = T.decl_buffer([8], "int8", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(16): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8, (tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16, ] for mma_multi_b_col in range(8): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16, ] T.evaluate( T.ptx_mma( "m16n8k32", "row", "col", "int8", "int8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k32_row_col_s8s8s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8s8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 32]).astype("int8") C_np = np.zeros([16, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = 
tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k32_row_col_s8u8s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 32], dtype="int8") B = T.match_buffer(b, [8, 32], dtype="uint8") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([16], "int8", scope="local") MultiB = T.decl_buffer([8], "uint8", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(16): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 8 // 4 * 8, (tx % 32) % 4 * 4 + mma_multi_a_col % 4 + mma_multi_a_col // 8 * 16, ] for mma_multi_b_col in range(8): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 4 + mma_multi_b_col % 4 + mma_multi_b_col // 4 * 16, ] T.evaluate( T.ptx_mma( "m16n8k32", "row", "col", "int8", "uint8", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k32_row_col_s8u8s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k32_row_col_s8u8s32) cuda_mod = tvm.build(sch.mod, target="cuda") A_np = np.random.uniform(-10, 10, [16, 32]).astype("int8") B_np = np.random.uniform(-10, 10, [8, 32]).astype("uint8") C_np = np.zeros([16, 8]).astype("int32") ctx = tvm.cuda() A_tvm = tvm.nd.array(A_np, ctx) B_tvm = tvm.nd.array(B_np, ctx) C_tvm = tvm.nd.array(C_np, ctx) cuda_mod(A_tvm, B_tvm, C_tvm) 
golden = np.matmul(A_np.astype("int32"), B_np.astype("int32").T) C_numpy = C_tvm.numpy() tvm.testing.assert_allclose(golden, C_numpy, atol=1e-3, rtol=1e-3) @T.prim_func def gemm_mma_m16n8k64_row_col_s4s4s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 64], dtype="int4") B = T.match_buffer(b, [8, 64], dtype="int4") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([32], "int4", scope="local") MultiB = T.decl_buffer([16], "int4", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(32): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8, (tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32, ] for mma_multi_b_col in range(16): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32, ] T.evaluate( T.ptx_mma( "m8n8k32", "row", "col", "int4", "int4", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k64_row_col_s4s4s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4s4s32) cuda_mod = tvm.build(sch.mod, target="cuda") ctx = tvm.cuda() A_tvm = tvm.nd.empty([16, 64], "int4", ctx) B_tvm = tvm.nd.empty([8, 64], "int4", ctx) C_tvm = tvm.nd.empty([16, 8], "int32", ctx) cuda_mod(A_tvm, B_tvm, C_tvm) # Currently the correctness is not checked. # TODO: add correctness checking here. 
@T.prim_func def gemm_mma_m16n8k64_row_col_s4u4s32(a: T.handle, b: T.handle, c: T.handle): T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) A = T.match_buffer(a, [16, 64], dtype="int4") B = T.match_buffer(b, [8, 64], dtype="uint4") C = T.match_buffer(c, [16, 8], dtype="int32") brow = T.env_thread("blockIdx.y") bcol = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(brow, 1) T.launch_thread(bcol, 1) T.launch_thread(tx, 32) MultiA = T.decl_buffer([32], "int4", scope="local") MultiB = T.decl_buffer([16], "uint4", scope="local") Accum = T.decl_buffer([4], "int32", scope="local") for i in range(4): Accum[i] = T.int32(0) for mma_multi_a_col in range(32): MultiA[mma_multi_a_col] = A[ (tx % 32) // 4 + mma_multi_a_col % 16 // 8 * 8, (tx % 32) % 4 * 8 + mma_multi_a_col % 8 + mma_multi_a_col // 16 * 32, ] for mma_multi_b_col in range(16): MultiB[mma_multi_b_col] = B[ (tx % 32) // 4, (tx % 32) % 4 * 8 + mma_multi_b_col % 8 + mma_multi_b_col // 8 * 32, ] T.evaluate( T.ptx_mma( "m8n8k32", "row", "col", "int4", "uint4", "int32", MultiA.data, 0, MultiB.data, 0, Accum.data, 0, False, dtype="int32", ) ) for mma_accum_c_id in range(4): C[ (tx % 32) // 4 + mma_accum_c_id // 2 * 8, (tx % 32) % 4 * 2 + mma_accum_c_id % 2, ] = Accum[mma_accum_c_id] @tvm.testing.requires_cuda_compute_version(8) def test_gemm_mma_m16n8k64_row_col_s4u4s32(): sch = tvm.tir.Schedule(gemm_mma_m16n8k64_row_col_s4u4s32) cuda_mod = tvm.build(sch.mod, target="cuda") ctx = tvm.cuda() A_tvm = tvm.nd.empty([16, 64], "int4", ctx) B_tvm = tvm.nd.empty([8, 64], "uint4", ctx) C_tvm = tvm.nd.empty([16, 8], "int32", ctx) cuda_mod(A_tvm, B_tvm, C_tvm) # Currently the correctness is not checked. # TODO: add correctness checking here. 
# Single-warp binary GEMM: C[16, 8] (int32) from A[16, 256] x B[8, 256]^T,
# both operands 1-bit (int1), using the PTX mma intrinsic with the "xor"
# bit operator (XOR + popcount accumulation).
@T.prim_func
def gemm_mma_m16n8k256_row_col_b1b1s32(a: T.handle, b: T.handle, c: T.handle):
    T.func_attr({"global_symbol": "default_function", "tir.noalias": True})
    A = T.match_buffer(a, [16, 256], dtype="int1")
    B = T.match_buffer(b, [8, 256], dtype="int1")
    C = T.match_buffer(c, [16, 8], dtype="int32")
    brow = T.env_thread("blockIdx.y")
    bcol = T.env_thread("blockIdx.x")
    tx = T.env_thread("threadIdx.x")
    # One block, one warp of 32 threads.
    T.launch_thread(brow, 1)
    T.launch_thread(bcol, 1)
    T.launch_thread(tx, 32)
    # Per-thread register fragments for the operands and accumulator.
    MultiA = T.decl_buffer([128], "int1", scope="local")
    MultiB = T.decl_buffer([64], "int1", scope="local")
    Accum = T.decl_buffer([4], "int32", scope="local")
    # Zero the accumulator fragment.
    for i in range(4):
        Accum[i] = T.int32(0)
    # Gather this thread's slice of A into its register fragment.
    for mma_multi_a_col in range(128):
        MultiA[mma_multi_a_col] = A[
            (tx % 32) // 4 + mma_multi_a_col % 64 // 32 * 8,
            (tx % 32) % 4 * 32 + mma_multi_a_col % 32 + mma_multi_a_col // 64 * 128,
        ]
    # Gather this thread's slice of B.
    # NOTE(review): MultiB is declared with 64 elements but only 16 are filled
    # here, and with range(16) the `% 32` / `// 32` terms in the index are
    # no-ops -- verify the intended B-fragment size against the PTX ISA docs.
    for mma_multi_b_col in range(16):
        MultiB[mma_multi_b_col] = B[
            (tx % 32) // 4,
            (tx % 32) % 4 * 32 + mma_multi_b_col % 32 + mma_multi_b_col // 32 * 128,
        ]
    # Issue the warp-level bit-matrix multiply-accumulate ("xor" bit op).
    T.evaluate(
        T.ptx_mma(
            "m16n8k256",
            "row",
            "col",
            "int1",
            "int1",
            "int32",
            MultiA.data,
            0,
            MultiB.data,
            0,
            Accum.data,
            0,
            False,
            "xor",
            dtype="int32",
        )
    )
    # Scatter the accumulator fragment back to C.
    for mma_accum_c_id in range(4):
        C[
            (tx % 32) // 4 + mma_accum_c_id // 2 * 8,
            (tx % 32) % 4 * 2 + mma_accum_c_id % 2,
        ] = Accum[mma_accum_c_id]


@tvm.testing.requires_cuda_compute_version(8)
def test_gemm_mma_m16n8k256_row_col_b1b1s32():
    """Build-and-launch smoke test for the b1b1s32 mma kernel (no value check)."""
    sch = tvm.tir.Schedule(gemm_mma_m16n8k256_row_col_b1b1s32)
    cuda_mod = tvm.build(sch.mod, target="cuda")

    ctx = tvm.cuda()
    # Buffers are left uninitialized; only compilation and launch are exercised.
    A_tvm = tvm.nd.empty([16, 256], "int1", ctx)
    B_tvm = tvm.nd.empty([8, 256], "int1", ctx)
    C_tvm = tvm.nd.empty([16, 8], "int32", ctx)

    cuda_mod(A_tvm, B_tvm, C_tvm)

    # Currently the correctness is not checked.
    # TODO: add correctness checking here.


if __name__ == "__main__":
    tvm.testing.main()
40,397
31.189641
100
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_type.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring,invalid-name,pointless-string-statement
from tvm.script import tir as T

"""
This prim func include necessary buffer types that need to be checked
e.g. reads/writes, match_buffer/alloc_buffer, serial/block etc.
"""


# Elementwise B = A * 2 then C = B + 1, with a buffer_dim_align annotation
# on the producer block "B".
@T.prim_func
def element_wise_storage_align(a: T.handle, c: T.handle) -> None:
    C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1)
    A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1)
    # body
    with T.block("root"):
        T.reads([])
        T.writes([])
        B = T.alloc_buffer([128, 128], elem_offset=0, align=64, offset_factor=1)
        for i0 in T.serial(0, 128):
            for ax1 in T.serial(0, 128):
                with T.block("B"):
                    vi = T.axis.S(128, i0)
                    vj = T.axis.S(128, ax1)
                    T.reads([A[vi, vj]])
                    T.writes([B[vi, vj]])
                    # Storage-align annotation carried as a block attr.
                    T.block_attr({"buffer_dim_align": [[0, 0, 128, 127]]})
                    B[vi, vj] = A[vi, vj] * T.float32(2)
            for i1 in T.serial(0, 128):
                with T.block("C"):
                    vi_1, vj_1 = T.axis.remap("SS", [i0, i1])
                    T.reads([B[vi_1, vj_1]])
                    T.writes([C[vi_1, vj_1]])
                    C[vi_1, vj_1] = B[vi_1, vj_1] + T.float32(1)


"""
This prim func include necessary thread types that need to be checked
e.g. env_thread, launch_thread, thread_binding etc.
"""


# Exercises env_thread/launch_thread plus thread_binding loops in one func.
@T.prim_func
def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None:
    j1_0 = T.env_thread("threadIdx.x")
    j0_0 = T.env_thread("threadIdx.x")
    i = T.env_thread("blockIdx.x")
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    T.launch_thread(i, 128)
    T.launch_thread(j0_0, 4)
    T.launch_thread(j1_0, 4)

    for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"):
        for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"):
            for j0_1 in T.serial(0, 32):
                with T.block(""):
                    B[blockIdx_x, threadIdx_x * 32 + j0_1] = (
                        A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0
                    )
            for j1_1 in T.serial(0, 32):
                with T.block(""):
                    C[blockIdx_x, threadIdx_x * 32 + j1_1] = (
                        B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0
                    )


"""
This test case is added to test T.grid
"""


# Row-sum reduction whose reduction axis is split across a serial outer loop
# (ko) and a threadIdx.x-bound inner loop (ki).
@T.prim_func
def loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    for i, ko in T.grid(128, 4):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("B"):
                vi = T.axis.S(128, i)
                vk = T.axis.R(128, ko * 32 + ki)
                T.reads([B[vi], A[vi, vk]])
                T.writes([B[vi]])
                with T.init():
                    B[vi] = T.float32(0)
                B[vi] = B[vi] + A[vi, vk]


"""
This test case is added to test T.comm_reducer, T.reinterpret, T.tvm_thread_allreduce
"""


# Lowered form of loop_split: per-thread partial sums followed by a
# cross-thread allreduce and a write-back block.
@T.prim_func
def lowered_loop_split(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128], dtype="float32")
    B = T.match_buffer(b, [128], dtype="float32")
    reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    normal_reduce_temp0 = T.alloc_buffer([1], dtype="float32", strides=[1], scope="local")
    for i in T.serial(0, 128):
        for ki in T.thread_binding(0, 32, thread="threadIdx.x"):
            normal_reduce_temp0[0] = T.float32(0)
            # Per-thread serial accumulation over this thread's slice of k.
            for ko in T.serial(0, 4):
                with T.block("B_normal_reduction"):
                    vi = T.axis.S(128, i)
                    vk = T.axis.R(128, ko * 32 + ki)
                    T.reads([A[vi, vk], normal_reduce_temp0[0]])
                    T.writes([normal_reduce_temp0[0]])
                    normal_reduce_temp0[0] = normal_reduce_temp0[0] + A[vi, vk]
            # Combine the 32 per-thread partial sums with tvm_thread_allreduce.
            with T.block("B_cross_thread_reduction"):
                T.reads([normal_reduce_temp0[0]])
                T.writes([reduce_temp0[0]])
                T.attr(
                    T.comm_reducer(lambda x, y: x + y, [T.float32(0)]),
                    "reduce_scope",
                    T.reinterpret(T.uint64(0), dtype="handle"),
                )
                T.evaluate(
                    T.tvm_thread_allreduce(
                        T.uint32(1),
                        normal_reduce_temp0[0],
                        True,
                        reduce_temp0.data,
                        ki,
                        dtype="handle",
                    )
                )
            with T.block("B_write_back"):
                vi = T.axis.S(128, i)
                T.reads([reduce_temp0[0]])
                T.writes([B[vi]])
                B[vi] = reduce_temp0[0]


"""
This test case is added to test T.Buffer with slice as argument and T.exp
"""


# Uses buffer-slice arguments in T.writes and T.exp inside T.init.
@T.prim_func
def different_access_indices(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, [128, 128, 128], dtype="float32")
    B = T.match_buffer(b, [128, 128], dtype="float32")
    for i, j in T.grid(128, 128):
        for k in T.thread_binding(0, 128, thread="threadIdx.x"):
            with T.block("B"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                T.reads([B[vi, vj], A[vi, vj, vk]])
                # Write region declared as a slice spanning both index orders.
                T.writes(
                    [
                        B[
                            T.min(vj, vi) : T.min(vj, vi)  # type: ignore[misc]
                            + (T.max(vj, vi) + 1 - T.min(vj, vi)),
                            T.min(vi, vj) : T.min(vi, vj)  # type: ignore[misc]
                            + (T.max(vi, vj) + 1 - T.min(vi, vj)),
                        ]
                    ]
                )
                with T.init():
                    B[vj, vi] = T.exp(B[vj, vi], dtype="float32")
                B[vi, vj] = B[vi, vj] + A[vi, vj, vk]


# Not running any test as we only want to type-check here
if __name__ == "__main__":
    pass
7,008
37.300546
109
py
tvm
tvm-main/tests/python/unittest/test_te_schedule_postproc_rewrite_for_tensor_core.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
import tvm.testing


def tensor_core_matmul(warp_tile_m=16, m=64, n=32, l=96):
    """Schedule a float16 (n, l) x (l, m) matmul for tensor cores and verify it.

    warp_tile_m selects the warp tile shape fed to the tensor_core pragma
    (16, 8, or 32 in the tests below).  Builds for CUDA, times the kernel,
    and checks the result against np.dot with rtol=1e-3.
    """
    A = te.placeholder((n, l), name="A", dtype="float16")
    B = te.placeholder((l, m), name="B", dtype="float16")
    k = te.reduce_axis((0, l), name="k")
    # Accumulate in float32 while reading float16 inputs.
    C = te.compute(
        (n, m), lambda i, j: te.sum(A[i, k].astype("float32") * B[k, j].astype("float32"), axis=k)
    )

    s = te.create_schedule(C.op)
    y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]

    # Shared-memory and register caches for both operands, plus a local
    # accumulation cache for C.
    AA = s.cache_read(A, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BB = s.cache_read(B, "shared", [C])
    BL = s.cache_read(BB, "local", [C])
    CL = s.cache_write(C, "local")

    # Tiling parameters.
    bx = 4
    by = 32
    step_k = 8
    v = 4
    TX = 8
    TY = 1
    tile_x = bx * TX
    tile_y = by * TY
    WX = min(warp_tile_m, tile_x)
    tile_k = 16
    vthread = 1

    yo, ty = s[C].split(y, tile_y * vthread)
    vy, ty = s[C].split(ty, tile_y)
    ty, yi = s[C].split(ty, TY)

    xo, xi = s[C].split(x, tile_x)
    tz, xi = s[C].split(xi, WX)
    tx, xi = s[C].split(xi, TX)

    ko, ki = s[CL].split(k, step_k * tile_k)
    kl, ki = s[CL].split(ki, tile_k)

    s[C].reorder(yo, xo, tz, ty, tx, yi, xi)
    s[C].bind(yo, te.thread_axis("blockIdx.y"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))

    s[CL].compute_at(s[C], tx)
    yo, xo = CL.op.axis
    s[CL].reorder(ko, kl, ki, yo, xo)

    # Cooperative, vectorized load of A into shared memory.
    s[AA].compute_at(s[CL], ko)
    xo, xi = s[AA].split(s[AA].op.axis[1], factor=bx * v)
    tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
    tx, vec = s[AA].split(tx, factor=v)
    fused = s[AA].fuse(s[AA].op.axis[0], xo)
    _, ty = s[AA].split(fused, factor=by)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tz, te.thread_axis("threadIdx.z"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    s[AA].vectorize(vec)

    # Same cooperative load pattern for B.
    s[BB].compute_at(s[CL], ko)
    xo, xi = s[BB].split(s[BB].op.axis[1], factor=bx * v)
    tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
    tx, vec = s[BB].split(tx, factor=v)
    fused = s[BB].fuse(s[BB].op.axis[0], xo)
    _, ty = s[BB].split(fused, factor=by)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tz, te.thread_axis("threadIdx.z"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].vectorize(vec)

    s[AL].compute_at(s[CL], kl)
    s[BL].compute_at(s[CL], kl)

    # Mark the reduction loop for the tensor-core rewrite postproc.
    s[CL].pragma(ko, "tensor_core")

    func = tvm.build(s, [A, B, C], "cuda")

    dev = tvm.cuda(0)
    a_np = np.random.uniform(size=(n, l)).astype(A.dtype)
    b_np = np.random.uniform(size=(l, m)).astype(B.dtype)
    c_np = np.zeros((n, m), dtype=np.float32)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
    func(a, b, c)
    evaluator = func.time_evaluator(func.entry_name, dev, number=3)
    print("gemm m=%d n=%d k=%d: %f ms" % (m, n, l, evaluator(a, b, c).mean * 1e3))

    c_np = np.dot(a_np, b_np)
    np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)


def tensor_core_batch_matmul(warp_tile_m=16, m=64, n=32, l=96, batch=2):
    """Batched variant of tensor_core_matmul: (batch, n, l) x (batch, l, m).

    Same tiling strategy with the batch axis bound to blockIdx.z; result is
    checked per batch against np.dot with rtol=1e-3.
    """
    A = te.placeholder((batch, n, l), name="A", dtype="float16")
    B = te.placeholder((batch, l, m), name="B", dtype="float16")
    k = te.reduce_axis((0, l), name="k")
    C = te.compute(
        (batch, n, m), lambda b, i, j: te.sum((A[b, i, k] * B[b, k, j]).astype("float32"), axis=k)
    )

    s = te.create_schedule(C.op)
    z, y, x = s[C].op.axis
    k = s[C].op.reduce_axis[0]

    AA = s.cache_read(A, "shared", [C])
    AL = s.cache_read(AA, "local", [C])
    BB = s.cache_read(B, "shared", [C])
    BL = s.cache_read(BB, "local", [C])
    CL = s.cache_write(C, "local")

    # Tiling parameters (bx smaller than the non-batched version).
    bx = 2
    by = 32
    step_k = 8
    v = 4
    TX = 8
    TY = 1
    tile_x = bx * TX
    tile_y = by * TY
    WX = min(warp_tile_m, tile_x)
    tile_k = 16
    vthread = 1

    yo, ty = s[C].split(y, tile_y * vthread)
    vy, ty = s[C].split(ty, tile_y)
    ty, yi = s[C].split(ty, TY)

    xo, xi = s[C].split(x, tile_x)
    tz, xi = s[C].split(xi, WX)
    tx, xi = s[C].split(xi, TX)

    ko, ki = s[CL].split(k, step_k * tile_k)
    kl, ki = s[CL].split(ki, tile_k)

    # Batch axis rides on blockIdx.z.
    s[C].reorder(z, yo, xo, tz, ty, tx, yi, xi)
    s[C].bind(z, te.thread_axis("blockIdx.z"))
    s[C].bind(yo, te.thread_axis("blockIdx.y"))
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(ty, te.thread_axis("threadIdx.y"))
    s[C].bind(tz, te.thread_axis("threadIdx.z"))
    s[C].bind(tx, te.thread_axis("threadIdx.x"))
    s[C].bind(vy, te.thread_axis((0, vthread), "vthread", name="vy"))

    s[CL].compute_at(s[C], tx)
    zo, yo, xo = CL.op.axis
    s[CL].reorder(ko, kl, ki, zo, yo, xo)

    # Cooperative, vectorized load of A into shared memory.
    s[AA].compute_at(s[CL], ko)
    xo, xi = s[AA].split(s[AA].op.axis[2], factor=bx * v)
    tz, tx = s[AA].split(xi, factor=(WX // TX) * v)
    tx, vec = s[AA].split(tx, factor=v)
    fused = s[AA].fuse(s[AA].op.axis[1], xo)
    _, ty = s[AA].split(fused, factor=by)
    s[AA].bind(ty, te.thread_axis("threadIdx.y"))
    s[AA].bind(tz, te.thread_axis("threadIdx.z"))
    s[AA].bind(tx, te.thread_axis("threadIdx.x"))
    s[AA].vectorize(vec)

    # Same cooperative load pattern for B.
    s[BB].compute_at(s[CL], ko)
    xo, xi = s[BB].split(s[BB].op.axis[2], factor=bx * v)
    tz, tx = s[BB].split(xi, factor=(WX // TX) * v)
    tx, vec = s[BB].split(tx, factor=v)
    fused = s[BB].fuse(s[BB].op.axis[1], xo)
    _, ty = s[BB].split(fused, factor=by)
    s[BB].bind(ty, te.thread_axis("threadIdx.y"))
    s[BB].bind(tz, te.thread_axis("threadIdx.z"))
    s[BB].bind(tx, te.thread_axis("threadIdx.x"))
    s[BB].vectorize(vec)

    s[AL].compute_at(s[CL], kl)
    s[BL].compute_at(s[CL], kl)

    # Mark the reduction loop for the tensor-core rewrite postproc.
    s[CL].pragma(ko, "tensor_core")

    func = tvm.build(s, [A, B, C], "cuda")

    dev = tvm.cuda(0)
    a_np = np.random.uniform(size=(batch, n, l)).astype(A.dtype)
    b_np = np.random.uniform(size=(batch, l, m)).astype(B.dtype)
    c_np = np.zeros((batch, n, m), dtype=np.float32)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    c = tvm.nd.array(np.zeros((batch, n, m), dtype=C.dtype), dev)
    func(a, b, c)
    evaluator = func.time_evaluator(func.entry_name, dev, number=3)
    print(
        "batch gemm m=%d n=%d k=%d batch=%d: %f ms"
        % (m, n, l, batch, evaluator(a, b, c).mean * 1e3)
    )

    for bs in range(batch):
        c_np[bs, :, :] = np.dot(a_np[bs, :, :], b_np[bs, :, :])
    np.testing.assert_allclose(c_np, c.numpy(), rtol=1e-3)


@tvm.testing.requires_tensorcore
def test_tensor_core_matmul():
    tensor_core_matmul(16)  # test with warp_tile 16x16x16
    tensor_core_matmul(8)  # test with warp_tile 8x32x16
    tensor_core_matmul(32)  # test with warp_tile 32x8x16


@tvm.testing.requires_tensorcore
def test_tensor_core_batch_matmul():
    tensor_core_batch_matmul()


if __name__ == "__main__":
    test_tensor_core_matmul()
    test_tensor_core_batch_matmul()
7,848
32.831897
98
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_vulkan.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os from posixpath import split import random import re import threading import numpy as np import pytest import tvm import tvm.testing from tvm import relay, te from tvm.topi.math import cast from tvm.script import tir as T, ir as I from tvm.tir import TensorIntrin, IntImm, Cast, Schedule from tvm.tir.tensor_intrin.cuda import ( WMMA_LOAD_16x16x16_F16_A_INTRIN, WMMA_LOAD_16x16x16_F16_B_INTRIN, WMMA_SYNC_16x16x16_f16f16f32_INTRIN, WMMA_FILL_16x16x16_F32_INTRIN, WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN, WMMA_SYNC_16x16x16_f16f16f16_INTRIN, WMMA_FILL_16x16x16_F16_INTRIN, WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN, ) dtype = tvm.testing.parameter("float32", "int32", "float16", "int8") fuzz_seed = tvm.testing.parameter(range(25)) # Explicitly specify a target, as this test is looking at the # generated shader code, and is not running on an actual device. 
@tvm.testing.parametrize_targets(
    " ".join(
        [
            "vulkan",
            "-supports_int8=1",
            "-supports_8bit_buffer=1",
            "-supports_storage_buffer_storage_class=1",
            "-supports_float16=1",
            "-supports_16bit_buffer=1",
        ]
    )
)
def test_vector_comparison(target, dtype):
    """Check that a vectorized Select lowers to a boolx4 OpSelect in SPIR-V."""
    n = (1024,)
    A = te.placeholder(n, dtype=dtype, name="A")
    B = te.compute(
        A.shape,
        lambda i: tvm.tir.Select(
            A[i] >= 0, A[i] + tvm.tir.const(1, dtype), tvm.tir.const(0, dtype)
        ),
        name="B",
    )
    s = te.create_schedule(B.op)

    (bx, tx) = s[B].split(s[B].op.axis[0], factor=128)
    (tx, vx) = s[B].split(tx, factor=4)
    s[B].bind(bx, te.thread_axis("blockIdx.x"))
    s[B].bind(tx, te.thread_axis("threadIdx.x"))
    s[B].vectorize(vx)
    f = tvm.build(s, [A, B], target)

    # Verify we generate the boolx4 type declaration and the OpSelect
    # v4{float,half,int} instruction
    assembly = f.imported_modules[0].get_source()
    matches = re.findall("%v4bool = OpTypeVector %bool 4", assembly)
    assert len(matches) == 1
    matches = re.findall("OpSelect %v4.*", assembly)
    assert len(matches) == 1


def test_array_copy(dev, dtype, fuzz_seed):
    """Round-trip a randomly sized host array through a device array."""
    np.random.seed(fuzz_seed)

    # Log-uniform array size in [1, 32768).
    log_arr_size = np.random.uniform(low=np.log(1), high=np.log(32768))
    arr_size = np.exp(log_arr_size).astype(int)
    a_np = np.random.uniform(size=(arr_size,)).astype(dtype)
    a = tvm.nd.empty((arr_size,), dtype, dev).copyfrom(a_np)
    b_np = a.numpy()
    tvm.testing.assert_allclose(a_np, b_np)
    tvm.testing.assert_allclose(a_np, a.numpy())


@tvm.testing.exclude_targets("llvm")
def test_array_vectorize_add(target, dev, dtype):
    """Elementwise add over a 2-lane vector dtype ("<dtype>x2")."""
    arr_size = 64
    lanes = 2

    if "opencl" in target and dtype == "float16":
        pytest.xfail("Opencl target does not support float16")

    num_thread = 8

    A = te.placeholder((arr_size,), name="A", dtype="%sx%d" % (dtype, lanes))
    B = te.compute((arr_size,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B")
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=num_thread)
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    s[B].bind(xi, te.thread_axis("threadIdx.x"))
    fun = tvm.build(s, [A, B], target)
    a = tvm.nd.empty((arr_size,), A.dtype, dev).copyfrom(np.random.uniform(size=(arr_size, lanes)))
    c = tvm.nd.empty((arr_size,), B.dtype, dev)
    fun(a, c)
    tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1)


@tvm.testing.parametrize_targets("vulkan")
@pytest.mark.skip("Flaky, https://github.com/apache/tvm/issues/10779")
def test_vulkan_stress(target, dev):
    """
    Launch a randomized test with multiple kernels per stream, multiple
    uses of kernels per stream, over multiple threads.
    """

    n = 1024
    num_thread = 64

    def run_stress():
        def worker():
            A = te.placeholder((n,), name="A", dtype="float32")
            B = te.placeholder((n,), name="B", dtype="float32")
            # Each entry pairs a TE compute builder with its numpy reference.
            functions = [
                (
                    lambda: te.compute((n,), lambda i: 2 * A[i] + 3 * B[i]),
                    lambda a, b: 2 * a + 3 * b,
                ),
                (lambda: te.compute((n,), lambda i: A[i] + B[i]), lambda a, b: a + b),
                (lambda: te.compute((n,), lambda i: A[i] + 2 * B[i]), lambda a, b: a + 2 * b),
            ]

            def build_f(f_ref):
                (C_f, ref) = f_ref
                C = C_f()
                s = te.create_schedule(C.op)
                xo, xi = s[C].split(C.op.axis[0], factor=num_thread)
                s[C].bind(xo, te.thread_axis("blockIdx.x"))
                s[C].bind(xi, te.thread_axis("threadIdx.x"))
                fun = tvm.build(s, [A, B, C], target)
                return (fun, ref)

            # Build a random number of randomly chosen kernels, run them all,
            # then verify each output against its reference.
            fs = [
                build_f(random.choice(functions)) for _ in range(np.random.randint(low=1, high=10))
            ]
            a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
            b = tvm.nd.empty((n,), B.dtype, dev).copyfrom(np.random.uniform(size=(n,)))
            cs = [tvm.nd.empty((n,), A.dtype, dev) for _ in fs]
            for ((f, _), c) in zip(fs, cs):
                f(a, b, c)

            for ((_, ref), c) in zip(fs, cs):
                tvm.testing.assert_allclose(c.numpy(), ref(a.numpy(), b.numpy()))

        ts = [threading.Thread(target=worker) for _ in range(np.random.randint(1, 10))]
        for t in ts:
            t.start()
        for t in ts:
            t.join()

    run_stress()


@tvm.testing.exclude_targets("llvm")
def test_vulkan_bool_load(target, dev):
    """Copy a bool array to int32 through an ir_builder extern kernel."""
    arr_size = 1024

    target = tvm.target.Target(target)
    if target.kind.name == "vulkan":
        supports_int8_buffer = target.attrs.get("supports_int8", False) and target.attrs.get(
            "supports_8bit_buffer", False
        )
        if not supports_int8_buffer:
            pytest.xfail(
                "Vulkan target does not support int8 buffer access, used to transfer booleans"
            )

    def do_copy(A, B, n):
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)

        tx = te.thread_axis("threadIdx.x")
        bx = te.thread_axis("blockIdx.x")
        max_threads = 32
        ib.scope_attr(bx, "thread_extent", tvm.tir.indexdiv(n + max_threads - 1, max_threads))
        ib.scope_attr(tx, "thread_extent", max_threads)
        tid = bx * max_threads + tx

        with ib.if_scope(tid < n):
            B[tid] = cast(A[tid], "int32")

        return ib.get()

    A = te.placeholder((arr_size,), name="A", dtype="bool")
    B = te.placeholder((arr_size,), name="B", dtype="int32")

    B = te.extern(
        A.shape,
        [A],
        lambda ins, outs: do_copy(ins[0], outs[0], arr_size),
        name="bool_copy_ir",
        dtype="int32",
    )
    s = te.create_schedule(B.op)

    with tvm.transform.PassContext(opt_level=3):
        func = tvm.build(s, [A, B], target)

    a_np = np.random.uniform(size=arr_size) > 0.5
    b_np = np.zeros((arr_size,), dtype="int32")
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    func(a, b)
    ref = a_np.astype(np.int32)
    tvm.testing.assert_allclose(b.numpy(), ref)


# Helper: evaluate `mod` on the VM executor and compare with res_np.
def check_mod(target, dev, mod, x_np, res_np):
    res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x_np).numpy()
    tvm.testing.assert_allclose(res, res_np, atol=1e-5)


def test_sqrt(target, dev):
    # Three 32 bit pushconstants: any_dim, stride, stride
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.sqrt(x))
    x_np = np.random.uniform(size=(10,)).astype(dtype)
    res_np = np.sqrt(x_np)

    check_mod(target, dev, mod, x_np, res_np)


def test_argsort(target, dev):
    # One 64 bit and one 32 bit constants
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.argsort(x))
    x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
    res_np = np.argsort(x_np)

    check_mod(target, dev, mod, x_np, res_np)


def test_cumsum(target, dev):
    # One 64 bit and one 32 bit constants
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.cumsum(x))
    x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
    res_np = np.cumsum(x_np)

    check_mod(target, dev, mod, x_np, res_np)


@tvm.testing.skip_if_wheel_test
def test_unique(target, dev):
    """Sorted unique over a dynamic-shaped int32 input, sliced to num_unique."""
    dtype = "int32"
    x = relay.var("x", shape=(relay.Any(),), dtype=dtype)
    mod = tvm.IRModule()
    [unique, _, _, num_unique] = relay.unique(x, is_sorted=True)
    mod["main"] = relay.Function([x], relay.op.strided_slice(unique, begin=[0], end=num_unique))
    x_np = np.random.randint(0, high=10, size=(10,)).astype(dtype)
    res_np = np.unique(x_np)
    check_mod(target, dev, mod, x_np, res_np)


vulkan_parameter_impl = tvm.testing.parameter("push_constants", "ubo")
vulkan_parameter_dtype = tvm.testing.parameter("int32", "float32", "int64")


# Only run on vulkan because extremely large numbers of input
# parameters can crash cuda/llvm compiler.
@tvm.testing.parametrize_targets("vulkan -from_device=0")
def test_vulkan_constant_passing(target, dev, vulkan_parameter_impl, vulkan_parameter_dtype):
    """Pass scalar kernel arguments via push constants or a UBO fallback."""
    target = tvm.target.Target(target)
    dtype = vulkan_parameter_dtype

    if not target.attrs.get("supports_int64", False):
        pytest.xfail("Vulkan target does not support Int64 variables")

    # f_add has 3+num_int_params scalar parameters. The other three
    # are length_n, stride1, and stride2.
    if vulkan_parameter_impl == "push_constants":
        # 4 params, 32 bytes. Within 128-byte spec-guaranteed size of
        # push constants. Uses push constants.
        num_int_params = 1
    else:
        # 24 params, 192 bytes. May be above spec-guaranteed size of 128
        # bytes for push constants. Uses either push constants or UBO,
        # depending on the device.
        max_push_constants_size = int(target.attrs.get("max_push_constants_size", 128))
        max_int_params_in_push = max_push_constants_size // 8 - 3
        num_int_params = max_int_params_in_push + 1

    n = te.var("n")
    scalars = [te.var("scale{}".format(i), dtype=dtype) for i in range(num_int_params)]
    scalar_sum = scalars[0]
    for s in scalars[1:]:
        scalar_sum += s

    A = te.placeholder((n,), name="A", dtype=dtype)
    B = te.compute(A.shape, lambda i: scalar_sum + A[i], name="B")

    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=64)
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    s[B].bind(xi, te.thread_axis("threadIdx.x"))
    f_add = tvm.build(s, scalars + [A, B], target)

    n = 1024
    scalars = np.array([1 for _ in scalars]).astype(dtype)
    a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
    b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
    f_add(*scalars, a, b)

    tvm.testing.assert_allclose(a.numpy() + sum(scalars), b.numpy())


def test_vulkan_while_if(target, dev):
    """While loop whose condition contains if_then_else; checks SPIR-V blocks."""
    target = tvm.target.Target(target)

    def do_compute(A, B, n):
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)

        if "gpu" in target.keys:
            ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0)

        iterations = ib.allocate("int32", (1,), name="iterations", scope="local")
        iterations[0] = 0
        B[0] = 0

        # WhileNode's condition is re-evaluated every loop. The
        # if_then_else block introduces additional labels/blocks that
        # must be kept separate from the WhileNode's block.
        loop_condition = iterations[0] < tvm.tir.if_then_else(A[0] > 0, 10, 20)
        with ib.while_loop(loop_condition):
            iterations[0] += 1
            B[0] += iterations[0]

        return ib.get()

    n = 1
    dtype = "int32"
    A = te.placeholder((n,), name="A", dtype=dtype)

    B = te.extern(
        A.shape,
        [A],
        lambda ins, outs: do_compute(ins[0], outs[0], n),
        dtype=dtype,
    )

    s = te.create_schedule(B.op)

    # Point of failure would be here, at tvm.build.
    with tvm.transform.PassContext(opt_level=3):
        func = tvm.build(s, [A, B], target)

    # A[0] > 0 -> 10 iterations -> sum(1..10) = 55.
    a = tvm.nd.array(np.array([5], dtype=A.dtype), dev)
    b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
    func(a, b)
    tvm.testing.assert_allclose(b.numpy(), [55])

    # A[0] <= 0 -> 20 iterations -> sum(1..20) = 210.
    a = tvm.nd.array(np.array([-5], dtype=A.dtype), dev)
    b = tvm.nd.array(np.zeros(n, dtype=A.dtype), dev)
    func(a, b)
    tvm.testing.assert_allclose(b.numpy(), [210])


@tvm.testing.exclude_targets("llvm")
def test_vulkan_local_threadidx(target, dev):
    # To access the thread index, the vulkan runtime accesses a global
    # array of thread indices, storing the result in a local variable.
    # In CUDA, these are the built-in threadIdx.x variables, which are
    # globally accessible. In vulkan, these local variables must be
    # defined inside a function, but are hoisted up to the function
    # header to mimic the global CUDA semantics. Before this
    # hoisting, this test could trigger spvValidate errors for
    # potentially undeclared variables.

    def do_compute(A, B, n):
        ib = tvm.tir.ir_builder.create()
        A = ib.buffer_ptr(A)
        B = ib.buffer_ptr(B)

        # One single declaration of te.thread_axis.
        tx = te.thread_axis("threadIdx.x")

        with ib.for_range(0, 1):
            # Used inside a for-loop scope, defines local thread_id
            # variable.
            ib.scope_attr(tx, "thread_extent", 16)
            B[tx + 0] = A[tx + 0]

        with ib.for_range(0, 1):
            # Used in next scope. If local variable defined at point
            # of use instead of function header, will fail spvValidate
            # for access of out-of-scope local variable.
            ib.scope_attr(tx, "thread_extent", 16)
            B[tx + 16] = A[tx + 16]

        return ib.get()

    n = te.var("n")
    A = te.placeholder((n,), name="A", dtype="int32")
    B = te.placeholder((n,), name="B", dtype="int32")

    B = te.extern(
        A.shape,
        [A],
        lambda ins, outs: do_compute(ins[0], outs[0], n),
        dtype="int32",
    )
    s = te.create_schedule(B.op)

    # Expected failure occurs at build step.
    func = tvm.build(s, [A, B], target)

    n = 32
    a_np = np.arange(n).astype(dtype=A.dtype)
    b_np = np.zeros((n,), dtype="int32")
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    func(a, b)
    tvm.testing.assert_allclose(b.numpy(), a_np)


class TestVectorizedIndices: load_type, store_type = tvm.testing.parameters( # Load N values, write to N locations. # Vectorized copy. ("ramp", "ramp"), # Load 1 value, write to N locations. # Scalar load, vectorized store. # # Most TVM operations (e.g. schedule[tensor].vectorize(axis)) have # the broadcast outside of the index, but it is semantically okay # for the broadcast to be inside the index, and it shows up with # some optimizations. ("broadcast", "ramp"), # Load 1 values, write to 1 location. # Broadcasting on both sides should be equivalent to a scalar copy. ("broadcast", "broadcast"), # Loads N values, write to 1 location. # Disabled as it would have unclear semantics. # ("ramp","broadcoast"), ) indirect_indices = tvm.testing.parameter(True, False, ids=["reorder", "no_reorder"]) @tvm.testing.fixture def ref_data(self, load_type, store_type, indirect_indices): n = 4 index_map = { "ramp": np.arange(n), "broadcast": np.zeros(n, dtype="int32"), } a_np = np.random.randint(np.iinfo("int32").max, size=n).astype("int32") b_np = np.zeros(shape=n, dtype=a_np.dtype) reorder_np = np.arange(n, dtype="int32")[::-1] load_index = index_map[load_type] store_index = index_map[store_type] if indirect_indices: load_index = reorder_np[load_index] b_np[store_index] = a_np[load_index] return a_np, reorder_np, b_np @tvm.testing.fixture def mod(self, target, load_type, store_type, indirect_indices): target = tvm.target.Target(target) n = 4 dtype = "int32" A = te.placeholder((n,), dtype=dtype, name="A") R = te.placeholder((n,), dtype=dtype, name="R") def do_compute(ins, outs): ib = tvm.tir.ir_builder.create() A, R = map(ib.buffer_ptr, ins) B = ib.buffer_ptr(outs[0]) if "gpu" in target.keys: ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent",
0) index_map = { "ramp": tvm.tir.Ramp(0, 1, 4), "broadcast": tvm.tir.Broadcast(0, 4), } load_index = index_map[load_type] store_index = index_map[store_type] if indirect_indices: load_index = R[load_index] B[store_index] = A[load_index] return ib.get() B = te.extern(A.shape, [A, R], do_compute, dtype="int32") s = te.create_schedule(B.op) return tvm.lower(s, [A, R, B]) def test_ramp_broadcast_index(self, target, dev, mod, ref_data): f = tvm.build(mod, target=target) a_np, reorder_np, b_np = ref_data a = tvm.nd.array(a_np, dev) r = tvm.nd.array(reorder_np, dev) b = tvm.nd.array(np.zeros(shape=b_np.shape, dtype="int32"), dev) f(a, r, b) tvm.testing.assert_allclose(b.numpy(), b_np) @tvm.testing.parametrize_targets("vulkan -max_shared_memory_per_block=16384") def test_shared_mem_alloc(target, dev): alloc_nbytes = 16384 * 2 def do_compute(ins, outs): ib = tvm.tir.ir_builder.create() out = ib.buffer_ptr(outs[0]) ib.scope_attr(te.thread_axis("blockIdx.x"), "thread_extent", 0) array = ib.allocate("int32", (alloc_nbytes,), name="array", scope="shared") array[0] = 0 out[0] = array[0] return ib.get() Out = te.extern( shape=(1,), inputs=[], fcompute=do_compute, dtype="int32", ) s = te.create_schedule(Out.op) # Codegen should raise error when allocating more memory than the # target supports. with pytest.raises(tvm.TVMError): tvm.build(s, [Out], target) def test_negative_operand_divmod(target, dev): """Test handling of negative offsets to floormod/floordiv Even though the SPIR-V spec states that OpSRem and OpSMod can give the signed modulo, the Vulkan spec states that any use of negative operands is undefined behavior. This test starts with negative operands to floordiv, validating that they are simplified into the corresponding positive operands, such that the final TIR can be expressed using only positive operands. 
SPIR-V: https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpSRem Vulkan: https://registry.khronos.org/vulkan/specs/1.3/html/chap37.html#spirvenv-op-prec """ N = 32 offset = 16 divisor = 5 @T.prim_func def func(A: T.Buffer((N, 2), "int32")): for i in T.serial(N): with T.block("A"): v_i = T.axis.spatial(N, i) A[v_i, 0] = T.floordiv(v_i - offset, divisor) A[v_i, 1] = T.floormod(v_i - offset, divisor) if "gpu" in tvm.target.Target(target).keys: sch = tvm.tir.Schedule(func) sch.bind(sch.get_loops("A")[0], "threadIdx.x") func = sch.mod["main"] built = tvm.build(func, target=target) a_dev = tvm.nd.empty([N, 2], "int32", dev) built(a_dev) a = a_dev.numpy() np.testing.assert_array_equal(a[:, 0], (np.arange(N) - offset) // divisor) np.testing.assert_array_equal(a[:, 1], (np.arange(N) - offset) % divisor) @pytest.mark.parametrize("out_dtype", ["float32", "float16"]) def test_cooperative_matrix(out_dtype): def get_matmul(m, n, k, out_dtype="float32"): X = te.placeholder((m, k), name="X", dtype="float16") W = te.placeholder((k, n), name="W", dtype="float16") ak = te.reduce_axis((0, k), name="k") if out_dtype == "float32": matmul = te.compute( (m, n), lambda i, j: te.sum( X[i, ak].astype("float32") * W[ak, j].astype("float32"), axis=ak, ), name="compute", ) else: matmul = te.compute( (m, n), lambda i, j: te.sum(X[i, ak] * W[ak, j], axis=ak), name="compute", ) return te.create_prim_func([X, W, matmul]) M, N, K = 16, 16, 32 func = get_matmul(M, N, K, out_dtype) sch = Schedule(func) block = sch.get_block("compute") i, j, k = sch.get_loops(block) i_outer, i_inner = sch.split(i, factors=[None, 16]) j_outer, j_inner = sch.split(j, factors=[None, 16]) k_outer, k_inner = sch.split(k, factors=[None, 16]) sch.reorder(i_outer, j_outer, k_outer, i_inner, j_inner, k_inner) fused_outer = sch.fuse(i_outer, j_outer) sch.bind(fused_outer, "blockIdx.x") def fetch_to_shared(block, idx): block_read = sch.cache_read(block, idx, "shared") sch.compute_at(block_read, k_outer) warp_size = 32 
fused = sch.fuse(*sch.get_loops(block_read)[-2:]) vector_size = 4 _, f_2, f_3 = sch.split(fused, factors=[None, warp_size, vector_size]) sch.bind(f_2, "threadIdx.x") sch.vectorize(f_3) def tensorize_load(block, dim): loops = sch.get_loops(block) i, j = loops[-dim : (len(loops) - dim + 2)] i0, i1 = sch.split(i, factors=[None, 16]) j0, j1 = sch.split(j, factors=[None, 16]) sch.reorder(i0, j0, i1, j1) sch.unroll(i0) sch.unroll(j0) return i1 fetch_to_shared(block, 0) fetch_to_shared(block, 1) c_warp_scope = "wmma.accumulator" a_warp_scope = "wmma.matrix_a" b_warp_scope = "wmma.matrix_b" A_mat = sch.cache_read(block, 0, a_warp_scope) B_mat = sch.cache_read(block, 1, b_warp_scope) loop_a = tensorize_load(A_mat, 2) sch.tensorize(loop_a, WMMA_LOAD_16x16x16_F16_A_INTRIN) loop_b = tensorize_load(B_mat, 2) sch.tensorize(loop_b, WMMA_LOAD_16x16x16_F16_B_INTRIN) store = sch.cache_write(block, 0, c_warp_scope) sch.reverse_compute_at(store, fused_outer) init = sch.decompose_reduction(block, sch.get_loops(block)[1]) intrin = WMMA_FILL_16x16x16_F32_INTRIN if out_dtype == "float16": intrin = WMMA_FILL_16x16x16_F16_INTRIN sch.tensorize(sch.get_loops(init)[1], intrin) intrin = WMMA_STORE_16x16x16_F32_GLOBAL_INTRIN if out_dtype == "float16": intrin = WMMA_STORE_16x16x16_F16_GLOBAL_INTRIN sch.tensorize(sch.get_loops(store)[1], intrin) intrin = WMMA_SYNC_16x16x16_f16f16f32_INTRIN if out_dtype == "float16": intrin = WMMA_SYNC_16x16x16_f16f16f16_INTRIN sch.tensorize(sch.get_loops(block)[2], intrin) target = "vulkan -from_device=0" tgt_attrs = tvm.target.Target(target).attrs if tgt_attrs.get("supports_cooperative_matrix"): f = tvm.build(sch.mod, target=target) dev = tvm.device(target, 0) A = tvm.nd.array(np.random.randn(M, K).astype("float16"), dev) B = tvm.nd.array(np.random.randn(K, N).astype("float16"), dev) C = tvm.nd.array(np.random.randn(M, N).astype(out_dtype), dev) f(A, B, C) A_np = A.numpy() B_np = B.numpy() ref = np.dot(A_np.astype("float32"), B_np.astype("float32")) 
tvm.testing.assert_allclose(C.numpy(), ref, rtol=1e-2, atol=1e-2) @tvm.testing.requires_vulkan(support_required="compile-only") def test_codegen_decl_buffer(): """The codegen should accept DeclBuffer nodes in its input""" @I.ir_module class mod: @T.prim_func def kernel(): T.func_attr({"calling_conv": 2, "global_symbol": "kernel", "tir.noalias": True}) A_data = T.allocate([256], dtype="float32", scope="local") A_buf = T.decl_buffer([256], dtype="float32", scope="local", data=A_data) target = tvm.target.Target("vulkan") vulkan_codegen = tvm.get_global_func("target.build.vulkan") vulkan_codegen(mod, target) if __name__ == "__main__": tvm.testing.main()
25,474
32.966667
99
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_compute_inline.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import pytest import tvm import tvm.testing import tvm.tir.tensor_intrin from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable @T.prim_func def elementwise(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def elementwise_multi_producer_consumer(a: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) D = T.match_buffer(d, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 # B has two consumers for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 for i, j in T.grid(128, 128): with 
T.block("D"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = B[vi, vj] + 2.0 + C[vi, vj] # D has two producers @T.prim_func def elementwise_multi_consumer_inlined(a: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) D = T.match_buffer(d, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 + 1.0 for i, j in T.grid(128, 128): with T.block("D"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = A[vi, vj] * 2.0 + 2.0 + C[vi, vj] @T.prim_func def elementwise_standalone(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] + 1.0 @T.prim_func def elementwise_standalone_dce(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] + 1.0 @T.prim_func def elementwise_under_loop(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) B = T.alloc_buffer((128, 128)) for i in T.serial(0, 128): for j in T.serial(0, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for j in T.serial(0, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def elementwise_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 + 1.0 @T.prim_func def fail_multi_reader_writer(a: T.handle, d: T.handle) -> None: A = 
T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.alloc_buffer((128, 128)) D = T.match_buffer(d, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 C[vi, vj] = A[vi, vj] + 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = B[vi, vj] + C[vi, vj] @T.prim_func def elementwise_multi_reverse_loads(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = (B[vi, vj] + 1.0) * (B[vi, vj] * 2.0) + 3.0 @T.prim_func def elementwise_multi_reverse_loads_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = (A[vi, vj] * 2.0 + 1.0) * (A[vi, vj] * 2.0 * 2.0) + 3.0 @T.prim_func def elementwise_reverse_affine_load( A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 32, 8, 8), "float32") ) -> None: B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j, k, l in T.grid(8, 32, 8, 8): with T.block("C"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) C[vi, vj, vk, vl] = B[ ((((vi * 32) + vj) * 8 + vk) * 8 + vl) // 128, ((((vi * 32) + vj) * 8 + vk) * 8 + vl) % 128, ] @T.prim_func def elementwise_reverse_affine_load_inlined( A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 32, 8, 8), "float32") ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) C[ (vj + vi * 128) // 2048, (vj + vi * 128) // 64 % 32, ((vj + vi * 128) // 8) % 8, (vj + vi * 128) % 8, ] = ( A[vi, vj] * 2.0 ) 
@T.prim_func def elementwise_reverse_affine_load_unit_iter( A: T.Buffer((128, 128), "float32"), B: T.Buffer((8, 16, 1), "float32"), D: T.Buffer((1, 8, 16, 128), "float32"), ) -> None: C = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 for i, j, k, l in T.grid(1, 8, 16, 128): with T.block("C"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) D[vi, vj, vk, vl] = C[vj * 16 + vk, vl] + B[vj, vk, vi] @T.prim_func def elementwise_reverse_affine_load_unit_iter_inlined( A: T.Buffer((128, 128), "float32"), B: T.Buffer((8, 16, 1), "float32"), D: T.Buffer((1, 8, 16, 128), "float32"), ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0] @T.prim_func def elementwise_reverse_affine_load_unit_iter_simplified( A: T.Buffer((128, 128), "float32"), B: T.Buffer((8, 16, 1), "float32"), D: T.Buffer((1, 8, 16, 128), "float32"), ) -> None: C = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 for i, j, k in T.grid(8, 16, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) D[0, vi, vj, vk] = C[vi * 16 + vj, vk] + B[vi, vj, 0] @T.prim_func def elementwise_reverse_affine_load_unit_iter_simplified_inlined( A: T.Buffer((128, 128), "float32"), B: T.Buffer((8, 16, 1), "float32"), D: T.Buffer((1, 8, 16, 128), "float32"), ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + B[vi // 16, vi % 16, 0] @T.prim_func def elementwise_reverse_affine_chain( A: T.Buffer((128, 128), "float32"), D: T.Buffer((1, 8, 16, 128), "float32") ): B = T.alloc_buffer((128, 128)) C = T.alloc_buffer((8, 16, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 
2.0 for i, j, k in T.grid(8, 16, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = B[vi * 16 + vj, vk] + 1.0 for i, j, k, l in T.grid(1, 8, 16, 128): with T.block("D"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) D[vi, vj, vk, vl] = C[vj, vk, vl] @T.prim_func def elementwise_reverse_affine_chain_inlined( A: T.Buffer((128, 128), "float32"), D: T.Buffer((1, 8, 16, 128), "float32") ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) D[0, vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + 1.0 @T.prim_func def elementwise_multi_reverse_affine_load( A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 16, 128), "float32"), ) -> None: B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j, k in T.grid(8, 16, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = B[vi * 16 + vj, vk] + B[vi * 16 + vj, vk] @T.prim_func def elementwise_multi_reverse_affine_load_inlined( A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 16, 128), "float32"), ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) C[vi // 16, vi % 16, vj] = A[vi, vj] * 2.0 + A[vi, vj] * 2.0 @T.prim_func def elementwise_reverse_non_affine_load( A: T.Buffer((128, 128), "float32"), C: T.Buffer((8, 16, 128), "float32") ) -> None: B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j, k in T.grid(8, 16, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = B[vi * 16 + vj, vi * 16 + vj] @T.prim_func def opaque_access_load(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] 
= A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(B[0:128, 0:128]) T.writes(C[0:128, 0:128]) T.evaluate(B.access_ptr("r", extent=128)) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def opaque_access_store(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(B[0:128, 0:128]) T.writes(C[0:128, 0:128]) T.evaluate(B.access_ptr("r", extent=128)) T.evaluate(C.access_ptr("w", extent=128)) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def buffer_matched(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) Bb = T.match_buffer(B[vi : vi + 1, vj], (1, 1)) C[vi, vj] = Bb[0, 0] + 1.0 @T.prim_func def elementwise_predicate(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) T.where(B[i, j] < 10.0) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def elementwise_predicate_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) T.where(A[i, j] * 2.0 < 10.0) C[vi, vj] = A[vi, vj] * 2.0 + 1.0 @T.prim_func def elementwise_multi_loads(a: T.handle, c: T.handle) -> None: A = 
T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + B[vi, vj + 1] + B[vi, vj + 2] @T.prim_func def elementwise_multi_loads_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 + A[vi, vj + 1] * 2.0 + A[vi, vj + 2] * 2.0 @T.prim_func def access_opaque_ptr_then_elemwise(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [1024]) B = T.match_buffer(b, [1024]) A_cache = T.alloc_buffer([1024]) BB = T.alloc_buffer([1024]) with T.block("opaque"): # annotated opaque partial access T.reads(A[0:512]) T.writes(A_cache[0:512]) T.evaluate(A.access_ptr("r", extent=512)) T.evaluate(A_cache.access_ptr("w", extent=512)) for i in range(512): with T.block("BB"): vi = T.axis.remap("S", [i]) BB[vi] = A_cache[vi] * 2.0 for i in range(512): with T.block("B"): vi = T.axis.remap("S", [i]) B[vi] = BB[vi] + 1.0 @T.prim_func def access_opaque_ptr_then_elemwise_inline(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [1024], dtype="float32") B = T.match_buffer(b, [1024], dtype="float32") A_cache = T.alloc_buffer([1024], dtype="float32") with T.block("opaque"): # annotated opaque partial access should be kept T.reads(A[0:512]) T.writes([A_cache[0:512]]) T.evaluate(A.access_ptr("r", extent=512)) T.evaluate(A_cache.access_ptr("w", extent=512)) for i in T.serial(0, 512): with T.block("B"): vi = T.axis.spatial(512, i) T.reads([A_cache[vi]]) T.writes([B[vi]]) B[vi] = A_cache[vi] * 2.0 + 1.0 @T.prim_func def matmul_relu(var_A: T.handle, var_B: T.handle, var_compute: T.handle) -> None: A = T.match_buffer(var_A, [512, 512], dtype="float32") B = 
T.match_buffer(var_B, [512, 512], dtype="float32") compute = T.match_buffer(var_compute, [512, 512], dtype="float32") C = T.alloc_buffer([512, 512], dtype="float32") for i0, i1, i2 in T.grid(512, 512, 512): with T.block("C"): i, j, k = T.axis.remap("SSR", [i0, i1, i2]) T.reads([C[i, j], A[i, k], B[k, j]]) T.writes([C[i, j]]) with T.init(): C[i, j] = T.float32(0) C[i, j] = C[i, j] + A[i, k] * B[k, j] for i0, i1 in T.grid(512, 512): with T.block("compute"): i0_1, i1_1 = T.axis.remap("SS", [i0, i1]) T.reads([C[i0_1, i1_1]]) T.writes([compute[i0_1, i1_1]]) compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0)) @T.prim_func def elementwise_output(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def inline_block_with_init( A: T.Buffer((1, 512, 7, 7), "float32"), B: T.Buffer((1, 512, 1, 1), "float32"), ) -> None: B_rf = T.alloc_buffer([1, 512, 1, 1, 49], dtype="float32") for i0, i1, i2, i3, i4, i5 in T.grid(1, 512, 1, 1, 49, 1): with T.block("tensor_rf"): vi4 = T.axis.spatial(49, i4) ax0 = T.axis.spatial(1, 0) ax1 = T.axis.spatial(512, i1) ax2 = T.axis.spatial(1, 0) ax3 = T.axis.spatial(1, 0) with T.init(): B_rf[ax0, ax1, ax2, ax3, vi4] = T.float32(0) B_rf[ax0, ax1, ax2, ax3, vi4] = ( B_rf[ax0, ax1, ax2, ax3, vi4] + A[ ax0, ax1, ax2 * 7 + vi4 // 7, ax3 * 7 + vi4 % 7, ] ) for i0, i1 in T.grid(1, 512): for ax0, ax1, ax2, ax3, ax4 in T.grid(49, 1, 1, 1, 1): with T.block("tensor"): vi4, ax0_1 = T.axis.remap("RS", [ax0, ax1]) ax1_1 = T.axis.spatial(512, i1 + ax2) ax2_1, ax3_1 = T.axis.remap("SS", [ax3, ax4]) with T.init(): B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0) B[ax0_1, ax1_1, ax2_1, ax3_1] = ( B[ax0_1, ax1_1, ax2_1, ax3_1] + 
B_rf[ax0_1, ax1_1, ax2_1, ax3_1, vi4] ) @T.prim_func def exp_exp_opaque_access_with_tvm_access_ptr( lookup_table: T.Buffer((1024,), "int8"), x: T.Buffer((16,), "float16"), compute: T.Buffer((16,), "float16"), ) -> None: compute_1 = T.alloc_buffer([16], dtype="float16") for i0 in T.serial(16): with T.block("compute"): i0_1 = T.axis.spatial(16, i0) T.reads(x[i0_1]) T.writes(compute_1[i0_1]) compute_1[i0_1] = T.exp(x[i0_1], dtype="float16") for i0 in T.serial(16): with T.block("compute_1"): i0_2 = T.axis.spatial(16, i0) T.reads(lookup_table[0:1024], compute_1[i0_2]) T.writes(compute[i0_2]) T.evaluate(lookup_table.access_ptr("r")) compute[i0_2] = T.exp( compute_1[i0_2], dtype="float16", ) @T.prim_func def exp_exp_opaque_access_with_tvm_access_ptr_inlined( lookup_table: T.Buffer((1024,), "int8"), x: T.Buffer((16,), "float16"), compute: T.Buffer((16,), "float16"), ) -> None: for i0 in T.serial(16): with T.block("compute_1"): i0_1 = T.axis.spatial(16, i0) # Do not put the opaque access to new write region when opaque access # wrapped with a tvm_access_ptr and the access mask set to "read only" T.reads(lookup_table[0:1024], x[i0_1]) T.writes(compute[i0_1]) T.evaluate(lookup_table.access_ptr("r")) compute[i0_1] = T.exp( T.exp(x[i0_1], dtype="float16"), dtype="float16", ) @T.prim_func def elementwise_overcomputed_producer( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(127, 127): with T.block("C"): cvi, cvj = T.axis.remap("SS", [i, j]) C[cvi, cvj] = B[cvi, cvj] + 1.0 @T.prim_func def elementwise_overcomputed_producer_reverse_inlined( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) if vi < 127 and vj < 127: C[vi, vj] = A[vi, vj] * 2.0 + 1.0 @T.prim_func def 
elementwise_overcomputed_producer_simplify_predicate( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: B = T.alloc_buffer((128, 128)) for i in T.grid(16384): with T.block("B"): vi = T.axis.spatial(128, i // 128) vj = T.axis.spatial(128, i % 128) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(127, 127): with T.block("C"): cvi, cvj = T.axis.remap("SS", [i, j]) C[cvi, cvj] = B[cvi, cvj] + 1.0 @T.prim_func def elementwise_overcomputed_producer_simplify_predicate_reverse_inlined( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: for i in T.grid(16384): with T.block("B"): vi = T.axis.spatial(128, i // 128) vj = T.axis.spatial(128, i % 128) if vi < 127 and vj < 127: C[vi, vj] = A[vi, vj] * 2.0 + 1.0 @T.prim_func def elementwise_overcomputed_producer_injective_load( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: B = T.alloc_buffer((8, 8, 16, 16)) for i0, j0, i1, j1 in T.grid(8, 8, 16, 16): with T.block("B"): vi, vj, vm, vn = T.axis.remap("SSSS", [i0, j0, i1, j1]) B[vi, vj, vm, vn] = A[vi * 16 + vm, vj * 16 + vn] * 2.0 for i, j in T.grid(127, 127): with T.block("C"): cvi, cvj = T.axis.remap("SS", [i, j]) C[cvi, cvj] = B[cvi // 16, cvj // 16, cvi % 16, cvj % 16] + 1.0 @T.prim_func def elementwise_overcomputed_producer_injective_load_reverse_inlined( A: T.Buffer((128, 128), "float32"), C: T.Buffer((127, 127), "float32") ) -> None: for i0, j0, i1, j1 in T.grid(8, 8, 16, 16): with T.block("B"): vi, vj, vm, vn = T.axis.remap("SSSS", [i0, j0, i1, j1]) if vi * 16 + vm < 127 and vj * 16 + vn < 127: C[vm + vi * 16, vn + vj * 16] = A[vi * 16 + vm, vj * 16 + vn] * 2.0 + 1.0 @T.prim_func def elementwise_producer_not_cover_consumer( A: T.Buffer((128, 128), "float32"), D: T.Buffer((256, 128), "float32") ) -> None: B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(256, 128): with 
T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = T.if_then_else(vi >= 128, B[vi - 128, vj], T.float32(0), dtype="float32") @T.prim_func def elementwise_predicate_producer(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((127, 128)) C = T.match_buffer(c, (127, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.where(i < 127) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(127, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def elementwise_predicate_producer_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (127, 128)) for i, j in T.grid(128, 128): with T.block("B"): T.where(i < 127) vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(C[vi, vj]) if vi < 127: C[vi, vj] = A[vi, vj] * T.float32(2) + T.float32(1) # fmt: off @tvm.script.ir_module class Conv2dInt8_TensorCore_with_predicate_before: @T.prim_func def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer(256, "int32"), p5: T.Buffer(256, "int32"), p6: T.Buffer(256, "int32"), p7: T.Buffer((), "int32"), p8: T.Buffer(1, "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")): # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit":1024}) compute_3 = T.alloc_buffer([16, 56, 56, 256], dtype="int32") conv2d_nhwc_reindex_shared = T.alloc_buffer([50176, 256], dtype="int32", scope="shared") conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer([50176, 256], dtype="int32", scope="wmma.accumulator") pad_temp_reindex_shared = T.alloc_buffer([50176, 64], dtype="int8", scope="shared") p1_reindex_shared = T.alloc_buffer([1, 1, 256, 
64], dtype="int8", scope="shared") pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer([50176, 64], dtype="int8", scope="wmma.matrix_a") p1_reindex_shared_wmma_matrix_b = T.alloc_buffer([1, 1, 256, 64], dtype="int8", scope="wmma.matrix_b") for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"): for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"): for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"): for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2): for ax0_ax1_fused in T.serial(1024): with T.block("pad_temp_reindex_shared"): v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0_ax1_fused // 32) v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32) T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]) T.writes(pad_temp_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align":[[0, 0, 32, 16]], "meta_schedule.cooperative_fetch":4}) pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] for ax0_ax1_ax2_ax3_fused in T.serial(2048): with T.block("p1_reindex_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1, 0) v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused // 32) v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32) T.reads(p1[v2, v0, v1, v3]) T.writes(p1_reindex_shared[v0, v1, v2, v3]) T.block_attr({"buffer_dim_align":[[0, 2, 32, 16]], "meta_schedule.cooperative_fetch":3}) p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3] for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 2): for ax0_0_1, ax1_0_1 in T.grid(1, 1): with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2) v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1) T.reads(pad_temp_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]) T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 : v0_o * 16 + 16, v1_o 
* 16 : v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_a_shared"}) for ax0_1_1, ax1_1_1 in T.grid(16, 16): with T.block("pad_temp_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1]) T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1): with T.block("p1_reindex_shared_wmma.matrix_b_o"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1, 0) v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0) v3_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1) T.reads(p1_reindex_shared[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16]) T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize":"wmma_load_16x16x16_s8_b_trans_shared"}) for ax2_1, ax3_1 in T.grid(16, 16): with T.block("p1_reindex_shared_wmma.matrix_b"): v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2): with T.block("conv2d_nhwc_o"): v0 = T.axis.reduce(1, 0) v1 = T.axis.reduce(1, 0) v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4) v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4) v4_o = 
T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2) T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 : v2_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 : v3_o * 16 + 16, v4_o * 16 : v4_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 : v2_o * 16 + 16, v3_o * 16 : v3_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive":1024, "meta_schedule.thread_extent_low_inclusive":32, "warp_execution":1}) with T.init(): for ax2_1, ax3_1 in T.grid(16, 16): with T.block("conv2d_nhwc_init"): v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads() T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init]) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0 for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16): with T.block("conv2d_nhwc"): v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i]) T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"}) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.cast(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], "int32") * T.cast(p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i], "int32") for ax0_0, ax1_0 in T.grid(1, 2): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"): v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 
4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2) v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize":"wmma_store_16x16x16_s32_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2): with T.block("conv2d_nhwc_reindex_shared"): T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64) v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0) v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + ax1_1 * 64 + ax1_2 * 2 + ax1_3)) T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0]) T.writes(compute_3[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]) compute_3[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] = T.q_multiply_shift(T.max(T.min(p7[()] + T.q_multiply_shift_per_axis(conv2d_nhwc_reindex_shared[v0, v1] - p2[0, 0, 0, v1] + p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], 31, False, True, dtype="int32"), 255), 0) - p8[0], 1457846997, 31, 0, dtype="int32") for i0_12, i1_12, i2_12, i3_12 in T.grid(16, 56, 56, 256): with T.block("compute_4"): i0_13, i1_13, i2_13, i3_13 = T.axis.remap("SSSS", [i0_12, i1_12, i2_12, i3_12]) T.reads(compute_3[i0_13, i1_13, i2_13, i3_13], p9[i0_13, i1_13, i2_13, i3_13]) 
T.writes(compute[i0_13, i1_13, i2_13, i3_13]) compute[i0_13, i1_13, i2_13, i3_13] = T.max(T.min(compute_3[i0_13, i1_13, i2_13, i3_13] + T.q_multiply_shift(p9[i0_13, i1_13, i2_13, i3_13], 2101000910, 31, 0, dtype="int32"), 255), 0) @tvm.script.ir_module class Conv2dInt8_TensorCore_with_predicate_after: @T.prim_func def main(p0: T.Buffer((16, 56, 56, 64), "int8"), p1: T.Buffer((256, 1, 1, 64), "int8"), p2: T.Buffer((1, 1, 1, 256), "int32"), p3: T.Buffer((1, 1, 1, 256), "int32"), p4: T.Buffer((256,), "int32"), p5: T.Buffer((256,), "int32"), p6: T.Buffer((256,), "int32"), p7: T.Buffer((), "int32"), p8: T.Buffer((1,), "int32"), p9: T.Buffer((16, 56, 56, 256), "int32"), compute: T.Buffer((16, 56, 56, 256), "int32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit": 1024}) conv2d_nhwc_reindex_shared = T.alloc_buffer((50176, 256), "int32", scope="shared") conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer((50176, 256), "int32", scope="wmma.accumulator") pad_temp_reindex_shared = T.alloc_buffer((50176, 64), "int8", scope="shared") p1_reindex_shared = T.alloc_buffer((1, 1, 256, 64), "int8", scope="shared") pad_temp_reindex_shared_wmma_matrix_a = T.alloc_buffer((50176, 64), "int8", scope="wmma.matrix_a") p1_reindex_shared_wmma_matrix_b = T.alloc_buffer((1, 1, 256, 64), "int8", scope="wmma.matrix_b") for ax2_0_0_ax3_0_0_fused in T.thread_binding(32, thread="blockIdx.y"): for ax2_0_1_ax3_0_1_fused in T.thread_binding(196, thread="blockIdx.x"): for ax2_0_2_ax3_0_2_fused in T.thread_binding(4, thread="threadIdx.y"): for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 2): for ax0_ax1_fused in range(1024): with T.block("pad_temp_reindex_shared"): v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0_ax1_fused // 32) v1 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_fused % 32) T.reads(p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]) 
T.writes(pad_temp_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 16]], "meta_schedule.cooperative_fetch": 4}) pad_temp_reindex_shared[v0, v1] = p0[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] for ax0_ax1_ax2_ax3_fused in range(2048): with T.block("p1_reindex_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1, 0) v2 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + ax0_ax1_ax2_ax3_fused // 32) v3 = T.axis.spatial(64, ax4_0_0 * 32 + ax0_ax1_ax2_ax3_fused % 32) T.reads(p1[v2, v0, v1, v3]) T.writes(p1_reindex_shared[v0, v1, v2, v3]) T.block_attr({"buffer_dim_align": [[0, 2, 32, 16]], "meta_schedule.cooperative_fetch": 3}) p1_reindex_shared[v0, v1, v2, v3] = p1[v2, v0, v1, v3] for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 2): for ax0_0_1, ax1_0_1 in T.grid(1, 1): with T.block("pad_temp_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2) v1_o = T.axis.spatial(4, ax4_0_0 * 2 + ax4_0_1) T.reads(pad_temp_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_s8_a_shared"}) for ax0_1_1, ax1_1_1 in T.grid(16, 16): with T.block("pad_temp_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1]) T.reads(pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) pad_temp_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = pad_temp_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 2, 1): with T.block("p1_reindex_shared_wmma.matrix_b_o"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1, 0) v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax2_0) v3_o = T.axis.spatial(4, 
ax4_0_0 * 2 + ax4_0_1) T.reads(p1_reindex_shared[v0, v1, v2_o * 16:v2_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16:v2_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_s8_b_trans_shared"}) for ax2_1, ax3_1 in T.grid(16, 16): with T.block("p1_reindex_shared_wmma.matrix_b"): v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads(p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) T.writes(p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) p1_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = p1_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 1, 1, 2): with T.block("conv2d_nhwc_o"): v0 = T.axis.reduce(1, 0) v1 = T.axis.reduce(1, 0) v2_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2 + ax2_0_3 + ax2_0_4) v3_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax3_0_3 * 2 + ax3_0_4) v4_o = T.axis.reduce(4, ax4_0_0 * 2 + ax4_0_1 + ax4_0_2) T.reads(pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16:v2_o * 16 + 16, v4_o * 16:v4_o * 16 + 16], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16:v3_o * 16 + 16, v4_o * 16:v4_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16:v2_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_s8s8s32_trans", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_s32", "meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "warp_execution": 1}) with T.init(): for ax2_1, ax3_1 in T.grid(16, 16): with T.block("conv2d_nhwc_init"): v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads() T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + 
v2_i_init, v3_o * 16 + v3_i_init]) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i_init, v3_o * 16 + v3_i_init] = 0 for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16): with T.block("conv2d_nhwc"): v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i], pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o * 16 + v2_i, v3_o * 16 + v3_i] + T.Cast("int32", pad_temp_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i]) * T.Cast("int32", p1_reindex_shared_wmma_matrix_b[v0, v1, v3_o * 16 + v3_i, v4_o * 16 + v4_i]) for ax0_0, ax1_0 in T.grid(1, 2): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"): v0_o = T.axis.spatial(3136, ax2_0_0_ax3_0_0_fused // 4 * 392 + ax2_0_1_ax3_0_1_fused * 2 + ax2_0_2_ax3_0_2_fused // 2) v1_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused % 4 * 4 + ax2_0_2_ax3_0_2_fused % 2 * 2 + ax1_0) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_store_16x16x16_s32_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) conv2d_nhwc_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = 
conv2d_nhwc_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0, ax1_0, ax1_1, ax1_2, ax1_3 in T.grid(32, 1, 4, 32, 2): with T.block("conv2d_nhwc_reindex_shared"): v0 = T.axis.spatial(50176, ax2_0_0_ax3_0_0_fused // 4 * 6272 + ax2_0_1_ax3_0_1_fused * 32 + ax0) v1 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused % 4 * 64 + (ax1_0 * 256 + ax1_1 * 64 + ax1_2 * 2 + ax1_3)) T.where(((ax1_0 * 4 + ax1_1) * 32 + ax1_2) * 2 + ax1_3 < 64) T.reads(p7[()], conv2d_nhwc_reindex_shared[v0, v1], p2[0, 0, 0, v1], p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], p8[0], p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]) T.writes(compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1]) compute[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1] = T.max(T.min(T.q_multiply_shift(T.max(T.min(p7[()] + T.q_multiply_shift_per_axis(conv2d_nhwc_reindex_shared[v0, v1] - p2[0, 0, 0, v1] + p3[0, 0, 0, v1], p4[v1], p5[v1], p6[v1], 31, T.bool(False), T.bool(True)), 255), 0) - p8[0], 1457846997, 31, 0) + T.q_multiply_shift(p9[v0 // 3136, v0 % 3136 // 56, v0 % 56, v1], 2101000910, 31, 0), 255), 0) # fmt: on # pylint: enable=no-member,invalid-name,unused-variable use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True}) def test_compute_inline_elementwise(use_block_name): sch = tir.Schedule(elementwise, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") block_c = sch.get_block("C") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) assert sch.get(block_c).name_hint == "C" verify_trace_roundtrip(sch=sch, mod=elementwise) def test_compute_inline_under_loop(use_block_name): sch = tir.Schedule(elementwise_under_loop, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") block_c = sch.get_block("C") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) assert sch.get(block_c).name_hint == "C" verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop) def 
test_compute_inline_as_dce(use_block_name): sch = tir.Schedule(elementwise_standalone, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") block_c = sch.get_block("C") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_standalone_dce, sch.mod["main"]) assert sch.get(block_c).name_hint == "C" verify_trace_roundtrip(sch=sch, mod=elementwise_standalone) def test_compute_inline_multi_consumer(use_block_name): sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") block_c = sch.get_block("C") block_d = sch.get_block("D") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_multi_consumer_inlined, sch.mod["main"]) assert sch.get(block_c).name_hint == "C" assert sch.get(block_d).name_hint == "D" verify_trace_roundtrip(sch=sch, mod=elementwise_multi_producer_consumer) def test_compute_inline_fail_multi_writer(use_block_name): sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block_b) def test_reverse_compute_inline_elementwise(use_block_name): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) assert sch.get(block_b).name_hint == "B" verify_trace_roundtrip(sch=sch, mod=elementwise) def test_reverse_compute_inline_under_loop(use_block_name): sch = tir.Schedule(elementwise_under_loop, debug_mask="all") block_b = sch.get_block("B") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) assert sch.get(block_b).name_hint == "B" verify_trace_roundtrip(sch=sch, mod=elementwise_under_loop) def 
test_reverse_compute_inline_fail_as_dce(use_block_name): sch = tir.Schedule(elementwise_standalone, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block_b) def test_reverse_compute_inline_fail_multi_producer(use_block_name): sch = tir.Schedule(elementwise_multi_producer_consumer, debug_mask="all") block_d = "D" if use_block_name else sch.get_block("D") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block_d) def test_reverse_compute_inline_fail_multi_reader(use_block_name): sch = tir.Schedule(fail_multi_reader_writer, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block_c) def test_reverse_compute_multi_reverse_loads(use_block_name): sch = tir.Schedule(elementwise_multi_reverse_loads, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal(elementwise_multi_reverse_loads_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_loads) def test_reverse_compute_inline_affine_load(use_block_name): sch = tir.Schedule(elementwise_reverse_affine_load, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal(elementwise_reverse_affine_load_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load) def test_reverse_compute_inline_multi_affine_load(use_block_name): sch = tir.Schedule(elementwise_multi_reverse_affine_load, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal(elementwise_multi_reverse_affine_load_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_multi_reverse_affine_load) def 
test_reverse_compute_inline_affine_load_unit_iter(use_block_name): sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal( elementwise_reverse_affine_load_unit_iter_inlined, sch.mod["main"] ) verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter) def test_reverse_compute_inline_affine_load_unit_iter_simplified(use_block_name): sch = tir.Schedule(elementwise_reverse_affine_load_unit_iter_simplified, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(block_c) tvm.ir.assert_structural_equal( elementwise_reverse_affine_load_unit_iter_simplified_inlined, sch.mod["main"] ) verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_load_unit_iter_simplified) @pytest.mark.parametrize("reverse_order", [True, False]) def test_reverse_compute_inline_affine_chain(use_block_name, reverse_order): sch = tir.Schedule(elementwise_reverse_affine_chain, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") block_d = "D" if use_block_name else sch.get_block("D") if reverse_order: sch.reverse_compute_inline(block_d) sch.reverse_compute_inline(block_c) else: sch.reverse_compute_inline(block_c) sch.reverse_compute_inline(block_d) tvm.ir.assert_structural_equal(elementwise_reverse_affine_chain_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_reverse_affine_chain) def test_reverse_compute_fail_non_affine_load(use_block_name): sch = tir.Schedule(elementwise_reverse_non_affine_load, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block_c) def test_reverse_compute_fail_multi_reverse_loads(use_block_name): sch = tir.Schedule(elementwise_multi_loads, debug_mask="all") block_c = "C" if use_block_name else sch.get_block("C") with 
pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block_c) def test_opaque_access_load(use_block_name): sch = tir.Schedule(opaque_access_load, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block_b) def test_opaque_access_store(use_block_name): sch = tir.Schedule(opaque_access_store, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block_b) def test_buffer_matched(use_block_name): sch = tir.Schedule(buffer_matched, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block_b) def test_output_block(use_block_name): sch = tir.Schedule(matmul_relu, debug_mask="all") block = sch.get_block("compute") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block) sch = tir.Schedule(elementwise_output, debug_mask="all") block = sch.get_block("B") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block) block = sch.get_block("C") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(block) def test_compute_inline_predicate(use_block_name): sch = tir.Schedule(elementwise_predicate, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_predicate_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_predicate) def test_compute_inline_multi_loads(use_block_name): sch = tir.Schedule(elementwise_multi_loads, debug_mask="all") block_b = "B" if use_block_name else sch.get_block("B") sch.compute_inline(block_b) tvm.ir.assert_structural_equal(elementwise_multi_loads_inlined, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_multi_loads) def test_compute_inline_with_opaque_access(use_block_name): """Test not rewrite opaque reads/writes after irrelavant 
compute inline""" sch = tir.Schedule(access_opaque_ptr_then_elemwise, debug_mask="all") BB = "BB" if use_block_name else sch.get_block("BB") sch.compute_inline(BB) tvm.ir.assert_structural_equal(access_opaque_ptr_then_elemwise_inline, sch.mod["main"]) def test_inline_block_with_init(): sch = tir.Schedule(inline_block_with_init, debug_mask="all") block = sch.get_block(name="tensor_rf", func_name="main") with pytest.raises(tvm.tir.ScheduleError): sch.compute_inline(block=block) def test_compute_inline_opaque_access_with_tvm_access_ptr(use_block_name): """Test opaque access with tvm_access_ptr after compute inline""" sch = tir.Schedule(exp_exp_opaque_access_with_tvm_access_ptr, debug_mask="all") compute = "compute" if use_block_name else sch.get_block("compute") sch.compute_inline(compute) tvm.ir.assert_structural_equal( exp_exp_opaque_access_with_tvm_access_ptr_inlined, sch.mod["main"] ) def test_reverse_compute_inline_overcomputed_producer(use_block_name): """Test reverse compute inline overcomputed producer""" sch = tir.Schedule(elementwise_overcomputed_producer, debug_mask="all") compute = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(compute) tvm.ir.assert_structural_equal( elementwise_overcomputed_producer_reverse_inlined, sch.mod["main"] ) def test_reverse_compute_inline_overcomputed_producer_simplify_predicate(use_block_name): """Test reverse compute inline overcomputed producer where the predicate should be simplified""" sch = tir.Schedule(elementwise_overcomputed_producer_simplify_predicate, debug_mask="all") compute = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(compute) tvm.ir.assert_structural_equal( elementwise_overcomputed_producer_simplify_predicate_reverse_inlined, sch.mod["main"] ) def test_reverse_compute_inline_overcomputed_producer_injective_load(use_block_name): """Test reverse compute inline overcomputed producer with injective buffer load""" sch = 
tir.Schedule(elementwise_overcomputed_producer_injective_load, debug_mask="all") compute = "C" if use_block_name else sch.get_block("C") sch.reverse_compute_inline(compute) tvm.ir.assert_structural_equal( elementwise_overcomputed_producer_injective_load_reverse_inlined, sch.mod["main"] ) def test_reverse_compute_inline_error_producer_not_cover_consumer(use_block_name): """Test reverse compute inline failure when the inlined block iter domains are not covered by its producer """ sch = tir.Schedule(elementwise_producer_not_cover_consumer, debug_mask="all") compute = "C" if use_block_name else sch.get_block("C") with pytest.raises(tvm.tir.ScheduleError): sch.reverse_compute_inline(compute) def test_reverse_compute_inline_producer_predicate_allowed(): """Test a case where reverse compute inline is allowed even though the producer has a non-trivial predicate. """ sch = tir.Schedule(elementwise_predicate_producer, debug_mask="all") sch.reverse_compute_inline(sch.get_block("C")) tvm.ir.assert_structural_equal(elementwise_predicate_producer_inlined, sch.mod["main"]) def test_reverse_compute_inline_producer_predicate_disallowed(): """Test reverse compute inline failure when the producer has a non-trivial predicate that cannot be implied by the synthesized predicate of the new inlined block. 
""" sch = tir.Schedule(Conv2dInt8_TensorCore_with_predicate_before, debug_mask="all") sch.reverse_compute_inline(sch.get_block("compute_4")) tvm.ir.assert_structural_equal( Conv2dInt8_TensorCore_with_predicate_after["main"], sch.mod["main"] ) def test_compute_inline_softmax(): # fmt: off @T.prim_func def before(p_lv44: T.handle, p_output0: T.handle): T.func_attr({"tir.noalias": T.bool(True)}) n, m = T.int64(), T.int64() lv44 = T.match_buffer(p_lv44, (T.int64(1), T.int64(32), n, m)) var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), T.int64(32), n, m), "float16") T_softmax_maxelem = T.alloc_buffer((T.int64(1), T.int64(32), n)) T_softmax_exp = T.alloc_buffer((T.int64(1), T.int64(32), n, m)) T_softmax_expsum = T.alloc_buffer((T.int64(1), T.int64(32), n)) var_T_softmax_norm_intermediate = T.alloc_buffer((T.int64(1), T.int64(32), n, m)) for i0, i1, i2, k in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_maxelem"): v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) T.reads(lv44[v_i0, v_i1, v_i2, v_k]) T.writes(T_softmax_maxelem[v_i0, v_i1, v_i2]) with T.init(): T_softmax_maxelem[v_i0, v_i1, v_i2] = T.float32(-3.4028234663852886e+38) T_softmax_maxelem[v_i0, v_i1, v_i2] = T.max(T_softmax_maxelem[v_i0, v_i1, v_i2], lv44[v_i0, v_i1, v_i2, v_k]) for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_exp"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(lv44[v_i0, v_i1, v_i2, v_i3], T_softmax_maxelem[v_i0, v_i1, v_i2]) T.writes(T_softmax_exp[v_i0, v_i1, v_i2, v_i3]) T_softmax_exp[v_i0, v_i1, v_i2, v_i3] = T.exp(lv44[v_i0, v_i1, v_i2, v_i3] - T_softmax_maxelem[v_i0, v_i1, v_i2]) for i0, i1, i2, k in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_expsum"): v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) T.reads(T_softmax_exp[v_i0, v_i1, v_i2, v_k]) T.writes(T_softmax_expsum[v_i0, v_i1, v_i2]) with T.init(): T_softmax_expsum[v_i0, v_i1, v_i2] = 
T.float32(0) T_softmax_expsum[v_i0, v_i1, v_i2] = T_softmax_expsum[v_i0, v_i1, v_i2] + T_softmax_exp[v_i0, v_i1, v_i2, v_k] for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_norm"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(T_softmax_exp[v_i0, v_i1, v_i2, v_i3], T_softmax_expsum[v_i0, v_i1, v_i2]) T.writes(var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) T.block_attr({"axis": 3}) var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3] = T_softmax_exp[v_i0, v_i1, v_i2, v_i3] / T_softmax_expsum[v_i0, v_i1, v_i2] for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(32), n, m): with T.block("compute"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) T.writes(var_compute_intermediate[v_i0, v_i1, v_i2, v_i3]) var_compute_intermediate[v_i0, v_i1, v_i2, v_i3] = T.Cast("float16", var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) @T.prim_func def after(p_lv44: T.handle, p_output0: T.handle): T.func_attr({"tir.noalias": T.bool(True)}) n, m = T.int64(), T.int64() lv44 = T.match_buffer(p_lv44, (T.int64(1), T.int64(32), n, m)) var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), T.int64(32), n, m), "float16") # with T.block("root"): T_softmax_maxelem = T.alloc_buffer((T.int64(1), T.int64(32), n)) T_softmax_expsum = T.alloc_buffer((T.int64(1), T.int64(32), n)) var_T_softmax_norm_intermediate = T.alloc_buffer((T.int64(1), T.int64(32), n, m)) for i0, i1, i2, k in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_maxelem"): v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) T.reads(lv44[v_i0, v_i1, v_i2, v_k]) T.writes(T_softmax_maxelem[v_i0, v_i1, v_i2]) with T.init(): T_softmax_maxelem[v_i0, v_i1, v_i2] = T.float32(-3.4028234663852886e+38) T_softmax_maxelem[v_i0, v_i1, v_i2] = T.max(T_softmax_maxelem[v_i0, v_i1, v_i2], lv44[v_i0, v_i1, v_i2, v_k]) for i0, i1, i2, k in T.grid(T.int64(1), 
T.int64(32), n, m): with T.block("T_softmax_expsum"): v_i0, v_i1, v_i2, v_k = T.axis.remap("SSSR", [i0, i1, i2, k]) T.reads(lv44[v_i0, v_i1, v_i2, v_k], T_softmax_maxelem[v_i0, v_i1, v_i2]) T.writes(T_softmax_expsum[v_i0, v_i1, v_i2]) with T.init(): T_softmax_expsum[v_i0, v_i1, v_i2] = T.float32(0) T_softmax_expsum[v_i0, v_i1, v_i2] = T_softmax_expsum[v_i0, v_i1, v_i2] + T.exp(lv44[v_i0, v_i1, v_i2, v_k] - T_softmax_maxelem[v_i0, v_i1, v_i2]) for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(32), n, m): with T.block("T_softmax_norm"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(lv44[v_i0, v_i1, v_i2, v_i3], T_softmax_maxelem[v_i0, v_i1, v_i2], T_softmax_expsum[v_i0, v_i1, v_i2]) T.writes(var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) T.block_attr({"axis": 3}) var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3] = T.exp(lv44[v_i0, v_i1, v_i2, v_i3] - T_softmax_maxelem[v_i0, v_i1, v_i2]) / T_softmax_expsum[v_i0, v_i1, v_i2] for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(32), n, m): with T.block("compute"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) T.writes(var_compute_intermediate[v_i0, v_i1, v_i2, v_i3]) var_compute_intermediate[v_i0, v_i1, v_i2, v_i3] = T.Cast("float16", var_T_softmax_norm_intermediate[v_i0, v_i1, v_i2, v_i3]) # fmt: on sch = tir.Schedule(before) sch.compute_inline(sch.get_block("T_softmax_exp")) tvm.ir.assert_structural_equal(after, sch.mod["main"]) def test_reverse_compute_inline_layer_norm(): # fmt: off @T.prim_func def before(p_lv6: T.handle, weight1: T.Buffer((T.int64(2560),), "float32"), bias: T.Buffer((T.int64(2560),), "float32"), p_output0: T.handle): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) n = T.int64() lv6 = T.match_buffer(p_lv6, (T.int64(1), n, T.int64(2560))) var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), n, T.int64(2560)), "float16") A_red_temp_v0_shared = 
T.alloc_buffer((T.int64(1), n), scope="shared") A_red_temp_v1_shared = T.alloc_buffer((T.int64(1), n), scope="shared") var_T_layer_norm_intermediate = T.alloc_buffer((T.int64(1), n, T.int64(2560))) for ax0_ax1_fused in T.thread_binding(n, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step": 256, "pragma_unroll_explicit": 1}): for ax0, ax1, ax2_0 in T.grid(T.int64(1), T.int64(1), T.int64(10)): for ax2_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"): with T.block("A_red_temp"): v_ax0 = T.axis.spatial(T.int64(1), ax0) v_ax1 = T.axis.spatial(n, ax0_ax1_fused + ax1) v_k2 = T.axis.reduce(T.int64(2560), ax2_0 * T.int64(256) + ax2_1) T.reads(lv6[v_ax0, v_ax1, v_k2]) T.writes(A_red_temp_v0_shared[v_ax0, v_ax1], A_red_temp_v1_shared[v_ax0, v_ax1]) with T.init(): A_red_temp_v0_shared[v_ax0, v_ax1] = T.float32(0) A_red_temp_v1_shared[v_ax0, v_ax1] = T.float32(0) v_A_red_temp_v0: T.float32 = A_red_temp_v0_shared[v_ax0, v_ax1] + lv6[v_ax0, v_ax1, v_k2] v_A_red_temp_v1: T.float32 = A_red_temp_v1_shared[v_ax0, v_ax1] + lv6[v_ax0, v_ax1, v_k2] * lv6[v_ax0, v_ax1, v_k2] A_red_temp_v0_shared[v_ax0, v_ax1] = v_A_red_temp_v0 A_red_temp_v1_shared[v_ax0, v_ax1] = v_A_red_temp_v1 for ax2_0 in range(T.int64(10)): for ax2_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"): with T.block("T_layer_norm"): v_ax0 = T.axis.spatial(T.int64(1), T.int64(0)) v_ax1 = T.axis.spatial(n, ax0_ax1_fused) v_ax2 = T.axis.spatial(T.int64(2560), ax2_0 * T.int64(256) + ax2_1) T.reads(lv6[v_ax0, v_ax1, v_ax2], A_red_temp_v0_shared[v_ax0, v_ax1], A_red_temp_v1_shared[v_ax0, v_ax1], weight1[v_ax2], bias[v_ax2]) T.writes(var_T_layer_norm_intermediate[v_ax0, v_ax1, v_ax2]) var_T_layer_norm_intermediate[v_ax0, v_ax1, v_ax2] = (lv6[v_ax0, v_ax1, v_ax2] - A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002)) * T.rsqrt(A_red_temp_v1_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002) - A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002) * 
(A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002)) + T.float32(1.0000000000000001e-05)) * weight1[v_ax2] + bias[v_ax2] for i0, i1, i2 in T.grid(T.int64(1), n, T.int64(2560)): with T.block("compute"): v_i0, v_i1, v_i2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads(var_T_layer_norm_intermediate[v_i0, v_i1, v_i2]) T.writes(var_compute_intermediate[v_i0, v_i1, v_i2]) var_compute_intermediate[v_i0, v_i1, v_i2] = T.Cast("float16", var_T_layer_norm_intermediate[v_i0, v_i1, v_i2]) @T.prim_func def after(p_lv6: T.handle, weight1: T.Buffer((T.int64(2560),), "float32"), bias: T.Buffer((T.int64(2560),), "float32"), p_output0: T.handle): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) n = T.int64() lv6 = T.match_buffer(p_lv6, (T.int64(1), n, T.int64(2560))) var_compute_intermediate = T.match_buffer(p_output0, (T.int64(1), n, T.int64(2560)), "float16") # with T.block("root"): A_red_temp_v0_shared = T.alloc_buffer((T.int64(1), n), scope="shared") A_red_temp_v1_shared = T.alloc_buffer((T.int64(1), n), scope="shared") for ax0_ax1_fused in T.thread_binding(n, thread="blockIdx.x", annotations={"pragma_auto_unroll_max_step": 256, "pragma_unroll_explicit": 1}): for ax0, ax1, ax2_0 in T.grid(T.int64(1), T.int64(1), T.int64(10)): for ax2_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"): with T.block("A_red_temp"): v_ax0 = T.axis.spatial(T.int64(1), ax0) v_ax1 = T.axis.spatial(n, ax0_ax1_fused + ax1) v_k2 = T.axis.reduce(T.int64(2560), ax2_0 * T.int64(256) + ax2_1) T.reads(lv6[v_ax0, v_ax1, v_k2]) T.writes(A_red_temp_v0_shared[v_ax0, v_ax1], A_red_temp_v1_shared[v_ax0, v_ax1]) with T.init(): A_red_temp_v0_shared[v_ax0, v_ax1] = T.float32(0) A_red_temp_v1_shared[v_ax0, v_ax1] = T.float32(0) v_A_red_temp_v0: T.float32 = A_red_temp_v0_shared[v_ax0, v_ax1] + lv6[v_ax0, v_ax1, v_k2] v_A_red_temp_v1: T.float32 = A_red_temp_v1_shared[v_ax0, v_ax1] + lv6[v_ax0, v_ax1, v_k2] * lv6[v_ax0, v_ax1, v_k2] A_red_temp_v0_shared[v_ax0, v_ax1] = 
v_A_red_temp_v0 A_red_temp_v1_shared[v_ax0, v_ax1] = v_A_red_temp_v1 for ax2_0 in range(T.int64(10)): for ax2_1 in T.thread_binding(T.int64(256), thread="threadIdx.x"): with T.block("T_layer_norm"): v_ax0 = T.axis.spatial(T.int64(1), T.int64(0)) v_ax1 = T.axis.spatial(n, ax0_ax1_fused) v_ax2 = T.axis.spatial(T.int64(2560), ax2_0 * T.int64(256) + ax2_1) T.reads(lv6[v_ax0, v_ax1, v_ax2], A_red_temp_v0_shared[v_ax0, v_ax1], A_red_temp_v1_shared[v_ax0, v_ax1], weight1[v_ax2], bias[v_ax2]) T.writes(var_compute_intermediate[v_ax0, v_ax1, v_ax2]) var_compute_intermediate[v_ax0, v_ax1, v_ax2] = T.Cast("float16", (lv6[v_ax0, v_ax1, v_ax2] - A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002)) * T.rsqrt(A_red_temp_v1_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002) - A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002) * (A_red_temp_v0_shared[v_ax0, v_ax1] * T.float32(0.00039062500000000002)) + T.float32(1.0000000000000001e-05)) * weight1[v_ax2] + bias[v_ax2]) # fmt: on sch = tir.Schedule(before) sch.reverse_compute_inline(sch.get_block("compute")) tvm.ir.assert_structural_equal(after, sch.mod["main"]) if __name__ == "__main__": tvm.testing.main()
77,609
52.524138
487
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_unroll_loop.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te from tvm.script import tir as T import os def test_unroll_loop(): ib = tvm.tir.ir_builder.create() dtype = "int64" n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: with ib.for_range(n, n + 2, name="i") as i: with ib.for_range(0, 8, name="i", kind="unroll") as j: Aptr[j + 1] = Aptr[i] + 1 stmt = ib.get() mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt)) assert isinstance(stmt, tvm.tir.For) with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 16}}): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert not isinstance(ret, tvm.tir.For) with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 15}}): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert isinstance(ret, tvm.tir.For) with tvm.transform.PassContext( config={"tir.UnrollLoop": {"auto_max_step": 16, "explicit_unroll": False}} ): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert isinstance(ret, tvm.tir.For) assert ret.kind == tvm.tir.ForKind.UNROLLED ib = tvm.tir.ir_builder.create() ib.scope_attr(tvm.tir.const(0, "int32"), "pragma_auto_unroll_max_step", 16) ib.emit(stmt) wrapped = 
ib.get() wrapped = tvm.tir.SeqStmt([wrapped, stmt]) assert isinstance(ret, tvm.tir.For) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], wrapped)) with tvm.transform.PassContext( config={"tir.UnrollLoop": {"auto_max_depth": 8, "explicit_unroll": False}} ): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert isinstance(ret[0], tvm.tir.For) assert ret[0].kind == tvm.tir.ForKind.UNROLLED assert isinstance(ret[1], tvm.tir.For) assert ret[1].kind != tvm.tir.ForKind.UNROLLED def test_unroll_fake_loop(): ib = tvm.tir.ir_builder.create() dtype = "int32" n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: with ib.for_range(0, 1, name="i") as i: Aptr[i * 2] = 3 with ib.for_range(0, 10, name="j") as j: Aptr[j + 1] = Aptr[i] + 1 stmt = ib.get() mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt)) with tvm.transform.PassContext( config={ "tir.UnrollLoop": {"auto_max_depth": 8, "auto_max_extent": 1, "explicit_unroll": False} } ): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert isinstance(ret[0], tvm.tir.BufferStore) def test_unroll_single_count_loops(): n = te.size_var("n") A = te.placeholder((n,), name="A") B = te.compute((n,), lambda *i: A(*i), name="B") s = te.create_schedule(B.op) s = s.normalize() dom_map = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, dom_map) # all parameters to UnrolLoops are default values except for # auto_unroll_max_extent which has been set to 1 (default:0) mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([], stmt)) with tvm.transform.PassContext(config={"tir.UnrollLoop": {"auto_max_step": 1}}): ret = tvm.tir.transform.UnrollLoop()(mod)["main"].body assert ret == stmt def test_unroll_allocations(): @tvm.script.ir_module class before: @T.prim_func def main(): for i in T.unroll(2): with T.decl_buffer([16], "float32") as buf: buf[0] = 0.0 @tvm.script.ir_module class expected: @T.prim_func def main(): with T.decl_buffer([16], "float32") as buf1: 
buf1[0] = 0.0 with T.decl_buffer([16], "float32") as buf2: buf2[0] = 0.0 after = tvm.tir.transform.UnrollLoop()(before) tvm.ir.assert_structural_equal(after, expected) def test_unroll_local_access(): @tvm.script.ir_module class Before: @T.prim_func def main(B: T.Buffer((64,), "float32")): for bx in T.thread_binding(4, thread="blockIdx.x"): for tx in T.thread_binding(4, thread="threadIdx.x"): A_local_data = T.allocate([4], dtype="float32", scope="local") A_local = T.Buffer([4], dtype="float32", data=A_local_data) for i in T.serial(4): A_local[i] = T.float32(i) @tvm.script.ir_module class Expected: @T.prim_func def main(B: T.Buffer((64,), "float32")): for bx in T.thread_binding(4, thread="blockIdx.x"): for tx in T.thread_binding(4, thread="threadIdx.x"): A_local_data = T.allocate([4], dtype="float32", scope="local") A_local = T.Buffer([4], dtype="float32", data=A_local_data) A_local[0] = T.float32(0) A_local[1] = T.float32(1) A_local[2] = T.float32(2) A_local[3] = T.float32(3) with tvm.transform.PassContext( config={ "tir.UnrollLoop": { "auto_max_depth": 0, "auto_max_extent": 1, "explicit_unroll": True, "unroll_local_access": True, } } ): after = tvm.tir.transform.UnrollLoop()(Before) after = tvm.tir.transform.Simplify()(after) tvm.ir.assert_structural_equal(after, Expected) if __name__ == "__main__": test_unroll_local_access() test_unroll_loop() test_unroll_fake_loop() test_unroll_single_count_loops() test_unroll_allocations()
6,493
34.293478
99
py
tvm
tvm-main/tests/python/unittest/test_crt.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test C runtime""" import pathlib import pytest import numpy as np import tvm import tvm.relay import tvm.testing from tvm.target import Target from tvm.relay.backend import Runtime from tvm.relay.backend import Executor pytest.importorskip("pty") BUILD = True DEBUG = False TARGET = tvm.target.target.micro("host") def _make_sess_from_op(temp_dir, op_name, sched, arg_bufs): runtime = Runtime("crt", {"system-lib": True}) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): mod = tvm.build(sched, arg_bufs, Target(TARGET, TARGET), runtime=runtime, name=op_name) return _make_session(temp_dir, mod) def _make_session(temp_dir, mod): template_project_dir = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) project = tvm.micro.generate_project( template_project_dir, mod, temp_dir / "project", {"verbose": 1} ) project.build() project.flash() return tvm.micro.Session(project.transport()) def _make_add_sess(temp_dir): a = tvm.te.placeholder((2,), dtype="int8") b = tvm.te.placeholder((1,), dtype="int8") c = tvm.te.compute(a.shape, lambda i: a[i] + b[0], name="c") sched = tvm.te.create_schedule(c.op) return _make_sess_from_op(temp_dir, "add", sched, [a, b, c]) 
@tvm.testing.requires_micro def test_compile_runtime(): """Test compiling the on-device runtime.""" temp_dir = tvm.contrib.utils.tempdir() with _make_add_sess(temp_dir) as sess: a_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) assert (a_data.numpy() == np.array([2, 3])).all() b_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) assert (b_data.numpy() == np.array([4])).all() c_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) assert (c_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() system_lib.get_function("add")(a_data, b_data, c_data) assert (c_data.numpy() == np.array([6, 7])).all() @tvm.testing.requires_micro def test_compile_runtime_llvm(): """Test targeting the on-device runtime with the llvm backend.""" global TARGET old_target = TARGET try: # NOTE: test_compile_runtime uses the "c" backend--re run it using the llvm backend. target_str = str(TARGET) assert target_str.startswith("c ") TARGET = tvm.target.Target("llvm " + str(TARGET)[len("c ") :]) test_compile_runtime() finally: TARGET = old_target @tvm.testing.requires_micro def test_reset(): """Test when the remote end resets during a session.""" temp_dir = tvm.contrib.utils.tempdir() with _make_add_sess(temp_dir) as sess: try: sess._rpc.get_function("tvm.testing.reset_server")() assert False, "expected to raise SessionTerminatedError; did not raise" except tvm.micro.SessionTerminatedError: pass @tvm.testing.requires_micro def test_graph_executor(): """Test use of the graph executor with microTVM.""" temp_dir = tvm.contrib.utils.tempdir() relay_mod = tvm.relay.fromtext( """ #[version = "0.0.5"] def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) { %0 = %a + %b; %0 }""" ) runtime = Runtime("crt", {"system-lib": True}) with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): factory = tvm.relay.build(relay_mod, target=TARGET, runtime=runtime) def do_test(graph_mod): a_data = 
tvm.nd.array(np.array([2, 3], dtype="uint8"), device=sess.device) assert (a_data.numpy() == np.array([2, 3])).all() b_data = tvm.nd.array(np.array([4, 7], dtype="uint8"), device=sess.device) assert (b_data.numpy() == np.array([4, 7])).all() assert graph_mod.get_input_index("a") == 0 assert graph_mod.get_input_index("b") == 1 graph_mod.run(a=a_data, b=b_data) out = graph_mod.get_output(0) assert (out.numpy() == np.array([6, 10])).all() with _make_session(temp_dir, factory) as sess: graph_mod_local = tvm.micro.create_local_graph_executor( factory.get_graph_json(), sess.get_system_lib(), sess.device ) do_test(graph_mod_local) graph_mod = tvm.contrib.graph_executor.create( factory.get_graph_json(), sess.get_system_lib(), sess.device ) do_test(graph_mod) @tvm.testing.requires_micro def test_aot_executor(): """Test use of the AOT executor with microTVM.""" temp_dir = tvm.contrib.utils.tempdir() relay_mod = tvm.relay.fromtext( """ #[version = "0.0.5"] def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) { %0 = %a + %b; %0 }""" ) runtime = Runtime("crt", {"system-lib": True}) executor = Executor("aot") with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): factory = tvm.relay.build(relay_mod, target=TARGET, runtime=runtime, executor=executor) def do_test(): aot_executor = tvm.micro.create_local_aot_executor(sess) assert aot_executor.get_input_index("a") == 0 assert aot_executor.get_input_index("b") == 1 assert aot_executor.get_input_name(0) == "a" assert aot_executor.get_input_name(1) == "b" shape_dict, dtype_dict = aot_executor.get_input_info() assert shape_dict == {"a": (1, 2), "b": (1, 2)} assert dtype_dict == {"a": "uint8", "b": "uint8"} assert aot_executor.get_num_inputs() == 2 assert aot_executor.get_num_outputs() == 1 a_np = np.array([[2, 3]], dtype="uint8") b_np = np.array([[4, 7]], dtype="uint8") aot_executor.get_input("a").copyfrom(a_np) b_data = aot_executor.get_input("b").copyfrom(b_np) aot_executor.run() out = 
aot_executor.get_output(0) assert (out.numpy() == np.array([6, 10])).all() b_np_new = np.array([[5, 8]]) aot_executor.set_input("b", b_np_new) assert (b_data.numpy() == b_np_new).all() with _make_session(temp_dir, factory) as sess: do_test() @tvm.testing.requires_micro def test_aot_executor_usmp_const_pool(): """Test the AOT executor with microTVM using USMP to generate a constant data pool.""" temp_dir = tvm.contrib.utils.tempdir() relay_mod = tvm.relay.fromtext( """ #[version = "0.0.5"] def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8], %c : Tensor[(1,2), uint8]) { %0 = %a + %b; %1 = %0 + %c; %1 }""" ) runtime = Runtime("crt", {"system-lib": True}) executor = Executor("aot") main_func = relay_mod["main"] type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params} c_np = np.array([[8, 9]], dtype="uint8").astype(type_dict["c"]) params = {"c": c_np} with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": True} ): factory = tvm.relay.build( relay_mod, target=TARGET, runtime=runtime, executor=executor, params=params, ) def do_test(): try: aot_executor = tvm.micro.create_local_aot_executor(sess) except tvm._ffi.base.TVMError as excpt: raise excpt assert aot_executor.get_input_index("a") == 0 assert aot_executor.get_input_index("b") == 1 assert aot_executor.get_num_inputs() == 2 assert aot_executor.get_num_outputs() == 1 a_np = np.array([[2, 3]], dtype="uint8") b_np = np.array([[4, 7]], dtype="uint8") aot_executor.get_input("a").copyfrom(a_np) b_data = aot_executor.get_input("b").copyfrom(b_np) aot_executor.run() out = aot_executor.get_output(0) assert (out.numpy() == np.array([14, 19])).all() b_np_new = np.array([[5, 8]]) aot_executor.set_input("b", b_np_new) assert (b_data.numpy() == b_np_new).all() with _make_session(temp_dir, factory) as sess: do_test() @tvm.testing.requires_micro def test_std_math_functions(): """Verify that standard math functions can be used.""" temp_dir = 
tvm.contrib.utils.tempdir() with _make_add_sess(temp_dir) as sess: a_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device) assert (a_data.numpy() == np.array([2, 3])).all() b_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device) assert (b_data.numpy() == np.array([4])).all() c_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device) assert (c_data.numpy() == np.array([0, 0])).all() system_lib = sess.get_system_lib() system_lib.get_function("add")(a_data, b_data, c_data) temp_dir = tvm.contrib.utils.tempdir() a = tvm.te.placeholder((2,), dtype="float32", name="a") b = tvm.te.compute(a.shape, lambda i: tvm.te.exp(a[i]), name="b") s = tvm.te.create_schedule(b.op) with _make_sess_from_op(temp_dir, "myexpf", s, [a, b]) as sess: a_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device) b_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device) lib = sess.get_system_lib() func = lib["myexpf"] func(a_data, b_data) np.testing.assert_allclose(b_data.numpy(), np.array([7.389056, 20.085537])) @tvm.testing.requires_micro def test_platform_timer(): """Verify the platform timer can be used to time remote functions.""" temp_dir = tvm.contrib.utils.tempdir() a = tvm.te.placeholder((2,), dtype="float32", name="a") b = tvm.te.compute(a.shape, lambda i: tvm.te.exp(a[i]), name="b") s = tvm.te.create_schedule(b.op) with _make_sess_from_op(temp_dir, "myexpf", s, [a, b]) as sess: a_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device) b_data = tvm.nd.array(np.array([2.0, 3.0], dtype="float32"), device=sess.device) lib = sess.get_system_lib() time_eval_f = lib.time_evaluator( "myexpf", sess.device, number=2000, repeat=3, min_repeat_ms=40 ) result = time_eval_f(a_data, b_data) assert result.mean > 0 assert len(result.results) == 3 @tvm.testing.requires_micro def test_autotune(): """Verify that autotune works with micro.""" runtime = Runtime("crt", {"system-lib": True}) data 
= tvm.relay.var("data", tvm.relay.TensorType((1, 3, 64, 64), "float32")) weight = tvm.relay.var("weight", tvm.relay.TensorType((8, 3, 5, 5), "float32")) y = tvm.relay.nn.conv2d( data, weight, padding=(2, 2), kernel_size=(5, 5), kernel_layout="OIHW", out_dtype="float32", ) f = tvm.relay.Function([data, weight], y) mod = tvm.IRModule.from_expr(f) mod = tvm.relay.transform.InferType()(mod) main_func = mod["main"] shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params} type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params} weight_data = np.ones(shape_dict["weight"]).astype(type_dict["weight"]) input_data = np.ones(shape_dict["data"]).astype(type_dict["data"]) params = {"weight": weight_data} inputs = {"data": input_data} target = tvm.target.target.micro("host") template_project_dir = pathlib.Path(tvm.micro.get_microtvm_template_projects("crt")) pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}) with pass_context: tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target) assert len(tasks) > 0 module_loader = tvm.micro.AutoTvmModuleLoader( template_project_dir=template_project_dir, project_options={}, ) builder = tvm.autotvm.LocalBuilder( n_parallel=1, build_kwargs={"build_option": {"tir.disable_vectorize": True}}, do_fork=True, build_func=tvm.micro.autotvm_build_func, runtime=runtime, ) runner = tvm.autotvm.LocalRunner(number=1, repeat=1, module_loader=module_loader) measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner) tune_log_file = pathlib.Path("crt_autotune.log") if tune_log_file.exists(): tune_log_file.unlink() num_trials = 10 for task in tasks: tuner = tvm.autotvm.tuner.GATuner(task) tuner.tune( n_trial=num_trials, measure_option=measure_option, callbacks=[ tvm.autotvm.callback.log_to_file(str(tune_log_file)), tvm.autotvm.callback.progress_bar(num_trials, si_prefix="M"), ], si_prefix="M", ) assert tuner.best_flops > 0 # TODO(mehrdadh): 
commented due to autotuning errors # check_tune_log(tune_log_file) # Build without tuning with pass_context: lowered = tvm.relay.build(mod, target=TARGET, runtime=runtime, params=params) temp_dir = tvm.contrib.utils.tempdir() with _make_session(temp_dir, lowered) as sess: graph_mod = tvm.micro.create_local_graph_executor( lowered.get_graph_json(), sess.get_system_lib(), sess.device ) graph_mod.set_input(**lowered.get_params()) graph_mod.run(**inputs) expected_output = graph_mod.get_output(0).numpy() del graph_mod # Build using autotune logs with tvm.autotvm.apply_history_best(str(tune_log_file)): with pass_context: lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params) temp_dir = tvm.contrib.utils.tempdir() with _make_session(temp_dir, lowered_tuned) as sess: graph_mod = tvm.micro.create_local_graph_executor( lowered_tuned.get_graph_json(), sess.get_system_lib(), sess.device ) graph_mod.set_input(**lowered_tuned.get_params()) graph_mod.run(**inputs) output = graph_mod.get_output(0).numpy() del graph_mod tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5) if __name__ == "__main__": tvm.testing.main()
15,168
33.553531
100
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_relay_tir_compute.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm import tvm.testing import tvm.topi.testing from tvm import autotvm from tvm import meta_schedule as ms from tvm import relay, te from tvm.relay.testing.temp_op_attr import TempOpAttr from tvm.script import tir as T def compute_tir_conv2d_nchw_oihw(data_shape, weight_shape, dtype): assert dtype == "float32" OC, IC, FH, FW = weight_shape padding = (0, 0, 0, 0) strides = (1, 1) dilation = (1, 1) output_shape = ( data_shape[0], weight_shape[0], (data_shape[2] - ((weight_shape[2] - 1) * dilation[0] + 1) + padding[0] + padding[1]) // strides[0] + 1, (data_shape[3] - ((weight_shape[3] - 1) * dilation[1] + 1) + padding[2] + padding[3]) // strides[1] + 1, ) N, K, BH, BW = output_shape # fmt: off @T.prim_func def conv2d(a: T.handle, filt: T.handle, b: T.handle) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, data_shape, dtype=dtype) Filter = T.match_buffer(filt, weight_shape, dtype=dtype) B = T.match_buffer(b, output_shape, dtype=dtype) for n, k, bh, bw in T.grid(N, K, BH, BW): with T.block("init"): vn, vk, vbh, vbw = T.axis.remap("SSSS", [n, k, bh, bw]) B[vn, vk, vbh, vbw] = T.float32(0) for ic, fh, fw in T.grid(IC, FH, FW): with 
T.block("update"): vn, vk, vbh, vbw, vc, vfh, vfw = T.axis.remap("SSSSRRR", [n, k, bh, bw, ic, fh, fw]) B[vn, vk, vbh, vbw] = B[vn, vk, vbh, vbw] + A[vn, vc, vbh + vfh, vbw + vfw] * Filter[vk, vc, vfh, vfw] # fmt: on return conv2d def schedule_tir_conv2d_nchw_oihw(sch): update_block = sch.get_block("update") vn, vk, vbh, vbw, vc, vfh, vfw = sch.get_loops(update_block) sch.split(vk, factors=(None, 32)) @autotvm.register_topi_compute("test/conv2d_1") def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype): prim_func = compute_tir_conv2d_nchw_oihw(input.shape, filter.shape, input.dtype) output = te.extern_primfunc([input, filter], prim_func, name="tir") return output @autotvm.register_topi_schedule("test/conv2d_1") def _schedule_conv2d_1(cfg, outs): s = te.create_schedule([x.op for x in outs]) return s @tvm.target.override_native_generic_func("test_conv2d_strategy") def _tmp_strategy(attrs, inputs, out_type, target): strategy = relay.op.OpStrategy() if attrs.groups == 1 and attrs.data_layout == "NCHW" and attrs.kernel_layout == "OIHW": strategy.add_implementation( relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1), relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1), name="conv2d_2", plevel=15, ) else: raise ValueError("No valid strategy found") return strategy def get_conv2d(data_shape, weight_shape, **kwargs): data = relay.var("data", shape=data_shape, dtype="float32") weight = relay.var("weight", shape=weight_shape, dtype="float32") conv2d = relay.nn.conv2d( data, weight, **kwargs, ) return relay.Function([data, weight], conv2d) def get_ref(data, weight, stride, padding): return tvm.topi.testing.conv2d_nchw_python(data, weight, stride, padding) def test_conv2d(): N, IC, H, W = 1, 64, 56, 56 OC, IC, FH, FW = 128, 64, 3, 3 data_shape = (N, IC, H, W) weight_shape = (OC, IC, FH, FW) padding = (0, 0) strides = (1, 1) relay_mod = tvm.IRModule.from_expr( get_conv2d( data_shape, weight_shape, padding=padding, strides=strides, 
channels=OC, kernel_size=(FH, FW), data_layout="NCHW", kernel_layout="OIHW", ) ) data_np = np.random.randn(*data_shape).astype("float32") weight_np = np.random.randn(*weight_shape).astype("float32") target = "llvm" params = {"weight": weight_np} def schedule_fn(sch): if "nn_conv2d" in sch.mod.attrs["task_name"]: schedule_tir_conv2d_nchw_oihw(sch) return True return False with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy): with ms.database.ScheduleFnDatabase(schedule_fn), tvm.transform.PassContext( opt_level=3, config={ "relay.backend.use_meta_schedule": True, "relay.backend.tir_converter": "allow_extern", }, ): lib = relay.build(relay_mod, target=target, params=params) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() ref = get_ref(data_np, weight_np, strides, padding) tvm.testing.assert_allclose(out, ref, atol=1e-4, rtol=1e-4) if __name__ == "__main__": test_conv2d()
5,812
32.217143
122
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_set_axis_separator.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import pytest import tvm import tvm.testing from tvm import tir from tvm.tir import IndexMap from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg @T.prim_func def element_wise(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None: B = T.alloc_buffer((128, 128), dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def element_wise_set_axis_separator(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None: B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1]) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * T.float32(2) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + T.float32(1) @T.prim_func def 
element_wise_set_axis_separator_input_buffer(A: T.Buffer(shape=(128, 128), dtype="float32", axis_separators=(1,)), C: T.Buffer((128, 128), "float32")) -> None: B = T.alloc_buffer([128, 128], dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * T.float32(2) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + T.float32(1) @T.prim_func def element_wise_subregion_match(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None: B = T.alloc_buffer((128, 128), dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1) B_subregion0[()] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1) C[vi, vj] = B_subregion1[()] + 1.0 @T.prim_func def element_wise_subregion_match_set_axis_separator(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None: B = T.alloc_buffer([128, 128], dtype="float32", axis_separators=[1]) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B_subregion0 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1]) B_subregion0[()] = A[vi, vj] * T.float32(2) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) B_subregion1 = T.match_buffer(B[vi, vj], [], dtype="float32", offset_factor=1, axis_separators=[1]) C[vi, vj] = B_subregion1[()] + T.float32(1) # pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg argument_style = tvm.testing.parameter('set_axis_separators', 'transform_layout_named', 'transform_layout_buffer_object', ) def test_set_axis_separator(argument_style): func = element_wise s = tir.Schedule(func, debug_mask='all') if 
argument_style=='set_axis_separators': s.set_axis_separator(s.get_block("B"), ("write",0), [1]) elif argument_style=='transform_layout_named': s.transform_layout(block='B', buffer='B', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) elif argument_style =='transform_layout_buffer_object': B = s.get(s.get_block('B')).writes[0].buffer s.transform_layout(block='B', buffer=B, index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) else: raise ValueError(f'Unexpected argument_style: {argument_style}') tvm.ir.assert_structural_equal(element_wise_set_axis_separator, s.mod["main"]) verify_trace_roundtrip(sch=s, mod=func) def test_set_scope_fail_on_index_out_of_bound(): func = element_wise s = tir.Schedule(func, debug_mask='all') with pytest.raises(AssertionError): s.set_axis_separator(s.get_block("B"), ("write",1),[1]) with pytest.raises(AssertionError): s.set_axis_separator(s.get_block("B"), ("read",-1),[1]) def test_set_axis_separator_input_buffer(argument_style): func = element_wise s = tir.Schedule(func, debug_mask='all') if argument_style=='set_axis_separators': s.set_axis_separator(s.get_block("B"), ("read",0), [1]) elif argument_style=='transform_layout_named': s.transform_layout(block='B', buffer='A', index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) elif argument_style =='transform_layout_buffer_object': A = s.get(s.get_block('B')).reads[0].buffer s.transform_layout(block='B', buffer=A, index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) else: raise ValueError(f'Unexpected argument_style: {argument_style}') tvm.ir.assert_structural_equal(element_wise_set_axis_separator_input_buffer, s.mod["main"]) verify_trace_roundtrip(sch=s, mod=func) def test_set_axis_separator_subregion(argument_style): func = element_wise_subregion_match s = tir.Schedule(func, debug_mask='all') if argument_style=='set_axis_separators': s.set_axis_separator(s.get_block("B"), ("write",0), [1]) elif argument_style=='transform_layout_named': s.transform_layout(block='B', buffer='B', 
index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) elif argument_style =='transform_layout_buffer_object': B = s.get(s.get_block('B')).writes[0].buffer s.transform_layout(block='B', buffer=B, index_map=lambda i,j: [i,IndexMap.AXIS_SEPARATOR,j]) else: raise ValueError(f'Unexpected argument_style: {argument_style}') tvm.ir.assert_structural_equal(element_wise_subregion_match_set_axis_separator, s.mod["main"]) verify_trace_roundtrip(sch=s, mod=func) class TestIndexedLookup(tvm.testing.CompareBeforeAfter): def transform(self): def func(mod): sch = tir.Schedule(mod) sch.set_axis_separator('block', 'B', [1]) return sch.mod return func @T.prim_func def before(): A = T.alloc_buffer([4,4], dtype="int32") B = T.alloc_buffer([1,1], dtype="int32") for j in T.serial(4): with T.block('block'): A[B[0,0],j] = 0 @T.prim_func def expected(): A = T.alloc_buffer([4,4], dtype="int32") B = T.alloc_buffer([1,1], dtype="int32", axis_separators=[1]) for j in T.serial(4): with T.block('block'): A[B[0,0],j] = 0 if __name__ == "__main__": tvm.testing.main()
7,999
38.800995
163
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_manifest_shared_memory_local_stage.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks @tvm.script.ir_module class MatmulBefore: @T.prim_func def main(A: T.Buffer((1024, 1024), "float32"), B: T.Buffer((1024, 1024), "float32"), C: T.Buffer((1024, 1024), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) # body # with T.block("root") for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"): for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"): for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"): for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"): for k_0 in T.serial(32): with T.block(): T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32]) T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32]) A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared") B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared") for ax0_ax1_fused_0 in 
T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block("A_shared"): T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.block_attr({"tir.manifest_shared_memory_local_stage":1}) A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] for ax0_ax1_fused_0 in T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block("B_shared"): T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.block_attr({"tir.manifest_shared_memory_local_stage":1}) B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 
16): with T.block("C"): T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]) T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]) if k_0 * 32 + k_1 * 16 + k_2 == 0: C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0) C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] @tvm.script.ir_module class MatmulAfter: @T.prim_func def main(A: T.Buffer((1024, 1024), "float32"), B: T.Buffer((1024, 1024), "float32"), C: T.Buffer((1024, 1024), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) # body # with T.block("root") for blockIdx_y in T.thread_binding(32, thread="blockIdx.y"): for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"): for threadIdx_y in T.thread_binding(2, thread="threadIdx.y"): for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"): for k_0 in T.serial(32): with T.block(): T.reads(A[blockIdx_y * 32 : blockIdx_y * 32 + 32, k_0 * 32 : k_0 * 32 + 32], B[k_0 * 32 : k_0 * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32]) T.writes(C[blockIdx_y * 32 : blockIdx_y * 32 + 32, blockIdx_x * 32 : blockIdx_x * 32 + 32]) A_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared") B_shared = T.alloc_buffer([1024, 1024], dtype="float32", scope="shared") A_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local") B_shared_local = T.alloc_buffer([64, 4], dtype="float32", scope="local") for ax0_ax1_fused_0 in T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block(): T.reads(A[blockIdx_y * 
32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]) A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] for ax0_ax1_fused_0 in T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block("A_shared"): T.reads(A[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) A_shared[blockIdx_y * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = A_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] for ax0_ax1_fused_0 in T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block(): T.reads(B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3]) B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] = B[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] for ax0_ax1_fused_0 in T.serial(64): for ax0_ax1_fused_3 in T.vectorized(4): with T.block("B_shared"): T.reads(B[k_0 * 
32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) T.writes(B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32]) B_shared[k_0 * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) // 32, blockIdx_x * 32 + (ax0_ax1_fused_0 * 16 + threadIdx_y * 8 + threadIdx_x * 4 + ax0_ax1_fused_3) % 32] = B_shared_local[ax0_ax1_fused_0, ax0_ax1_fused_3] for k_1, i_2, j_2, k_2 in T.grid(2, 16, 16, 16): with T.block("C"): T.reads(A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2], B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]) T.writes(C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2]) if k_0 * 32 + k_1 * 16 + k_2 == 0: C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = T.float32(0) C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] = C[blockIdx_y * 32 + threadIdx_y * 16 + i_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] + A_shared[blockIdx_y * 32 + threadIdx_y * 16 + i_2, k_0 * 32 + k_1 * 16 + k_2] * B_shared[k_0 * 32 + k_1 * 16 + k_2, blockIdx_x * 32 + threadIdx_x * 16 + j_2] # fmt: on # pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks def _check(before, expected): after = tvm.tir.transform.ManifestSharedMemoryLocalStage()(before) tvm.ir.assert_structural_equal(after, expected) def test_transform_matmul(): _check(MatmulBefore, MatmulAfter) if __name__ == "__main__": tvm.testing.main()
12,578
92.177778
448
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_reduction.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import sys import pytest import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg @T.prim_func def rowsum_blockized(a: T.handle, b: T.handle) -> None: B = T.match_buffer(b, [32, 4]) A = T.match_buffer(a, [32, 4, 128]) for i0, i2_0 in T.grid(32, 16): with T.block("blockized_B"): io, ko = T.axis.remap("SR", [i0, i2_0]) with T.init(): for i1 in T.serial(0, 4): with T.block("B_init"): ii_init = T.axis.S(4, i1) B[io, ii_init] = 0.0 for i1_1, i2_1 in T.grid(4, 8): with T.block("B"): ii = T.axis.S(4, i1_1) k = T.axis.R(128, ko * 8 + i2_1) B[io, ii] = B[io, ii] + A[io, ii, k] @T.prim_func def matmul(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j, k in T.grid(128, 128, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_decompose0(a: T.handle, b: 
T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j in T.grid(128, 128): with T.block("init"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = 0.0 for i, j, k in T.grid(128, 128, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_decompose1(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [32, 4, 128], elem_offset=0, align=64, offset_factor=1) B = T.match_buffer(b, [32, 4], elem_offset=0, align=64, offset_factor=1) for i0 in T.serial(0, 32): with T.block("blockized_B_init"): io = T.axis.S(32, i0) for i1 in T.serial(0, 4): with T.block("B_init"): ii = T.axis.S(4, i1) B[io, ii] = T.float32(0) for i0, i2_o in T.grid(32, 16): with T.block("blockized_B_update"): io, ko = T.axis.remap("SR", [i0, i2_o]) for i1, i2_i in T.grid(4, 8): with T.block("B"): ii = T.axis.S(4, i1) k = T.axis.R(128, ko * 8 + i2_i) B[io, ii] = B[io, ii] + A[io, ii, k] @T.prim_func def matmul_decompose2(a: T.handle, b: T.handle, c: T.handle) -> None: C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1) B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1) A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1) for i0, i1 in T.grid(128, 128): with T.block("update_init"): vi_init, vj_init = T.axis.remap("SS", [i0, i1]) C[vi_init, vj_init] = T.float32(0) for i2 in T.serial(0, 128): with T.block("update_update"): vi, vj, vk = T.axis.remap("SSR", [i0, i1, i2]) C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk]) @T.prim_func def matmul_decompose_fail3(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, k, j in T.grid(128, 128, 128): with T.block("update"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + 
A[vi, vk] * B[vj, vk] @T.prim_func def matmul_decompose4(a: T.handle, b: T.handle, c: T.handle) -> None: C = T.match_buffer(c, [128, 128], elem_offset=0, align=64, offset_factor=1) B = T.match_buffer(b, [128, 128], elem_offset=0, align=64, offset_factor=1) A = T.match_buffer(a, [128, 128], elem_offset=0, align=64, offset_factor=1) # body with T.block("root"): T.reads([]) T.writes([]) for i0_0 in T.serial(0, 16): for i0_1_init, i1_init in T.grid(8, 128): with T.block("update_init"): vi_init = T.axis.S(128, i0_0 * 8 + i0_1_init) vj_init = T.axis.S(128, i1_init) C[vi_init, vj_init] = T.float32(0) for i0_1, i1, i2_0, i2_1 in T.grid(8, 128, 19, 7): with T.block("update_update"): T.where((((i2_0 * 7) + i2_1) < 128)) vi = T.axis.S(128, i0_0 * 8 + i0_1) vj = T.axis.S(128, i1) vk = T.axis.R(128, i2_0 * 7 + i2_1) C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk]) @T.prim_func def matmul_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j, k in T.grid(128, 128, 128): with T.block("update"): T.block_attr({"test_annotation": 1}) vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def matmul_decompose_with_annotation(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i, j in T.grid(128, 128): with T.block("init"): T.block_attr({"test_annotation": 1}) vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = 0.0 for i, j, k in T.grid(128, 128, 128): with T.block("update"): T.block_attr({"test_annotation": 1}) vi, vj, vk = T.axis.remap("SSR", [i, j, k]) C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def colsum_with_vectorization(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 32], dtype="float32") B = T.match_buffer(b, [32], dtype="float32") for k in T.serial(0, 
128): for i in T.vectorized(0, 32): with T.block("B"): vk, vi = T.axis.remap("RS", [k, i]) with T.init(): B[vi] = T.float32(0) B[vi] = B[vi] + A[vk, vi] @T.prim_func def colsum_decompose_with_vectorization(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 32], dtype="float32") B = T.match_buffer(b, [32], dtype="float32") for i in T.vectorized(0, 32): with T.block("B_init"): vi = T.axis.S(32, i) B[vi] = T.float32(0) for k in T.serial(0, 128): for i in T.vectorized(0, 32): with T.block("B"): vk, vi = T.axis.remap("RS", [k, i]) B[vi] = B[vi] + A[vk, vi] # pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True}) def test_reduction_decompose0(use_block_name): s = tir.Schedule(matmul, debug_mask="all") C = "update" if use_block_name else s.get_block("update") i, j, k = s.get_loops(C) s.decompose_reduction(C, i) tvm.ir.assert_structural_equal(matmul_decompose0, s.mod["main"]) verify_trace_roundtrip(s, mod=matmul) def test_reduction_decompose1(use_block_name): s = tir.Schedule(rowsum_blockized, debug_mask="all") blockized_B = "blockized_B" if use_block_name else s.get_block("blockized_B") io, ko = s.get_loops(blockized_B) s.decompose_reduction(blockized_B, io) tvm.ir.assert_structural_equal(matmul_decompose1, s.mod["main"]) verify_trace_roundtrip(s, mod=rowsum_blockized) def test_reduction_decompose2(): s = tir.Schedule(matmul, debug_mask="all") C = s.get_block("update") i, j, k = s.get_loops(C) s.decompose_reduction(C, k) tvm.ir.assert_structural_equal(matmul_decompose2, s.mod["main"]) verify_trace_roundtrip(s, mod=matmul) def test_reduction_decompose3(): s = tir.Schedule(matmul_decompose_fail3, debug_mask="all") C = s.get_block("update") i, j, k = s.get_loops(C) with pytest.raises(tvm.tir.ScheduleError): s.decompose_reduction(C, k) def test_reduction_decompose4(): s = tir.Schedule(matmul, debug_mask="all") C = s.get_block("update") i, j, k = 
s.get_loops(C) io, ii = s.split(i, factors=[16, 8]) ko, ki = s.split(k, factors=[19, 7]) s.decompose_reduction(C, ii) tvm.ir.assert_structural_equal(matmul_decompose4, s.mod["main"]) verify_trace_roundtrip(s, mod=matmul) def test_reduction_decompose_with_annotation(): s = tir.Schedule(matmul_with_annotation, debug_mask="all") C = s.get_block("update") i, j, k = s.get_loops(C) s.decompose_reduction(C, i) tvm.ir.assert_structural_equal(matmul_decompose_with_annotation, s.mod["main"]) verify_trace_roundtrip(s, mod=matmul_with_annotation) def test_reduction_decompose_with_different_for_kind(): s = tir.Schedule(colsum_with_vectorization, debug_mask="all") B = s.get_block("B") k, _ = s.get_loops(B) B_init = s.decompose_reduction(B, k) tvm.ir.assert_structural_equal(s.mod["main"], colsum_decompose_with_vectorization) assert s.get(B).same_as(s.get(s.get_block("B_update"))) assert s.get(B_init).same_as(s.get(s.get_block("B_init"))) verify_trace_roundtrip(s, mod=colsum_with_vectorization) def test_decompose_reduction_ref_hash_check(): mod = tvm.IRModule.from_expr(matmul) mod_bak = mod hash_before = tvm.ir.structural_hash(mod_bak) s = tir.Schedule(mod["main"], debug_mask="all") C = s.get_block("update") i, j, k = s.get_loops(C) s.decompose_reduction(C, k) hash_after = tvm.ir.structural_hash(mod_bak) assert hash_before == hash_after def test_decompose_reduction_nested_block(): @T.prim_func def nested_block(A: T.Buffer((1, 64), "float32"), B: T.Buffer((1,), "float32")): for i, ko in T.grid(1, 2): with T.block("outer"): vi, vko = T.axis.remap("SR", [i, ko]) C = T.alloc_buffer((32,), dtype="float32") with T.init(): B[vi] = T.float32(0) for ki in T.serial(32): with T.block("inner_1"): vki = T.axis.remap("S", [ki]) C[vki] = A[vi, vko * 32 + vki] for ki in T.serial(32): with T.block("inner_2"): vki = T.axis.remap("R", [ki]) B[vi] += C[vki] @T.prim_func def decomposed_nested_block(A: T.Buffer((1, 64), "float32"), B: T.Buffer((1,), "float32")): for i in range(1): with 
T.block("outer_init"): vi = T.axis.spatial(1, i) T.reads() T.writes(B[vi]) B[vi] = T.float32(0) for ko in range(2): with T.block("outer_update"): vi, vko = T.axis.remap("SR", [i, ko]) T.reads(B[vi], A[vi, vko * 32 : vko * 32 + 32]) T.writes(B[vi]) C = T.alloc_buffer((32,)) for ki in range(32): with T.block("inner_1"): vki = T.axis.spatial(32, ki) T.reads(A[vi, vko * 32 + vki]) T.writes(C[vki]) C[vki] = A[vi, vko * 32 + vki] for ki in range(32): with T.block("inner_2"): vki = T.axis.reduce(32, ki) T.reads(B[vi], C[vki]) T.writes(B[vi]) B[vi] = B[vi] + C[vki] sch = tir.Schedule(nested_block, debug_mask="all") outer = sch.get_block("outer") i, ko = sch.get_loops(outer) sch.decompose_reduction(outer, ko) tvm.ir.assert_structural_equal(decomposed_nested_block, sch.mod["main"]) verify_trace_roundtrip(sch, mod=nested_block) if __name__ == "__main__": tvm.testing.main()
13,347
36.6
95
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_space_cuda_async.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Tests for MetaSchedule search space on CUDA""" from tvm import meta_schedule as ms from tvm.meta_schedule.testing.space_generation import ( check_sketches, generate_design_space, print_sketches, ) from tvm.meta_schedule.testing.te_workload import create_te_workload from tvm.script import tir as T from tvm.target import Target def _target(): return Target("nvidia/geforce-rtx-3070") def _design_space(mod): return generate_design_space( kind="cuda", mod=mod, target=_target(), types=ms.ScheduleRule, ) def get_c2d_prim_func(stage: int): if stage == 0: # fmt: off @T.prim_func def c2d(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit": 1024}) conv2d_nhwc_local = T.alloc_buffer((1, 112, 112, 64), scope="local") PadInput_shared = T.alloc_buffer((1, 230, 230, 3), scope="shared") weight_shared = T.alloc_buffer((7, 7, 3, 64), scope="shared") for n_0_h_0_w_0_co_0_fused in T.thread_binding(112, thread="blockIdx.x"): for n_1_h_1_w_1_co_1_fused in T.thread_binding(8, 
thread="vthread.x"): for n_2_h_2_w_2_co_2_fused in T.thread_binding(64, thread="threadIdx.x"): for rh_0, rw_0, rc_0 in T.grid(1, 1, 3): for ax0_ax1_ax2_ax3_fused in range(693): with T.block("PadInput_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(230, n_0_h_0_w_0_co_0_fused // 8 * 16 + ax0_ax1_ax2_ax3_fused // 33) v2 = T.axis.spatial(230, n_0_h_0_w_0_co_0_fused % 8 * 28 + ax0_ax1_ax2_ax3_fused % 33) v3 = T.axis.spatial(3, rc_0) T.reads(inputs[v0, v1 - 3, v2 - 3, v3]) T.writes(PadInput_shared[v0, v1, v2, v3]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0)) for ax0_ax1_ax2_ax3_fused in range(3136): with T.block("weight_shared"): v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 448) v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused % 448 // 64) v2 = T.axis.spatial(3, rc_0) v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64) T.reads(weight[v0, v1, v2, v3]) T.writes(weight_shared[v0, v1, v2, v3]) T.block_attr({"meta_schedule.cooperative_fetch": 3}) weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3] for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(7, 1, 1, 1, 1, 14, 1, 1, 7, 1, 1, 1, 1, 1): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_3 + n_4) v_h = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 8 + n_1_h_1_w_1_co_1_fused // 4 * 4 + n_2_h_2_w_2_co_2_fused // 16 + h_3 + h_4) v_w = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused % 8 * 14 + w_3 + w_4) v_co = T.axis.spatial(64, n_1_h_1_w_1_co_1_fused % 4 * 16 + n_2_h_2_w_2_co_2_fused % 16 + co_3 + co_4) v_rh = T.axis.reduce(7, rh_0 * 7 + rh_1 + rh_2) v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1 * 7 + rw_2) v_rc = T.axis.reduce(3, rc_0 + rc_1 + rc_2) T.reads(PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight_shared[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_local[v_n, v_h, v_w, v_co]) 
T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"}) with T.init(): conv2d_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_nhwc_local[v_n, v_h, v_w, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight_shared[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 1, 14, 1): with T.block("conv2d_nhwc_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 8 + n_1_h_1_w_1_co_1_fused // 4 * 4 + n_2_h_2_w_2_co_2_fused // 16 + ax1) v2 = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused % 8 * 14 + ax2) v3 = T.axis.spatial(64, n_1_h_1_w_1_co_1_fused % 4 * 16 + n_2_h_2_w_2_co_2_fused % 16 + ax3) T.reads(conv2d_nhwc_local[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3] # fmt: on else: # fmt: off @T.prim_func def c2d(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit": 1024}) conv2d_nhwc_local = T.alloc_buffer((1, 112, 112, 64), scope="local") PadInput_shared = T.alloc_buffer((1, 230, 230, 3), scope="shared") weight_shared = T.alloc_buffer((7, 7, 3, 64), scope="shared") for n_0_h_0_w_0_co_0_fused in T.thread_binding(112, thread="blockIdx.x"): for n_1_h_1_w_1_co_1_fused in T.thread_binding(8, thread="vthread.x"): for n_2_h_2_w_2_co_2_fused in T.thread_binding(64, thread="threadIdx.x"): for rh_0_rw_0_rc_0_fused in T.serial(3, annotations={"software_pipeline_async_stages": [0], "software_pipeline_order": [0, 1, 2], "software_pipeline_stage": [0, 0, stage - 2]}): for ax0_ax1_ax2_ax3_fused in range(693): with T.block("PadInput_shared"): v0 
= T.axis.spatial(1, 0) v1 = T.axis.spatial(230, n_0_h_0_w_0_co_0_fused // 8 * 16 + ax0_ax1_ax2_ax3_fused // 33) v2 = T.axis.spatial(230, n_0_h_0_w_0_co_0_fused % 8 * 28 + ax0_ax1_ax2_ax3_fused % 33) v3 = T.axis.spatial(3, rh_0_rw_0_rc_0_fused) T.reads(inputs[v0, v1 - 3, v2 - 3, v3]) T.writes(PadInput_shared[v0, v1, v2, v3]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) PadInput_shared[v0, v1, v2, v3] = T.if_then_else(3 <= v1 and v1 < 227 and 3 <= v2 and v2 < 227, inputs[v0, v1 - 3, v2 - 3, v3], T.float32(0)) for ax0_ax1_ax2_ax3_fused in range(3136): with T.block("weight_shared"): v0 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused // 448) v1 = T.axis.spatial(7, ax0_ax1_ax2_ax3_fused % 448 // 64) v2 = T.axis.spatial(3, rh_0_rw_0_rc_0_fused) v3 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused % 64) T.reads(weight[v0, v1, v2, v3]) T.writes(weight_shared[v0, v1, v2, v3]) T.block_attr({"meta_schedule.cooperative_fetch": 3}) weight_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3] for rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3, rh_2, rw_2, rc_2, n_4, h_4, w_4, co_4 in T.grid(7, 1, 1, 1, 1, 14, 1, 1, 7, 1, 1, 1, 1, 1): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_3 + n_4) v_h = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 8 + n_1_h_1_w_1_co_1_fused // 4 * 4 + n_2_h_2_w_2_co_2_fused // 16 + h_3 + h_4) v_w = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused % 8 * 14 + w_3 + w_4) v_co = T.axis.spatial(64, n_1_h_1_w_1_co_1_fused % 4 * 16 + n_2_h_2_w_2_co_2_fused % 16 + co_3 + co_4) v_rh = T.axis.reduce(7, rh_1 + rh_2) v_rw = T.axis.reduce(7, rw_1 * 7 + rw_2) v_rc = T.axis.reduce(3, rh_0_rw_0_rc_0_fused + rc_1 + rc_2) T.reads(PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight_shared[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_local[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"}) with T.init(): 
conv2d_nhwc_local[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_local[v_n, v_h, v_w, v_co] = conv2d_nhwc_local[v_n, v_h, v_w, v_co] + PadInput_shared[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight_shared[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 1, 14, 1): with T.block("conv2d_nhwc_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused // 8 * 8 + n_1_h_1_w_1_co_1_fused // 4 * 4 + n_2_h_2_w_2_co_2_fused // 16 + ax1) v2 = T.axis.spatial(112, n_0_h_0_w_0_co_0_fused % 8 * 14 + ax2) v3 = T.axis.spatial(64, n_1_h_1_w_1_co_1_fused % 4 * 16 + n_2_h_2_w_2_co_2_fused % 16 + ax3) T.reads(conv2d_nhwc_local[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_local[v0, v1, v2, v3] # fmt: on return c2d def test_cuda_c2d(): c2d_decision = [ ("SamplePerfectTile", [1, 1, 1, 1, 1]), ("SamplePerfectTile", [14, 2, 4, 1, 1]), ("SamplePerfectTile", [8, 1, 1, 14, 1]), ("SamplePerfectTile", [1, 4, 16, 1, 1]), ("SamplePerfectTile", [1, 7, 1]), ("SamplePerfectTile", [1, 1, 7]), ("SamplePerfectTile", [3, 1, 1]), ("SampleCategorical", 3), ("SampleCategorical", 2), ("SampleCategorical", 4), ] mod = create_te_workload("C2D", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[ get_c2d_prim_func(stage=0), get_c2d_prim_func(stage=4), get_c2d_prim_func(stage=5), ], expected_decisions=[c2d_decision, c2d_decision, c2d_decision], ) def get_gmm_prim_func(stage: int): if stage == 0: # fmt: off @T.prim_func def gmm(X: T.Buffer((1, 1024, 1024), "float32"), Y: T.Buffer((1, 1024, 1024), "float32"), Z: T.Buffer((1, 1024, 1024), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit": 16}) Z_local = T.alloc_buffer((1, 1024, 1024), scope="local") X_shared = T.alloc_buffer((1, 1024, 1024), scope="shared") Y_shared = T.alloc_buffer((1, 1024, 1024), 
scope="shared") for b_0_i_0_j_0_fused in T.thread_binding(256, thread="blockIdx.x"): for b_1_i_1_j_1_fused in T.thread_binding(32, thread="vthread.x"): for b_2_i_2_j_2_fused in T.thread_binding(64, thread="threadIdx.x"): for k_0 in range(64): for ax0_ax1_ax2_fused in range(1024): with T.block("X_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + ax0_ax1_ax2_fused // 16) v2 = T.axis.spatial(1024, k_0 * 16 + ax0_ax1_ax2_fused % 16) T.reads(X[v0, v1, v2]) T.writes(X_shared[v0, v1, v2]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) X_shared[v0, v1, v2] = X[v0, v1, v2] for ax0_ax1_ax2_fused in range(1024): with T.block("Y_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1024, k_0 * 16 + ax0_ax1_ax2_fused // 64) v2 = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + ax0_ax1_ax2_fused % 64) T.reads(Y[v0, v1, v2]) T.writes(Y_shared[v0, v1, v2]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) Y_shared[v0, v1, v2] = Y[v0, v1, v2] for k_1, b_3, i_3, j_3, k_2, b_4, i_4, j_4 in T.grid(2, 1, 1, 1, 8, 1, 1, 2): with T.block("Z"): v_b = T.axis.spatial(1, b_3 + b_4) v_i = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + b_1_i_1_j_1_fused // 4 * 8 + b_2_i_2_j_2_fused // 8 + i_3 + i_4) v_j = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + b_1_i_1_j_1_fused % 4 * 16 + b_2_i_2_j_2_fused % 8 * 2 + j_3 * 2 + j_4) v_k = T.axis.reduce(1024, k_0 * 16 + k_1 * 8 + k_2) T.reads(X_shared[v_b, v_i, v_k], Y_shared[v_b, v_k, v_j]) T.writes(Z_local[v_b, v_i, v_j]) T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"}) with T.init(): Z_local[v_b, v_i, v_j] = T.float32(0) Z_local[v_b, v_i, v_j] = Z_local[v_b, v_i, v_j] + X_shared[v_b, v_i, v_k] * Y_shared[v_b, v_k, v_j] for ax0, ax1, ax2 in T.grid(1, 1, 2): with T.block("Z_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + 
b_1_i_1_j_1_fused // 4 * 8 + b_2_i_2_j_2_fused // 8 + ax1) v2 = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + b_1_i_1_j_1_fused % 4 * 16 + b_2_i_2_j_2_fused % 8 * 2 + ax2) T.reads(Z_local[v0, v1, v2]) T.writes(Z[v0, v1, v2]) Z[v0, v1, v2] = Z_local[v0, v1, v2] # fmt: on else: # fmt: off @T.prim_func def gmm(X: T.Buffer((1, 1024, 1024), "float32"), Y: T.Buffer((1, 1024, 1024), "float32"), Z: T.Buffer((1, 1024, 1024), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.unroll_explicit": 16}) Z_local = T.alloc_buffer((1, 1024, 1024), scope="local") X_shared = T.alloc_buffer((1, 1024, 1024), scope="shared") Y_shared = T.alloc_buffer((1, 1024, 1024), scope="shared") for b_0_i_0_j_0_fused in T.thread_binding(256, thread="blockIdx.x"): for b_1_i_1_j_1_fused in T.thread_binding(32, thread="vthread.x"): for b_2_i_2_j_2_fused in T.thread_binding(64, thread="threadIdx.x"): for k_0_fused in T.serial(64, annotations={"software_pipeline_async_stages": [0], "software_pipeline_order": [0, 1, 2], "software_pipeline_stage": [0, 0, stage - 2]}): for ax0_ax1_ax2_fused in range(1024): with T.block("X_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + ax0_ax1_ax2_fused // 16) v2 = T.axis.spatial(1024, k_0_fused * 16 + ax0_ax1_ax2_fused % 16) T.reads(X[v0, v1, v2]) T.writes(X_shared[v0, v1, v2]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) X_shared[v0, v1, v2] = X[v0, v1, v2] for ax0_ax1_ax2_fused in range(1024): with T.block("Y_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1024, k_0_fused * 16 + ax0_ax1_ax2_fused // 64) v2 = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + ax0_ax1_ax2_fused % 64) T.reads(Y[v0, v1, v2]) T.writes(Y_shared[v0, v1, v2]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) Y_shared[v0, v1, v2] = Y[v0, v1, v2] for k_1, b_3, i_3, j_3, k_2, b_4, i_4, j_4 in T.grid(2, 1, 1, 1, 8, 1, 1, 2): with 
T.block("Z"): v_b = T.axis.spatial(1, b_3 + b_4) v_i = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + b_1_i_1_j_1_fused // 4 * 8 + b_2_i_2_j_2_fused // 8 + i_3 + i_4) v_j = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + b_1_i_1_j_1_fused % 4 * 16 + b_2_i_2_j_2_fused % 8 * 2 + j_3 * 2 + j_4) v_k = T.axis.reduce(1024, k_0_fused * 16 + k_1 * 8 + k_2) T.reads(X_shared[v_b, v_i, v_k], Y_shared[v_b, v_k, v_j]) T.writes(Z_local[v_b, v_i, v_j]) T.block_attr({"meta_schedule.thread_extent_high_inclusive": 1024, "meta_schedule.thread_extent_low_inclusive": 32, "meta_schedule.tiling_structure": "SSSRRSRS"}) with T.init(): Z_local[v_b, v_i, v_j] = T.float32(0) Z_local[v_b, v_i, v_j] = Z_local[v_b, v_i, v_j] + X_shared[v_b, v_i, v_k] * Y_shared[v_b, v_k, v_j] for ax0, ax1, ax2 in T.grid(1, 1, 2): with T.block("Z_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(1024, b_0_i_0_j_0_fused // 16 * 64 + b_1_i_1_j_1_fused // 4 * 8 + b_2_i_2_j_2_fused // 8 + ax1) v2 = T.axis.spatial(1024, b_0_i_0_j_0_fused % 16 * 64 + b_1_i_1_j_1_fused % 4 * 16 + b_2_i_2_j_2_fused % 8 * 2 + ax2) T.reads(Z_local[v0, v1, v2]) T.writes(Z[v0, v1, v2]) Z[v0, v1, v2] = Z_local[v0, v1, v2] # fmt: on return gmm def test_cuda_gmm(): gmm_decision = [ ("SamplePerfectTile", [1, 1, 1, 1, 1]), ("SamplePerfectTile", [16, 8, 8, 1, 1]), ("SamplePerfectTile", [16, 4, 8, 1, 2]), ("SamplePerfectTile", [64, 2, 8]), ("SampleCategorical", 3), ("SampleCategorical", 3), ("SampleCategorical", 1), ] mod = create_te_workload("GMM", 3) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[ get_gmm_prim_func(stage=0), get_gmm_prim_func(stage=4), get_gmm_prim_func(stage=5), ], expected_decisions=[gmm_decision, gmm_decision, gmm_decision], ) if __name__ == "__main__": test_cuda_c2d() test_cuda_gmm()
23,401
68.236686
237
py
tvm
tvm-main/tests/python/unittest/test_tir_analysis_verify_gpu_code.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test gpu code verifier""" import tvm from tvm import te from tvm import topi import tvm.testing import tvm.topi.testing def get_verify_pass(valid, **kwargs): def _fverify(f, *_): valid[0] = tvm.tir.analysis.verify_gpu_code(f, kwargs) return f return tvm.tir.transform.prim_func_pass(_fverify, opt_level=0) @tvm.testing.requires_gpu def test_shared_memory(): def check_shared_memory(storage_scope, dtype): N = 1024 M = 128 tvm_type = tvm.runtime.DataType(dtype) type_size = tvm_type.bits // 8 * tvm_type.lanes A = te.placeholder((N,), name="A", dtype=dtype) B = te.compute((N,), lambda i: A[i], name="B") s = te.create_schedule([B.op]) AA = s.cache_read(A, storage_scope, [B]) o, i = s[B].split(s[B].op.axis[0], M) s[AA].compute_at(s[B], o) s[B].bind(o, te.thread_axis("blockIdx.x")) s[B].bind(i, te.thread_axis("threadIdx.x")) # shared memory usage: M * sizeof(dtype) Bytes # thread usage: M for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=type_size * M - 1, max_threads_per_block=M, ), ) ] } ): tvm.build(s, [A, B], target) assert not 
valid[0] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=type_size * M, max_threads_per_block=M, ), ) ] } ): tvm.build(s, [A, B], target) assert valid[0] check_shared_memory("shared", "float32") check_shared_memory("shared", "int8x4") check_shared_memory("shared.dyn", "float32") @tvm.testing.requires_gpu def test_local_memory(): N = 1024 M = 128 A = te.placeholder((N,), name="A", dtype="float32") B = te.compute((N,), lambda i: A[i], name="B") s = te.create_schedule([B.op]) AA = s.cache_read(A, "local", [B]) o, i = s[B].split(s[B].op.axis[0], M) s[AA].compute_at(s[B], o) s[B].bind(o, te.thread_axis("blockIdx.x")) # local memory usage: M * 4B # thread usage: M for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_local_memory_per_block=4 * M - 1, max_threads_per_block=1 ), ) ] } ): tvm.build(s, [A, B], target) assert not valid[0] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_local_memory_per_block=4 * M, max_threads_per_block=1 ), ) ] } ): tvm.build(s, [A, B], target) assert valid[0] @tvm.testing.requires_gpu def test_num_thread(): N = 1024 M = 128 A = te.placeholder((N,), name="A", dtype="float32") B = te.compute((N,), lambda i: A[i], name="B") s = te.create_schedule([B.op]) o, i = s[B].split(s[B].op.axis[0], M) s[B].bind(o, te.thread_axis("threadIdx.x")) s[B].bind(i, te.thread_axis("threadIdx.y")) # shared memory usage: 0 # thread usage: N for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1 ), ) ] } ): tvm.build(s, [A, B], target) assert not valid[0] with tvm.transform.PassContext( config={ 
"tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N ), ) ] } ): tvm.build(s, [A, B], target) assert valid[0] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N, max_thread_y=M - 1, ), ) ] } ): tvm.build(s, [A, B], target) assert not valid[0] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N, max_thread_y=M, ), ) ] } ): tvm.build(s, [A, B], target) assert valid[0] @tvm.testing.requires_gpu def test_multiple_kernels(): N = 1024 A = te.placeholder((N, N), name="A") B = te.compute((N, N), lambda i, j: A[i, j]) C = te.compute((N, N), lambda i, j: B[i, j]) s = te.create_schedule([C.op]) s[C].bind(s[C].op.axis[1], te.thread_axis("threadIdx.x")) s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x")) # shared memory usage: 0 # thread usage: N for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N - 1 ), ) ] } ): tvm.build(s, [A, C], target) assert not valid[0] with tvm.transform.PassContext( config={ "tir.add_lower_pass": [ ( 2, get_verify_pass( valid, max_shared_memory_per_block=0, max_threads_per_block=N ), ) ] } ): tvm.build(s, [A, C], target) assert valid[0] @tvm.testing.requires_gpu def test_wrong_bind(): N = 1024 A = te.placeholder((N, N - 1), name="A") B = te.compute((N, N - 1), lambda i, j: A[i, j]) s = te.create_schedule([B.op]) # bind a thread axis to two loop axes with different lengths s[B].bind(s[B].op.axis[0], te.thread_axis("threadIdx.x")) s[B].bind(s[B].op.axis[1], te.thread_axis("threadIdx.x")) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with 
tvm.transform.PassContext( config={ "tir.add_lower_pass": [(2, get_verify_pass(valid, max_threads_per_block=N * N))] } ): tvm.build(s, [A, B], target) assert not valid[0] @tvm.testing.requires_gpu def test_vectorize(): N = 1024 A = te.placeholder((N, N), name="A") B = te.compute((N, N), lambda i, j: A[i, j]) s = te.create_schedule([B.op]) i, j = s[B].op.axis s[B].bind(i, te.thread_axis("blockIdx.x")) jo, ji = s[B].split(j, factor=64) s[B].bind(jo, te.thread_axis("threadIdx.x")) s[B].vectorize(ji) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]} ): tvm.lower(s, [A, B]) assert not valid[0] @tvm.testing.requires_gpu def test_vectorize_half(): N = 1024 A = te.placeholder((N, N), name="A", dtype="float16") B = te.compute((N, N), lambda i, j: A[i, j]) s = te.create_schedule([B.op]) i, j = s[B].op.axis s[B].bind(i, te.thread_axis("blockIdx.x")) jo, ji = s[B].split(j, factor=8) s[B].bind(jo, te.thread_axis("threadIdx.x")) s[B].vectorize(ji) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]} ): tvm.lower(s, [A, B]) assert valid[0] @tvm.testing.requires_gpu def test_vectorize_strided(): N = 1024 A = te.placeholder((N, N), name="A", dtype="float16") B = te.compute((N, N), lambda i, j: A[j, i]) s = te.create_schedule([B.op]) i, j = s[B].op.axis s[B].bind(i, te.thread_axis("blockIdx.x")) jo, ji = s[B].split(j, factor=8) s[B].vectorize(ji) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] with tvm.transform.PassContext( config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_vector_bytes=16))]} ): tvm.lower(s, [A, B]) assert not valid[0] @tvm.testing.requires_gpu def test_vthread(): N = 1024 A = 
te.placeholder((N, 16), name="A") B = te.compute((N, 16), lambda i, j: A[i, j]) s = te.create_schedule([B.op]) s[B].bind(s[B].op.axis[0], te.thread_axis("blockIdx.x")) s[B].bind(s[B].op.axis[1], te.thread_axis("vthread")) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue valid = [None] for phase in [1, 2]: with tvm.transform.PassContext( config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=16))]} ): tvm.build(s, [A, B], target) assert valid[0] with tvm.transform.PassContext( config={"tir.add_lower_pass": [(phase, get_verify_pass(valid, max_vthread=15))]} ): tvm.build(s, [A, B], target) assert not valid[0] @tvm.testing.requires_gpu def test_redundant_kernels(): dtype = "float32" A = te.placeholder(shape=(1,), name="A", dtype=dtype) B = te.placeholder(shape=(1,), name="B", dtype=dtype) C = te.placeholder(shape=(1,), name="C", dtype=dtype) D = topi.less(A, C) E = topi.less(B, C) F = topi.logical_or(D, E) G = topi.identity(F) for target in ["opencl", "cuda"]: if not tvm.testing.device_enabled(target): continue print("Running on target: %s" % target) valid = [None] with tvm.target.Target(target): s = tvm.topi.testing.get_reduce_schedule(target)(G) with tvm.transform.PassContext( config={"tir.add_lower_pass": [(2, get_verify_pass(valid, max_kernels=1))]} ): tvm.build(s, [A, B, C, G], target) assert valid[0] if __name__ == "__main__": tvm.testing.main()
13,448
28.047516
96
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_relay_integration.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Integration test for MetaSchedule""" import platform import tempfile from typing import List import numpy as np import pytest import tvm import tvm.testing from tvm import IRModule from tvm import meta_schedule as ms from tvm import relay, te, tir from tvm._ffi import register_func from tvm.contrib import graph_executor from tvm.ir.transform import PassContext from tvm.meta_schedule.database import TuningRecord, Workload from tvm.meta_schedule.testing.relay_workload import get_network from tvm.meta_schedule.testing.tlcbench import load_quantized_bert_base from tvm.meta_schedule.tune_context import _normalize_mod from tvm.script import tir as T from tvm.target import Target # pylint: disable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument,missing-docstring,invalid-name @tvm.script.ir_module class MockModule: @T.prim_func def main(a: T.handle, b: T.handle) -> None: # type: ignore T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, (16,), "float32") B = T.match_buffer(b, (16,), "float32") for i in T.serial(0, 16): with T.block("matmul"): vi = T.axis.remap("S", [i]) B[vi] = A[vi] # pylint: 
enable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument @pytest.mark.skip("Integration tests") def test_meta_schedule_dynamic_loop_extent(): a = relay.var("a", shape=(1, 8, 8, 512), dtype="float32") b = relay.nn.adaptive_avg_pool2d(a, (7, 7), "NHWC") mod = IRModule({"main": relay.Function([a], b)}) extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params={}) assert not extracted_tasks @pytest.mark.skip("Integration tests") @pytest.mark.skipif( platform.machine() == "aarch64", reason="Currently torch.jit.trace fails on AArch64", ) @tvm.testing.requires_package("torch") def test_meta_schedule_integration_extract_from_resnet(): mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224]) extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params) expected_task_names = [ "fused_" + s for s in [ "nn_max_pool2d", "nn_adaptive_avg_pool2d", "nn_dense_add", "nn_conv2d_add", "nn_conv2d_add_1", "nn_conv2d_add_2", "nn_conv2d_add_add_nn_relu", "nn_conv2d_add_add_nn_relu_1", "nn_conv2d_add_nn_relu", "nn_conv2d_add_nn_relu_1", "nn_conv2d_add_nn_relu_2", "nn_conv2d_add_nn_relu_3", "nn_conv2d_add_nn_relu_4", "nn_conv2d_add_nn_relu_5", "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu", "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1", # The two tasks below are purely spatial and are ruled out by AutoScheduler "layout_transform", "layout_transform_reshape_squeeze", ] ] assert len(extracted_tasks) == len(expected_task_names) for t in extracted_tasks: assert t.task_name in expected_task_names, t.task_name @pytest.mark.skip("Integration tests") @pytest.mark.skipif( platform.machine() == "aarch64", reason="Currently torch.jit.trace fails on AArch64", ) @tvm.testing.requires_package("torch") def 
test_task_extraction_winograd_tensorcore(): mod, params, _ = get_network(name="resnet_50", input_shape=[16, 3, 224, 224]) seq = tvm.transform.Sequential( [ relay.transform.ToMixedPrecision("float16"), relay.transform.ConvertLayout({"nn.conv2d": ["NHWC", "HWIO"]}), ] ) with tvm.transform.PassContext(opt_level=3): mod = seq(mod) target = tvm.target.Target("nvidia/geforce-rtx-3070") extracted_tasks = ms.relay_integration.extract_tasks(mod, target=target, params=params) assert len([t for t in extracted_tasks if "winograd" in t.task_name]) == 4 @pytest.mark.skip("Integration tests") @pytest.mark.skipif( platform.machine() == "aarch64", reason="Currently torch.jit.trace fails on AArch64", ) @tvm.testing.requires_package("torch") def test_task_extraction_anchor_block(): mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224]) extracted_tasks = ms.relay_integration.extract_tasks( mod, target="llvm", params=params, module_equality="anchor-block" ) # Note that there is no task from residual blocks expected_task_names = [ "fused_" + s for s in [ "nn_max_pool2d", "nn_adaptive_avg_pool2d", "nn_dense_add", "nn_conv2d_add", "nn_conv2d_add_1", "nn_conv2d_add_2", "nn_conv2d_add_nn_relu", "nn_conv2d_add_nn_relu_1", "nn_conv2d_add_nn_relu_2", "nn_conv2d_add_nn_relu_3", "nn_conv2d_add_nn_relu_4", "nn_conv2d_add_nn_relu_5", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1", "layout_transform", "layout_transform_reshape_squeeze", ] ] assert len(extracted_tasks) == len(expected_task_names) for t in extracted_tasks: assert t.task_name in expected_task_names, t.task_name @pytest.mark.skip("Integration tests") @tvm.testing.requires_package("torch") def test_meta_schedule_integration_extract_from_bert_base(): pytest.importorskip( "transformers", reason="transformers package is required to import bert_base" ) expected = { "fused_nn_dense_2": ( 12, [[64, 3072], [768, 3072], [64, 768]], ), 
"fused_nn_dense": ( 48, [[64, 768], [768, 768], [64, 768]], ), "fused_nn_dense_1": ( 12, [[64, 768], [3072, 768], [64, 3072]], ), "fused_subtract_add_rsqrt_multiply_multiply_add": ( 25, [[1, 64, 768], [1, 64, 1], [1, 64, 1], [768], [768], [1, 64, 768]], ), "fused_nn_batch_matmul": ( 24, [[12, 64, 64], [12, 64, 64], [12, 64, 64]], ), "fused_reshape_add_add": ( 24, [[64, 768], [768], [1, 64, 768], [1, 64, 768]], ), "fused_variance": ( 25, [[1, 64, 768], [1, 64, 1], [1, 64, 1]], ), "fused_mean": ( 25, [[1, 64, 768], [1, 64, 1]], ), "fused_reshape_add_reshape_transpose_reshape": ( 12, [[64, 768], [768], [12, 64, 64]], ), "fused_reshape_add_multiply_fast_erf_multiply_add_multiply_reshape": ( 12, [[64, 3072], [3072], [64, 3072]], ), "fused_nn_fast_softmax": ( 12, [[1, 12, 64, 64], [1, 12, 64, 64]], ), "fused_reshape_add_reshape_transpose_reshape_1": ( 24, [[64, 768], [768], [12, 64, 64]], ), "fused_reshape_divide_add": ( 12, [[12, 64, 64], [1, 1, 1, 64], [1, 12, 64, 64]], ), "fused_reshape_transpose_reshape": ( 12, [[12, 64, 64], [64, 768]], ), "fused_nn_dense_add_fast_tanh": ( 1, [[1, 768], [768, 768], [1, 768], [1, 768]], ), "fused_cast_take_add": ( 1, [[1, 64], [30522, 768], [1, 64, 768], [1, 64, 768]], ), "fused_take": ( 1, [[1, 64, 768], [1, 768]], ), "fused_reshape": ( 12, [[1, 12, 64, 64], [12, 64, 64]], ), "fused_reshape_1": ( 24, [[1, 64, 768], [64, 768]], ), } mod, params, _ = get_network(name="bert_base", input_shape=[1, 64]) extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params) assert len(extracted_tasks) == len(expected) for t in extracted_tasks: prim_func = None for _, v in t.dispatched[0].functions.items(): prim_func = v shape = [[int(x) for x in prim_func.buffer_map[b].shape] for b in prim_func.params] assert t.task_name in expected expected_weight, expected_shape = expected[t.task_name] assert expected_weight == t.weight, t.task_name assert expected_shape == shape, t.task_name @pytest.mark.skip("Integration tests") 
@pytest.mark.skipif( platform.machine() == "aarch64", reason="Currently torch.jit.trace fails on AArch64", ) @tvm.testing.requires_package("torch") def test_meta_schedule_integration_extract_from_resnet_with_filter_func(): @register_func("relay.backend.tir_converter.remove_purely_spatial", override=True) def filter_func(args, _) -> bool: from tvm.te import create_prim_func # pylint: disable=import-outside-toplevel has_complex_op = False visited = set() def traverse(t): nonlocal has_complex_op assert t.handle is not None if t.handle.value in visited: return if isinstance(t.op, te.PlaceholderOp): pass elif isinstance(t.op, te.ComputeOp): has_complex_op = has_complex_op or any(isinstance(e, tir.Reduce) for e in t.op.body) for x in t.op.input_tensors: traverse(x) visited.add(t.handle.value) for t in args: traverse(t) if not has_complex_op: return None return create_prim_func(args) mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224]) extracted_tasks = ms.relay_integration.extract_tasks( mod, target="llvm", params=params, pass_config={ "relay.backend.use_meta_schedule": True, "relay.backend.tir_converter": "remove_purely_spatial", }, ) expected_task_names = [ "fused_" + s for s in [ "nn_max_pool2d", "nn_adaptive_avg_pool2d", "nn_dense_add", "nn_conv2d_add", "nn_conv2d_add_1", "nn_conv2d_add_2", "nn_conv2d_add_add_nn_relu", "nn_conv2d_add_add_nn_relu_1", "nn_conv2d_add_nn_relu", "nn_conv2d_add_nn_relu_1", "nn_conv2d_add_nn_relu_2", "nn_conv2d_add_nn_relu_3", "nn_conv2d_add_nn_relu_4", "nn_conv2d_add_nn_relu_5", "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu", "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu", "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1", ] ] assert len(extracted_tasks) == len(expected_task_names) for t in extracted_tasks: assert t.task_name in expected_task_names, t.task_name def 
extract_task_qbert(target, sch_rule_tag): def _test(mod, params, target, sch_rule_tag): extracted_tasks = ms.relay_integration.extract_tasks(mod, target, params) tune_tasks = list( filter( lambda task: "dense" in task.task_name or "batch_matmul" in task.task_name, extracted_tasks, ) ) # three int8 dense, two int8 bmm, and one fp32 dense assert len(tune_tasks) == 6 for task in tune_tasks: relay_func = list(task.mod.functions.values())[0] out_type = relay_func.body.checked_type if out_type.dtype == "float32": continue sch = tvm.tir.Schedule(_normalize_mod(task.dispatched[0])) block = sch.get_block("compute") annotations = sch.get(block).annotations assert "schedule_rule" in annotations assert sch_rule_tag in annotations["schedule_rule"] mod, params, _ = load_quantized_bert_base(batch_size=1, seq_len=128) _test(mod, params, target=target, sch_rule_tag=sch_rule_tag) @pytest.mark.skip("Too slow on CI") def extract_task_qbert_vnni(): extract_task_qbert("llvm -mcpu=cascadelake", "vnni") @pytest.mark.skip("Too slow on CI") def extract_task_qbert_avx512(): extract_task_qbert("llvm -mcpu=skylake-avx512", "avx512") @pytest.mark.skip("Integration tests") @tvm.testing.skip_if_32bit(reason="Apparently the LLVM version on i386 image is too old") def test_extract_task_arm_conv2d_nchwc(): data_shape = (1, 64, 128, 128) weight_shape = (32, 64, 1, 1) bias_shape = (weight_shape[0],) padding = (1, 1) data = relay.var("data", shape=data_shape, dtype="int8") weight = relay.var("weight", shape=weight_shape, dtype="int8") bias = relay.var("bias", shape=bias_shape, dtype="int32") conv2d = relay.nn.conv2d( data=data, weight=weight, kernel_size=weight_shape[2:], channels=weight_shape[0], padding=padding, strides=(1, 1), out_dtype="int32", ) bias_add = relay.nn.bias_add(conv2d, bias) relay_mod = tvm.IRModule.from_expr(bias_add) weight_np = np.random.uniform(1, 10, size=weight_shape).astype("int8") bias_np = np.random.uniform(1, 10, size=bias_shape).astype("int32") params = {"weight": 
weight_np, "bias": bias_np} target = "llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon" extracted_tasks = ms.relay_integration.extract_tasks(relay_mod, target, params) tune_tasks = list( filter( lambda task: "conv2d" in task.task_name, extracted_tasks, ) ) assert len(tune_tasks) == 1 relay_func = list(tune_tasks[0].mod.functions.values())[0] out_type = relay_func.body.checked_type # Check that the output is in NCHWc layout assert list(out_type.shape) == [1, 8, 130, 130, 4] @pytest.mark.skip("Integration tests") def test_meta_schedule_te2primfunc_argument_order_and_lowering(): # pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument # fmt: off @tvm.script.ir_module class _fused_layout_transform: @T.prim_func def main( # type: ignore placeholder: T.Buffer((T.int64(1), T.int64(3), T.int64(16), T.int64(16)), "float32"), # type: ignore T_layout_trans: T.Buffer((T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)), "float32"), # type: ignore ) -> None: # type: ignore # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") for i0, i1, i2, i3, i4 in T.grid(T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)): with T.block("T_layout_trans"): ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4]) T.reads(placeholder[ax0, ax1 * T.int64(3) + ax4, ax2, ax3]) T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4]) T.block_attr({"dst_layout": "NCHW3c", "input_shape": [1, 3, 16, 16], "schedule_rule": "None", "src_layout": "NCHW"}) T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else( ax0 < T.int64(1) and ax1 * T.int64(3) + ax4 < T.int64(3) and ax2 < T.int64(16) and ax3 < T.int64(16), # type: ignore placeholder[ax0, ax1 * T.int64(3) + ax4, ax2, ax3], T.float32(0), dtype="float32", ) @tvm.script.ir_module class _fused_layout_transform_1: @T.prim_func def main(placeholder: T.Buffer((T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4)), 
"float32"), T_layout_trans: T.Buffer((T.int64(1), T.int64(8), T.int64(16), T.int64(16)), "float32")) -> None: # type: ignore # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(8), T.int64(16), T.int64(16)): with T.block("T_layout_trans"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(placeholder[ax0, ax1 // T.int64(4), ax2, ax3, ax1 % T.int64(4)]) # type: ignore T.writes(T_layout_trans[ax0, ax1, ax2, ax3]) T.block_attr({"dst_layout": "NCHW", "input_shape": [1, 2, 16, 16, 4], "schedule_rule": "None", "src_layout": "NCHW4c"}) T_layout_trans[ax0, ax1, ax2, ax3] = T.if_then_else(ax0 < T.int64(1) and ax1 < T.int64(8) and ax2 < T.int64(16) and ax3 < T.int64(16), placeholder[ax0, ax1 // T.int64(4), ax2, ax3, ax1 % T.int64(4)], T.float32(0), dtype="float32") # type: ignore @tvm.script.ir_module class _fused_nn_contrib_conv2d_NCHWc: @T.prim_func def main(placeholder: T.Buffer((T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)), "float32"), placeholder_1: T.Buffer((T.int64(2), T.int64(1), T.int64(5), T.int64(5), T.int64(3), T.int64(4)), "float32"), conv2d_NCHWc: T.Buffer((T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4)), "float32")) -> None: # type: ignore # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") data_pad = T.alloc_buffer([T.int64(1), T.int64(1), T.int64(20), T.int64(20), T.int64(3)], dtype="float32") for i0, i1, i2, i3, i4 in T.grid(T.int64(1), T.int64(1), T.int64(20), T.int64(20), T.int64(3)): with T.block("data_pad"): i0_1, i1_1, i2_1, i3_1, i4_1 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4]) T.reads(placeholder[i0_1, i1_1, i2_1 - T.int64(2), i3_1 - T.int64(2), i4_1]) T.writes(data_pad[i0_1, i1_1, i2_1, i3_1, i4_1]) data_pad[i0_1, i1_1, i2_1, i3_1, i4_1] = T.if_then_else(T.int64(2) <= i2_1 and i2_1 < T.int64(18) and T.int64(2) <= i3_1 and i3_1 < 
T.int64(18), placeholder[i0_1, i1_1, i2_1 - T.int64(2), i3_1 - T.int64(2), i4_1], T.float32(0), dtype="float32") # type: ignore # pylint: disable=R1716 for i0, i1, i2, i3, i4, i5, i6, i7 in T.grid(T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4), T.int64(3), T.int64(5), T.int64(5)): with T.block("conv2d_NCHWc"): n, oc_chunk, oh, ow, oc_block, ic, kh, kw = T.axis.remap("SSSSSRRR", [i0, i1, i2, i3, i4, i5, i6, i7]) T.reads(data_pad[n, ic // T.int64(3), oh + kh, ow + kw, ic % T.int64(3)], placeholder_1[oc_chunk, ic // T.int64(3), kh, kw, ic % T.int64(3), oc_block]) # type: ignore T.writes(conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block]) with T.init(): conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = T.float32(0) conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] + data_pad[n, ic // T.int64(3), oh + kh, ow + kw, ic % T.int64(3)] * placeholder_1[oc_chunk, ic // T.int64(3), kh, kw, ic % T.int64(3), oc_block] # type: ignore # fmt: on # pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument def _create_verification_database(): @ms.derived_object class VerificationDatabase(ms.database.PyDatabase): def __init__(self): super().__init__() self.tuning_records_: List[TuningRecord] = [] self.workloads_: List[Workload] = [] def has_workload(self, mod: IRModule) -> bool: for workload in self.workloads_: if tvm.ir.structural_equal(mod, workload.mod): return True # Note: The database has already put in all correct workloads # This is where we can check if the workload is correct raise ValueError( "The workload searched for is not in given database!" + " Incorrect TIR was generated from TE subgraph." 
) def commit_workload(self, mod: IRModule) -> ms.database.Workload: # No need to deduplicate workload because they are specified workload = ms.database.Workload(mod) self.workloads_.append(workload) return workload def commit_tuning_record(self, record: TuningRecord) -> None: self.tuning_records_.append(record) def get_all_tuning_records(self) -> List[TuningRecord]: return self.tuning_records_ def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]: return sorted( list( filter( lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod), self.tuning_records_, ) ), key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9, )[:top_k] def __len__(self) -> int: return len(self.tuning_records_) database = VerificationDatabase() def _commit(mod): workload = database.commit_workload(mod) database.commit_tuning_record( ms.database.TuningRecord( tir.schedule.Trace([], {}), workload=workload, run_secs=[0.1], ) ) _commit(_fused_layout_transform) _commit(_fused_layout_transform_1) _commit(_fused_nn_contrib_conv2d_NCHWc) return database data_shape = (1, 3, 16, 16) weight_shape = (8, 3, 5, 5) def _create_relay_mod(): data = relay.var("data", relay.TensorType(data_shape, "float32")) weight = relay.var("weight", relay.TensorType(weight_shape, "float32")) y = relay.nn.conv2d( data, weight, padding=(2, 2), kernel_size=(5, 5), kernel_layout="OIHW", out_dtype="float32", ) f = relay.Function([data, weight], y) mod = tvm.IRModule.from_expr(f) mod = relay.transform.InferType()(mod) return mod mod = _create_relay_mod() dev = tvm.cpu() target = Target("llvm --num-cores=16") params = { "weight": np.random.rand(*weight_shape).astype("float32"), } data = tvm.nd.array( np.random.rand(*data_shape).astype("float32"), dev, ) with ( target ), _create_verification_database(), PassContext( # pylint: disable=not-context-manager opt_level=3, config={ "relay.backend.use_meta_schedule": True, "relay.backend.use_meta_schedule_dispatch": 7, 
"relay.backend.tir_converter": "default", }, ): rt_mod1 = relay.build(mod, target=target, params=params) # Compile without meta-schedule for correctness check with tvm.transform.PassContext(opt_level=0): rt_mod2 = relay.build(mod, target=target, params=params) def get_output(data, lib): module = graph_executor.GraphModule(lib["default"](dev)) module.set_input("data", data) module.run() return module.get_output(0).numpy() # Check correctness actual_output = get_output(data, rt_mod1) expected_output = get_output(data, rt_mod2) assert np.allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4) @pytest.mark.skip("Integration tests") def test_rewrite_layout_link_params(): I, O, H, W = 64, 64, 56, 56 kH = kW = 3 strides = (1, 1) padding = (1, 1) data_shape = (1, H, W, I) w_shape = (kH, kW, I, O) bias_shape = (1, 1, 1, O) data = relay.var("data", shape=data_shape, dtype="float32") weight = relay.var("weight1", shape=w_shape, dtype="float32") bias = relay.var("bias", shape=bias_shape, dtype="float32") conv = relay.nn.conv2d( data=data, weight=weight, kernel_size=(kH, kW), channels=O, padding=padding, strides=strides, data_layout="NHWC", kernel_layout="HWIO", out_dtype="float32", ) mod = tvm.IRModule.from_expr(conv + bias) weight_np = np.random.randn(*w_shape).astype("float32") bias_np = np.random.randn(*bias_shape).astype("float32") params = {"weight1": weight_np, "bias": bias_np} data_np = np.random.randn(*data_shape).astype("float32") ref = ( relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm") .evaluate()(*[data_np, weight_np, bias_np]) .numpy() ) link_params = True target = "llvm --num-cores=4" executor = relay.backend.Executor("graph", {"link-params": link_params}) mod = mod.with_attr("executor", executor) for strategy in ["replay-trace", "evolutionary"]: with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, target=target, params=params, work_dir=work_dir, max_trials_global=4, strategy=strategy, 
) lib = ms.relay_integration.compile_relay( database=database, mod=mod, target=target, params=params, ) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4) @pytest.mark.skip("Integration tests") def test_module_equality_ignore_ndarray(): target = "llvm --num-cores=4" data_shape = (128, 128) weight_shape1 = (128, 128) weight_shape2 = (128, 128) data = relay.var("data", shape=data_shape, dtype="float32") weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32") weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32") dense1 = relay.nn.dense(data, weight1) dense2 = relay.nn.dense(dense1, weight2) mod = tvm.IRModule.from_expr(dense2) weight1_np = np.random.randn(*weight_shape1).astype("float32") weight2_np = np.random.randn(*weight_shape2).astype("float32") params = {"weight1": weight1_np, "weight2": weight2_np} executor = relay.backend.Executor("graph", {"link-params": True}) mod = mod.with_attr("executor", executor) # Without using ignore-ndarray for module equality, we get duplicated tasks assert len(ms.relay_integration.extract_tasks(mod, target, params)) == 2 module_eqality = "ignore-ndarray" extracted_tasks = ms.relay_integration.extract_tasks( mod, target, params, module_equality=module_eqality ) assert len(extracted_tasks) == 1 with tempfile.TemporaryDirectory() as work_dir: tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts( extracted_tasks, work_dir, strategy="replay-trace" ) database = ms.tune.tune_tasks( tasks=tasks, task_weights=task_weights, work_dir=work_dir, max_trials_global=4, module_equality=module_eqality, ) lib = ms.relay_integration.compile_relay(database, mod, target, params) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) data_np = 
np.random.randn(*data_shape).astype("float32") runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() ref = np.dot(np.dot(data_np, weight1_np.transpose()), weight2_np.transpose()) np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4) def _test_anchor_tuning(target, space): data_shape = (128, 128) weight_shape1 = (128, 128) weight_shape2 = (128, 128) data = relay.var("data", shape=data_shape, dtype="float32") weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32") weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32") dense1 = relay.nn.dense(data, weight1) dense2 = relay.nn.dense(dense1 + relay.const(1.0, dtype="float32"), weight2) mod = tvm.IRModule.from_expr(dense2 - data + relay.const(1.0, dtype="float32")) weight1_np = np.random.randn(*weight_shape1).astype("float32") weight2_np = np.random.randn(*weight_shape2).astype("float32") data_np = np.random.randn(*data_shape).astype("float32") params = {"weight1": weight1_np, "weight2": weight2_np} module_equality = "anchor-block" extracted_tasks = ms.relay_integration.extract_tasks( mod, target, params, module_equality=module_equality ) assert len(extracted_tasks) == 1 with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, target=target, params=params, work_dir=work_dir, space=space, max_trials_global=4, strategy="replay-trace", module_equality=module_equality, num_tuning_cores=4, ) lib = ms.relay_integration.compile_relay(database, mod, target, params) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() ref = ( relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm") .evaluate()(*[data_np, weight1_np, weight2_np]) .numpy() ) np.testing.assert_allclose(ref, out, atol=1e-3) @pytest.mark.skip("Integration tests") @pytest.mark.parametrize( "space", [ 
ms.space_generator.PostOrderApply(), ms.space_generator.PostOrderApply(sch_rules=[], postprocs=[], mutator_probs={}), ], ) def test_anchor_tuning_cpu(space): _test_anchor_tuning("llvm --num-cores=4", space) @pytest.mark.skip("Integration tests") def test_anchor_tuning_cpu_link_params(): data_shape = (128, 128) weight_shape1 = (128, 128) weight_shape2 = (128, 128) data = relay.var("data", shape=data_shape, dtype="float32") weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32") weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32") dense1 = relay.nn.dense(data, weight1) dense2 = relay.nn.dense(dense1, weight2) mod = tvm.IRModule.from_expr(dense2 + relay.const(1.0, dtype="float32")) weight1_np = np.random.randn(*weight_shape1).astype("float32") weight2_np = np.random.randn(*weight_shape2).astype("float32") data_np = np.random.randn(*data_shape).astype("float32") params = {"weight1": weight1_np, "weight2": weight2_np} module_equality = "anchor-block" target = "llvm --num-cores=4" executor = relay.backend.Executor("graph", {"link-params": True}) mod = mod.with_attr("executor", executor) with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, target=target, params=params, work_dir=work_dir, max_trials_global=4, strategy="replay-trace", module_equality=module_equality, ) lib = ms.relay_integration.compile_relay(database, mod, target, params) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() ref = ( relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm") .evaluate()(*[data_np, weight1_np, weight2_np]) .numpy() ) np.testing.assert_allclose(ref, out, atol=1e-3) @pytest.mark.skip("Integration tests") @pytest.mark.xfail(raises=tvm.error.TVMError) def test_disabled_pass_param(): """ Check 'disabled_pass' parameter in tune_relay. 
Should throw exception in case of correct work. """ data_shape = [1, 4, 16, 16] weight_shape = [32, 4, 2, 2] data = relay.var("data", shape=data_shape, dtype="uint8") weight = relay.var("weight", shape=weight_shape, dtype="int8") op = relay.qnn.op.conv2d( data, weight, input_zero_point=relay.const(0), kernel_zero_point=relay.const(0), input_scale=relay.const(0.7), kernel_scale=relay.const(0.3), kernel_size=[2, 2], channels=32, ) mod = tvm.IRModule.from_expr(op) weight_np = np.random.randint(-10, 10, size=weight_shape).astype("int8") params = {"weight": weight_np} executor = relay.backend.Executor("graph", {"link-params": True}) mod = mod.with_attr("executor", executor) with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, target="llvm --num-cores=4", params=params, work_dir=work_dir, max_trials_global=4, strategy="replay-trace", disabled_pass=["qnn.Legalize"], ) # Test failed, otherwise we can not reach this point. pytest.fail("'disabled_pass' argument does not work") @pytest.mark.skip("Integration tests") def test_rewrite_layout_link_params_1x1_conv2d(): I, O, H, W = 32, 16, 256, 256 kH = kW = 1 strides = (1, 1) padding = (0, 0) data_shape = (1, H, W, I) w_shape = (kH, kW, I, O) data = relay.var("data", shape=data_shape, dtype="float32") weight = relay.var("weight", shape=w_shape, dtype="float32") conv = relay.nn.conv2d( data=data, weight=weight, kernel_size=(kH, kW), channels=O, padding=padding, strides=strides, data_layout="NHWC", kernel_layout="HWIO", out_dtype="float32", ) mod = tvm.IRModule.from_expr(conv) weight_np = np.random.randn(*w_shape).astype("float32") params = {"weight": weight_np} data_np = np.random.randn(*data_shape).astype("float32") ref = ( relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm") .evaluate()(*[data_np, weight_np]) .numpy() ) link_params = True target = "llvm --num-cores=4" executor = relay.backend.Executor("graph", {"link-params": link_params}) mod = 
mod.with_attr("executor", executor) with tempfile.TemporaryDirectory() as work_dir: database = ms.relay_integration.tune_relay( mod=mod, target=target, params=params, work_dir=work_dir, max_trials_global=8, strategy="replay-trace", ) lib = ms.relay_integration.compile_relay( database=database, mod=mod, target=target, params=params, ) dev = tvm.device(target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4) if __name__ == "__main__": tvm.testing.main()
36,817
35.707876
347
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_cost_model.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-docstring import os import re import shutil import tempfile import unittest from functools import partial from typing import List import numpy as np import tvm import tvm.testing from tvm.meta_schedule.cost_model import PyCostModel, RandomModel, XGBModel from tvm.meta_schedule.cost_model.xgb_model import PackSum, _get_custom_call_back from tvm.meta_schedule.feature_extractor import RandomFeatureExtractor from tvm.meta_schedule.runner import RunnerResult from tvm.meta_schedule.search_strategy import MeasureCandidate from tvm.meta_schedule.tune_context import TuneContext from tvm.meta_schedule.utils import derived_object from tvm.script import tir as T from tvm.tir.schedule.schedule import Schedule # pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring @tvm.script.ir_module class Matmul: @T.prim_func def main(a: T.handle, b: T.handle, c: T.handle) -> None: # pylint: disable=no-self-argument T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, (1024, 1024), "float32") B = T.match_buffer(b, (1024, 1024), "float32") C = T.match_buffer(c, (1024, 1024), "float32") for i, j, k in T.grid(1024, 1024, 1024): with 
T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] @tvm.script.ir_module class FullModule: @T.prim_func def main(T_full: T.Buffer((T.int64(2), T.int64(3)), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": True}) for ax0, ax1 in T.grid(T.int64(2), T.int64(3)): with T.block("T_full"): v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1]) T.reads() T.writes(T_full[v_ax0, v_ax1]) T_full[v_ax0, v_ax1] = T.float32(1) # pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,disable=unused-argument def test_meta_schedule_cost_model(): @derived_object class FancyCostModel(PyCostModel): def load(self, path: str) -> None: pass def save(self, path: str) -> None: pass def update( self, context: TuneContext, candidates: List[MeasureCandidate], results: List[RunnerResult], ) -> None: pass def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray: return np.random.rand(10) model = FancyCostModel() model.save("fancy_test_location") model.load("fancy_test_location") model.update(TuneContext(), [], []) results = model.predict( TuneContext(), [MeasureCandidate(Schedule(mod=Matmul), []) for _ in range(10)] ) assert results.shape == (10,) def test_meta_schedule_cost_model_as_string(): @derived_object class NotSoFancyCostModel(PyCostModel): def load(self, path: str) -> None: pass def save(self, path: str) -> None: pass def update( self, context: TuneContext, candidates: List[MeasureCandidate], results: List[RunnerResult], ) -> None: pass def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray: return np.random.rand(10) cost_model = NotSoFancyCostModel() pattern = re.compile(r"meta_schedule.NotSoFancyCostModel\(0x[a-f|0-9]*\)") assert pattern.match(str(cost_model)) def test_meta_schedule_random_model(): model = RandomModel() model.update(TuneContext(), [], []) res = model.predict(TuneContext(), 
[MeasureCandidate(Schedule(Matmul), []) for i in range(10)]) assert len(res) == 10 assert min(res) >= 0 and max(res) <= model.max_range def test_meta_schedule_random_model_reseed(): model = RandomModel(seed=100) res = model.predict(TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for i in range(20)]) new_model = RandomModel(seed=100) new_res = new_model.predict( TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for i in range(20)] ) assert (res == new_res).all() def test_meta_schedule_random_model_reload(): model = RandomModel(seed=25973) model.predict( TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for i in range(30)] ) # change state path = os.path.join(tempfile.mkdtemp(), "test_output_meta_schedule_random_model.npy") model.save(path) res1 = model.predict(TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for i in range(70)]) model.load(path) res2 = model.predict(TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for i in range(70)]) shutil.rmtree(os.path.dirname(path)) assert (res1 == res2).all() def _dummy_candidate(): return MeasureCandidate(Schedule(Matmul), []) def _dummy_result(num_samples: int = 4, max_run_sec: int = 10): return RunnerResult(list(np.random.rand(num_samples) * max_run_sec + 1e-6), None) def test_meta_schedule_xgb_model(): extractor = RandomFeatureExtractor() model = XGBModel(extractor=extractor, num_warmup_samples=2) update_sample_count = 10 predict_sample_count = 100 model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]) def test_meta_schedule_xgb_model_no_feature(): model = XGBModel(num_warmup_samples=0) tune_ctx = TuneContext( FullModule, target="llvm --num-cores 16", space_generator="post-order-apply", search_strategy="evolutionary", ) candidate = MeasureCandidate(Schedule(FullModule), []) model.update(tune_ctx, [candidate], 
[_dummy_result()]) model.predict(tune_ctx, [candidate]) def test_meta_schedule_xgb_model_reload(): extractor = RandomFeatureExtractor() model = XGBModel(extractor=extractor, num_warmup_samples=10) update_sample_count = 20 predict_sample_count = 30 model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]) with tempfile.NamedTemporaryFile() as path: # Backup random_state = model.extractor.random_state # save feature extractor's random state old_data = model.data old_data_size = model.data_size model.save(path.name) res1 = model.predict( TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)] ) # Load model.extractor.random_state = random_state # load feature extractor's random state model.load(path.name) new_data = model.data new_data_size = model.data_size res2 = model.predict( TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)] ) assert (res1 == res2).all() assert old_data_size == new_data_size assert len(old_data) == len(new_data) for (k1, g1), (k2, g2) in zip( # pylint: disable=invalid-name old_data.items(), new_data.items() ): assert k1 == k2 assert k1 == g1.group_hash assert k2 == g2.group_hash assert (g1.costs == g2.costs).all() assert len(g1.features) == len(g2.features) for f1, f2 in zip(g1.features, g2.features): # pylint: disable=invalid-name assert (f1 == f2).all() def test_meta_schedule_xgb_model_reupdate(): extractor = RandomFeatureExtractor() model = XGBModel(extractor=extractor, num_warmup_samples=2) update_sample_count = 60 predict_sample_count = 100 model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) 
model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]) def xgb_version_check(): # pylint: disable=import-outside-toplevel import xgboost as xgb from packaging import version # pylint: enable=import-outside-toplevel return version.parse(xgb.__version__) >= version.parse("1.6.0") @unittest.skipIf(xgb_version_check(), "test not supported for xgboost version after 1.6.0") def test_meta_schedule_xgb_model_callback_as_function(): # pylint: disable=import-outside-toplevel from itertools import chain as itertools_chain import xgboost as xgb # pylint: enable=import-outside-toplevel extractor = RandomFeatureExtractor() model = XGBModel(extractor=extractor, num_warmup_samples=10) update_sample_count = 20 predict_sample_count = 30 model.update( TuneContext(), [_dummy_candidate() for i in range(update_sample_count)], [_dummy_result() for i in range(update_sample_count)], ) model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]) with tempfile.NamedTemporaryFile() as path: # Backup and train on new TrainingCallBack api random_state = model.extractor.random_state # save feature extractor's random state model.save(path.name) old_booster = model.booster xs = [ # pylint: disable=invalid-name x.numpy().astype("float32") for x in extractor.extract_from( TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)], ) ] d_test = PackSum(xs=xs, ys=None) pred1 = old_booster.predict(d_test.dmatrix) # Load and train on deprecated TrainingCallBack api model.extractor.random_state = random_state # load feature extractor's random state model.load(path.name) d_train = PackSum( xs=list(itertools_chain.from_iterable([g.features for g in model.data.values()])), ys=np.concatenate( [g.min_cost / g.costs for g in model.data.values()], axis=0, ), ) def obj(ys_pred: np.ndarray, 
d_train1: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument return d_train.obj_square_error(ys_pred) def rmse(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument return d_train.rmse(ys_pred) def avg_peak_score(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"): # type: ignore # pylint: disable = unused-argument return d_train.average_peak_score(ys_pred, model.average_peak_n) new_booster = xgb.train( model.config.to_dict(), d_train.dmatrix, num_boost_round=10000, obj=obj, callbacks=[ partial( _get_custom_call_back( early_stopping_rounds=model.early_stopping_rounds, verbose_eval=model.verbose_eval, fevals=[rmse, avg_peak_score], evals=[(d_train.dmatrix, "tr")], cvfolds=None, ) ) ], ) xs = [ # pylint: disable=invalid-name x.numpy().astype("float32") for x in extractor.extract_from( TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)], ) ] d_test = PackSum(xs=xs, ys=None) pred2 = new_booster.predict(d_test.dmatrix) assert np.allclose(pred1, pred2, rtol=1e-3, atol=1e-3) if __name__ == "__main__": tvm.testing.main()
13,087
35.355556
125
py
tvm
tvm-main/tests/python/unittest/test_te_autodiff.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import pytest import tvm from tvm import te, topi from tvm.testing import assert_allclose from tvm.topi.utils import get_const_tuple def check_grad( out, inputs, args=[], data_range=(-10, 10), desired_grads=None, assert_no_jacobian=True ): inputs = inputs if isinstance(inputs, list) else [inputs] def check_device(device, host="llvm"): dev = tvm.device(device, 0) if not tvm.testing.device_enabled(host): return sout = te.create_schedule(out.op) mout = tvm.build(sout, [out] + inputs + args) out_shape = get_const_tuple(out.shape) l, h = data_range input_data = [ tvm.nd.array( np.random.uniform(l, h, size=get_const_tuple(input.shape)).astype(input.dtype) ) for input in inputs ] arg_vals = [ tvm.nd.array(np.random.uniform(l, h, size=get_const_tuple(arg.shape)).astype(arg.dtype)) for arg in args ] ones = topi.full_like(out, 1.0) # we provide head to sum and reduce the output dimension, # which equals to grad(out.sum(), inputs) grads = te.gradient(out, inputs, head=ones) grad_sched = te.create_schedule([grad.op for grad in grads]) mgrad = tvm.build(grad_sched, list(grads) + inputs + args) if assert_no_jacobian: # TODO(yzhliu): it is better to visit the expression and do assertion lowered_ir 
= str(tvm.lower(grad_sched, list(grads) + inputs + args, simple_mode=True)) assert "jacobian" not in lowered_ir, lowered_ir grad_data = [tvm.nd.empty(get_const_tuple(i.shape), g.dtype) for i, g in zip(inputs, grads)] mgrad(*grad_data, *input_data, *arg_vals) g_res = [g.numpy() for g in grad_data] if desired_grads: assert isinstance(desired_grads, list) for actual, desired in zip(g_res, desired_grads): assert_allclose(actual, desired, rtol=0.1, atol=1e-2) else: def forward(*in_data): out_data = tvm.nd.empty(out_shape, out.dtype) mout(out_data, *[tvm.nd.array(d) for d in list(in_data)]) return out_data.numpy().sum() tvm.testing.check_numerical_grads( forward, [d.numpy() for d in input_data + arg_vals], g_res ) check_device("cpu") def test_basic_operation(): np.random.seed(0) shape = (10, 10) x = te.var("x", dtype="float32") k = te.reduce_axis((0, 10), name="k") l = te.reduce_axis((0, 10), name="l") A0 = te.placeholder(shape, name="A0") A1 = te.placeholder(shape, name="A1") zeros = np.zeros(shape) B = te.compute(shape, lambda i, j: A0[i, j], name="B") check_grad(B, [A0]) B = te.compute(shape, lambda i, j: A0[i, j] + A1[i, j], name="B") check_grad(B, [A0, A1]) B = te.compute(shape, lambda i, j: A0[i, j] + A0[j, i], name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.floor(A0[i, j]), name="B") check_grad(B, A0, desired_grads=[zeros]) B = te.compute(shape, lambda i, j: te.ceil(A0[i, j]), name="B") check_grad(B, A0, desired_grads=[zeros]) B = te.compute(shape, lambda i, j: te.trunc(A0[i, j]), name="B") check_grad(B, A0, desired_grads=[zeros]) B = te.compute(shape, lambda i, j: te.round(A0[i, j]), name="B") check_grad(B, A0, desired_grads=[zeros]) B = te.compute(shape, lambda i, j: A0[i, j] + te.exp(A0[j, i]), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.log(0.1 + te.abs(A0[i, j] + te.exp(A0[j, i]))), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.sigmoid(A0[i, j] * A0[i, j] * A0[j, i]), name="B") check_grad(B, A0) B = 
te.compute(shape, lambda i, j: te.tanh(A0[i, j] * A0[i, j] * A0[j, i]), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.sqrt(A0[i, j] * A0[i, j] * A0[j, i]), name="B") check_grad(B, A0, data_range=(0.1, 10)) B = te.compute(shape, lambda i, j: te.power(te.abs(A0[i, j]), A0[j, i]), name="B") check_grad(B, A0, data_range=(-4, 4)) B = te.compute(shape, lambda i, j: A0[i, j] * A0[j, i], name="B") check_grad(B, A0) B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.sum(A0[i, k] * A0[k, i] + 5, axis=k), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: te.max(A0[i, k] * A0[k, j] + 5, axis=k), name="B") check_grad(B, A0) B = te.compute(shape, lambda i, j: A0[i, j] * (A1[j, i] + A0[j, i]), name="B") check_grad(B, [A0, A1]) B = te.compute( shape, lambda i, j: te.sum(A0[k, k] - A0[te.min(j + k, 9), j] * A0[i, k], axis=k), name="B" ) check_grad(B, A0) def fcombine(x, y): return x * y def fidentity(t0): return tvm.tir.const(1, t0) prod = te.comm_reducer(fcombine, fidentity, name="prod") B = te.compute((10, 10), lambda i, j: prod(A0[i, k] + A0[k, i], axis=k), name="B") check_grad(B, A0) X = te.placeholder((10,), name="X") A = te.compute((10,), lambda i: X[i] + X[9 - i]) B = te.compute((10,), lambda i: X[i] * X[9 - i]) Y = topi.tensordot(A, B, 1) check_grad(Y, X) X = te.placeholder((3, 3), name="X") Y = topi.einsum("ii->i", (X)) check_grad(Y, X) def test_topi(): X = te.placeholder((1, 2, 4, 4), name="X") W = te.placeholder((5, 2, 3, 3), name="W") W1 = te.placeholder((2, 5, 3, 3), name="W1") W2 = te.placeholder((1,), name="W2") R = topi.nn.conv2d(X, W, 1, 1, 1) check_grad(R, [X, W]) R1 = topi.nn.conv2d(topi.nn.relu(R), W1, 1, 0, 1) check_grad(R1, [X, W, W1]) R = topi.broadcast_to(W2, (5, 2, 3, 3)) check_grad(R, [W2]) R = topi.nn.conv2d(X, topi.broadcast_to(W2, (5, 2, 3, 3)), 1, 1, 1) check_grad(R, [X, W2]) R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], 
"avg") check_grad(R, X) R = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max") check_grad(R, X) X = te.placeholder((1, 2, 5, 5), name="X") R = topi.reshape(X, (1, 32)) check_grad(R, [X]) X = te.placeholder((1, 2, 5, 5), name="X") W = te.placeholder((2, 2, 3, 3), name="W") S = topi.reshape(X, (1, 50)) check_grad(S, [X]) R = X + topi.nn.conv2d(X + topi.nn.conv2d(X, W, 1, 1, 1), W, 1, 1, 1) check_grad(R, [X, W]) S = topi.nn.softmax(topi.reshape(R, (1, 50))) check_grad(S, [X, W]) S = topi.sigmoid(topi.reshape(R, (1, 50))) check_grad(S, [X, W]) S = topi.tanh(topi.reshape(R, (1, 50))) check_grad(S, [X, W]) S = topi.nn.log_softmax(topi.reshape(R, (1, 50))) check_grad(S, [X, W]) check_grad(S, [W], [X]) X = te.placeholder((1, 2, 3, 5), name="X") Y = te.placeholder((1, 2, 7, 5), name="Y") S = topi.concatenate((X, Y), 2) check_grad(S, [X, Y]) X = te.placeholder((1, 2, 6, 5), name="X") (S, R) = topi.split(X, 2, 2) check_grad(S, [X]) check_grad(R, [X]) R1 = topi.concatenate((S, R), 2) check_grad(R1, [X]) R2 = topi.concatenate((R, S), 2) check_grad(R2, [X]) X = te.placeholder((4, 5), name="X") I = te.placeholder((100,), name="I", dtype="int32") R = topi.take(X, topi.abs(I)) check_grad(R, [X], [I]) W = te.placeholder((5, 5), name="W") exps = topi.exp(topi.nn.dense(X, W)) sumexps = topi.sum(exps, axis=-1, keepdims=True) R = exps / sumexps check_grad(R, [X, W], data_range=(-1, 1)) def test_stride_dilation(): X = te.placeholder((1, 2, 10, 10), name="X") W = te.placeholder((2, 2, 1, 1), name="W") Y = topi.nn.conv2d(X, W, 1, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 3) 
check_grad(Y, [X, W]) W = te.placeholder((2, 2, 2, 2), name="W") Y = topi.nn.conv2d(X, W, 1, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 3) check_grad(Y, [X, W]) W = te.placeholder((2, 2, 3, 3), name="W") Y = topi.nn.conv2d(X, W, 1, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 1) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 2) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 1, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 2, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.conv2d(X, W, 3, 0, 3) check_grad(Y, [X, W]) Y = topi.nn.pool2d(X, [1, 1], [1, 1], [1, 1], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [1, 1], [1, 1], [2, 2], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [1, 1], [1, 1], [3, 3], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [2, 2], [1, 1], [1, 1], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [2, 2], [1, 1], [2, 2], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [2, 2], [1, 1], [3, 3], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [3, 3], [1, 1], [1, 1], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [3, 3], [1, 1], [2, 2], [0, 0, 0, 0], "max") check_grad(Y, [X]) Y = topi.nn.pool2d(X, [3, 3], [1, 1], [3, 3], [0, 0, 0, 0], "max") check_grad(Y, [X]) @pytest.mark.xfail def test_reduction_init(): np.random.seed(0) shape = (10, 10) k = te.reduce_axis((0, 10), 
name="k") A0 = te.placeholder(shape, name="A0") B = te.compute((10,), lambda i: te.sum(A0[i, k] * A0[k, i], axis=k, init=0.0), name="B") check_grad(B, A0) if __name__ == "__main__": test_basic_operation() test_topi() test_stride_dilation()
11,590
31.928977
100
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_space_cuda_async_multiple_initialization.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Tests for MetaSchedule search space on CUDA""" from typing import List, Optional, Tuple, Union # isort: off from typing_extensions import Literal # isort: on from tvm.meta_schedule.testing.space_generation import get_rules from tvm import meta_schedule as ms from tvm.meta_schedule.testing.te_workload import create_te_workload from tvm.target import Target from tvm.ir import IRModule from tvm.tir import Schedule def generate_design_space( kind: Literal["llvm", "cuda", "cuda-tensorcore", "hexagon"], mod: IRModule, target: Target, types: Union[type, Tuple[type, ...]], sch_rules: Optional[List[ms.ScheduleRule]] = None, initialize_time: int = 1, ) -> List[Schedule]: if sch_rules is None: sch_rules = get_rules(kind, types) else: assert types is None ctx = ms.TuneContext( mod=mod, target=target, space_generator=ms.space_generator.PostOrderApply( sch_rules=sch_rules, postprocs=[], mutator_probs={}, ), task_name="test", ) # each time cloning will trigger one more initialization for _ in range(initialize_time - 1): ctx = ctx.clone() return ctx.generate_design_space() def _target(): return Target("nvidia/geforce-rtx-3070") def _design_space(mod): return generate_design_space( kind="cuda", mod=mod, 
target=_target(), types=ms.ScheduleRule, initialize_time=100, ) def test_c2d(): mod = create_te_workload("C2D", 0) actual = _design_space(mod) assert len(actual) == 3 def test_gmm(): mod = create_te_workload("GMM", 0) actual = _design_space(mod) assert len(actual) == 3 if __name__ == "__main__": test_c2d() test_gmm()
2,563
27.808989
68
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_blockize.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip import pytest # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks @T.prim_func def single_elementwise(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")): for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 # fmt: on # pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,unexpected-keyword-arg,too-many-nested-blocks def test_blockize_outer(): @T.prim_func def after_blockize_outer( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), ) -> None: with T.block("blockized_B"): vio = T.axis.spatial(1, 0) vjo = T.axis.spatial(1, 0) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 func = single_elementwise s = tir.Schedule(func, debug_mask="all") x, _ = s.get_loops(s.get_block("B")) s.blockize(x) 
tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_outer) verify_trace_roundtrip(sch=s, mod=func) def test_blockize_inner(): @T.prim_func def after_blockize_inner( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), ) -> None: for i in T.serial(128): with T.block("blockized_B"): vi = T.axis.spatial(128, i) vjo = T.axis.spatial(1, 0) for j in T.serial(128): with T.block("B"): vj = T.axis.remap("S", [j]) B[vi, vj] = A[vi, vj] * 2.0 func = single_elementwise s = tir.Schedule(func, debug_mask="all") _, y = s.get_loops(s.get_block("B")) s.blockize(y) tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_inner) verify_trace_roundtrip(sch=s, mod=func) def test_two_elementwise_blockize_reverse_compute_at(): @T.prim_func def before_blockize_rca( A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: B = T.alloc_buffer([128, 128], dtype="float32") for i, j in T.grid(8, 8): with T.block("B_o"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) for i_1, j_1 in T.grid(16, 16): with T.block("B"): vi_i, vj_i = T.axis.remap("SS", [i_1, j_1]) T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i]) T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i]) B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0 for ax0, ax1 in T.grid(16, 16): with T.block("C"): vi = T.axis.spatial(128, i * 16 + ax0) vj = T.axis.spatial(128, j * 16 + ax1) T.reads(B[vi, vj]) T.writes(C[vi, vj]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def after_blockize_rca( A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: B = T.alloc_buffer([128, 128], dtype="float32") for i, j in T.grid(8, 8): with T.block("B_o"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) for i_1, j_1 in T.grid(16, 16): with T.block("B"): vi_i, 
vj_i = T.axis.remap("SS", [i_1, j_1]) T.reads(A[vi * 16 + vi_i, vj * 16 + vj_i]) T.writes(B[vi * 16 + vi_i, vj * 16 + vj_i]) B[vi * 16 + vi_i, vj * 16 + vj_i] = A[vi * 16 + vi_i, vj * 16 + vj_i] * 2.0 with T.block("C_o"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(B[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) T.writes(C[vi * 16 : vi * 16 + 16, vj * 16 : vj * 16 + 16]) for ax0, ax1 in T.grid(16, 16): with T.block("C"): vi_i, vj_i = T.axis.remap("SS", [ax0, ax1]) T.reads(B[vi * 16 + vi_i, vj * 16 + vj_i]) T.writes(C[vi * 16 + vi_i, vj * 16 + vj_i]) C[vi * 16 + vi_i, vj * 16 + vj_i] = B[vi * 16 + vi_i, vj * 16 + vj_i] + 1.0 func = before_blockize_rca s = tir.Schedule(func, debug_mask="all") _, _, x, _ = s.get_loops(s.get_block("C")) s.blockize(x) tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_rca) verify_trace_roundtrip(sch=s, mod=func) def test_two_elementwise_blockize_compute_at(): @T.prim_func def before_blockize_compute_at( A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: # body # with T.block("root") B = T.alloc_buffer([128, 128], dtype="float32") for i_0, j_0 in T.grid(8, 8): for ax0, ax1 in T.grid(16, 16): with T.block("B"): vi = T.axis.spatial(128, i_0 * 16 + ax0) vj = T.axis.spatial(128, j_0 * 16 + ax1) T.reads(A[vi, vj]) T.writes(B[vi, vj]) B[vi, vj] = A[vi, vj] * 2.0 with T.block("C_o"): vi_o, vj_o = T.axis.remap("SS", [i_0, j_0]) T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) for i_1, j_1 in T.grid(16, 16): with T.block("C"): vi_i, vj_i = T.axis.remap("SS", [i_1, j_1]) T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = ( B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0 ) @T.prim_func def after_blockize_compute_at( A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: B = T.alloc_buffer([128, 128], 
dtype="float32") for i_0, j_0 in T.grid(8, 8): with T.block("B_o"): vi_o, vj_o = T.axis.remap("SS", [i_0, j_0]) T.reads(A[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) T.writes(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) for ax0, ax1 in T.grid(16, 16): with T.block("B"): vi_i, vj_i = T.axis.remap("SS", [ax0, ax1]) T.reads(A[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) T.writes(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = ( A[vi_o * 16 + vi_i, vj_o * 16 + vj_i] * 2.0 ) with T.block("C_o"): vi_o, vj_o = T.axis.remap("SS", [i_0, j_0]) T.reads(B[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) T.writes(C[vi_o * 16 : vi_o * 16 + 16, vj_o * 16 : vj_o * 16 + 16]) for i_1, j_1 in T.grid(16, 16): with T.block("C"): vi_i, vj_i = T.axis.remap("SS", [i_1, j_1]) T.reads(B[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) T.writes(C[vi_o * 16 + vi_i, vj_o * 16 + vj_i]) C[vi_o * 16 + vi_i, vj_o * 16 + vj_i] = ( B[vi_o * 16 + vi_i, vj_o * 16 + vj_i] + 1.0 ) func = before_blockize_compute_at s = tir.Schedule(func, debug_mask="all") _, _, x, _ = s.get_loops(s.get_block("B")) s.blockize(x) tvm.ir.assert_structural_equal(s.mod["main"], after_blockize_compute_at) verify_trace_roundtrip(sch=s, mod=func) def test_blockize_init_loops(): @T.prim_func def rowsum(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128,), "float32")) -> None: for k, i in T.grid(128, 128): with T.block("B"): vk, vi = T.axis.remap("RS", [k, i]) with T.init(): B[vi] = 0.0 B[vi] = B[vi] + A[vi, vk] @T.prim_func def after_rowsum_blockize( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128,), "float32"), ) -> None: with T.block("blockized_B"): vko = T.axis.R(1, 0) vio = T.axis.S(1, 0) with T.init(): for i1 in T.serial(0, 128): with T.block("B_init"): vi_init = T.axis.S(128, i1) B[vi_init] = T.float32(0) for i0, i1_1 in T.grid(128, 128): with T.block("B"): vk, vi = T.axis.remap("RS", [i0, i1_1]) B[vi] = B[vi] + A[vi, vk] s = tir.Schedule(rowsum, debug_mask="all") k, _ = 
s.get_loops(s.get_block("B")) s.blockize(k) tvm.ir.assert_structural_equal(s.mod["main"], after_rowsum_blockize) verify_trace_roundtrip(sch=s, mod=rowsum) @pytest.mark.parametrize("preserve_unit_iters", [True, False]) def test_blockize_outer_int64_shape(preserve_unit_iters): @T.prim_func def single_elementwise_int64( A: T.Buffer((T.int64(16), T.int64(128)), "float32"), B: T.Buffer((T.int64(16), T.int64(128)), "float32"), ) -> None: for i0, j0, i1, j1 in T.grid(T.int64(1), T.int64(8), T.int64(16), T.int64(16)): with T.block("B"): vi = T.axis.S(T.int64(16), i0 * T.int64(16) + i1) vj = T.axis.S(T.int64(128), j0 * T.int64(16) + j1) B[vi, vj] = A[vi, vj] + 1.0 @T.prim_func def after_single_elementwise_int64_blockize( A: T.Buffer((T.int64(16), T.int64(128)), "float32"), B: T.Buffer((T.int64(16), T.int64(128)), "float32"), ) -> None: for i0, j0 in T.grid(T.int64(1), T.int64(8)): with T.block("B_o"): vi_o = T.axis.spatial(T.int64(1), T.int64(0)) vj_o = T.axis.spatial(T.int64(8), j0) for i1, j1 in T.grid(T.int64(16), T.int64(16)): with T.block("B"): vi_i, vj_i = T.axis.remap("SS", [i1, j1]) B[vi_i, vj_o * T.int64(16) + vj_i] = A[ vi_i, vj_o * T.int64(16) + vj_i ] + T.float32(1) @T.prim_func def after_single_elementwise_int64_blockize_preserve_unit_iters( A: T.Buffer((T.int64(16), T.int64(128)), "float32"), B: T.Buffer((T.int64(16), T.int64(128)), "float32"), ) -> None: for i0, j0 in T.grid(T.int64(1), T.int64(8)): with T.block("B_o"): vi_o = T.axis.spatial(T.int64(1), i0) vj_o = T.axis.spatial(T.int64(8), j0) for i1, j1 in T.grid(T.int64(16), T.int64(16)): with T.block("B"): vi_i, vj_i = T.axis.remap("SS", [i1, j1]) B[vi_i, vj_o * T.int64(16) + vj_i] = A[ vi_i, vj_o * T.int64(16) + vj_i ] + T.float32(1) s = tir.Schedule(single_elementwise_int64, debug_mask="all") _, _, i1, _ = s.get_loops(s.get_block("B")) s.blockize(i1, preserve_unit_iters=preserve_unit_iters) expected = ( after_single_elementwise_int64_blockize_preserve_unit_iters if preserve_unit_iters else 
after_single_elementwise_int64_blockize ) tvm.ir.assert_structural_equal(s.mod["main"], expected) verify_trace_roundtrip(sch=s, mod=single_elementwise_int64) def test_blockize_blocks(): @T.prim_func def blocks_func(A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32")) -> None: for m in T.serial(6): for i, j in T.grid(3, 1): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(B[vi, vj]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 64): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj + 64]) T.writes(B[vi, vj + 64]) B[vi, vj + 64] = A[vi, vj + 64] * 3.0 @T.prim_func def after_blocks_blockize( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32") ) -> None: for m in range(6): with T.block("outer_B_C_"): vi_o = T.axis.spatial(1, 0) vj_o = T.axis.spatial(1, 0) T.reads(A[0:128, 0:128]) T.writes(B[0:128, 0:128]) for i, j in T.grid(3, 1): with T.block("B"): vi_i = T.axis.spatial(3, i) T.reads(A[vi_i, 0]) T.writes(B[vi_i, 0]) B[vi_i, 0] = A[vi_i, 0] * T.float32(2) for i, j in T.grid(128, 64): with T.block("C"): vi_i, vj_i = T.axis.remap("SS", [i, j]) T.reads(A[vi_i, vj_i + 64]) T.writes(B[vi_i, vj_i + 64]) B[vi_i, vj_i + 64] = A[vi_i, vj_i + 64] * T.float32(3) s = tir.Schedule(blocks_func, debug_mask="all") blocks = [s.get_block("B"), s.get_block("C")] s.blockize(blocks, preserve_unit_iters=False) expected = after_blocks_blockize tvm.ir.assert_structural_equal(s.mod["main"], expected) verify_trace_roundtrip(sch=s, mod=blocks_func) if __name__ == "__main__": tvm.testing.main()
15,311
41.651811
137
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_split_fuse.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import pytest import tvm import tvm.testing from tvm import te, tir from tvm.script import tir as T from tvm.tir.expr import IntImm from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable @T.prim_func def elementwise(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i, j, k in T.grid(128, 128, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_dependent_loops(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i in T.serial(0, 128): for j, k in T.grid(i, 128): with T.block("B"): vi = T.axis.S(128, i) vj = T.axis.S(i, j) vk = T.axis.S(128, k) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_symbolic(a: T.handle, b: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (128, 128, n)) B = T.match_buffer(b, (128, 128, n)) for i, j, k in T.grid(128, 128, n): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 
2.0 @T.prim_func def elementwise_symbolic_fused(a: T.handle, b: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (128, 128, n)) B = T.match_buffer(b, (128, 128, n)) for i_j_k_fused in T.serial(0, (n * 16384)): with T.block("B"): vi = T.axis.S(128, T.floordiv(i_j_k_fused, n * 128)) vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, n * 128), n)) vk = T.axis.S(n, T.floormod(i_j_k_fused, n)) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_symbolic_split(a: T.handle, b: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (128, 128, n)) B = T.match_buffer(b, (128, 128, n)) for i, j, k0, k1 in T.grid(128, 128, 10, T.floordiv((n + 9), 10)): with T.block("B"): T.where((((k0 * T.floordiv((n + 9), 10)) + k1) < n)) vi, vj = T.axis.remap("SS", [i, j]) vk = T.axis.S(n, k0 * T.floordiv(n + 9, 10) + k1) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_with_seq(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) C = T.alloc_buffer((128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = C[vi, vj, vk] * 2.0 @T.prim_func def elementwise_with_anno(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(0, 128, annotations={"useless_annotation": True}): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_with_thread_binding(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for 
i, j in T.grid(128, 128): for k in T.thread_binding(0, 128, thread="threadIdx.x"): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_with_starting_point(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(10, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_with_opaque_block(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i, j, k in T.grid(128, 128, 128): with T.block("opaque"): T.reads([A[i, j, k]]) T.writes([B[i, j, k]]) with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_fused(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for fused in T.serial(0, 2097152): with T.block("B"): vi = T.axis.S(128, T.floordiv(fused, 16384)) vj = T.axis.S(128, T.floordiv(T.floormod(fused, 16384), 128)) vk = T.axis.S(128, T.floormod(fused, 128)) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_split_case0(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128, 128]) B = T.match_buffer(b, [128, 128, 128]) for i1, i2, i3, j1, j2, k1, k2 in T.grid(2, 1, 64, 4, 32, 16, 8): with T.block("B"): vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3) vj = T.axis.S(128, j1 * 32 + j2) vk = T.axis.S(128, k1 * 8 + k2) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_split_case1(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 
128, 128]) B = T.match_buffer(b, [128, 128, 128]) for i1, i2, i3, j1, j2, j3, k1, k2, k3 in T.grid(2, 1, 64, 2, 1, 64, 2, 1, 64): with T.block("B"): vi = T.axis.S(128, i1 * 64 + i2 * 64 + i3) vj = T.axis.S(128, j1 * 64 + j2 * 64 + j3) vk = T.axis.S(128, k1 * 64 + k2 * 64 + k3) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_split_with_predicate(a: T.handle, b: T.handle) -> None: B = T.match_buffer(b, [128, 128, 128]) A = T.match_buffer(a, [128, 128, 128]) for i0, i1, i2, j0, j1, k0, k1 in T.grid(1000, 2, 3, 1, 129, 3, 43): with T.block("B"): vi = T.axis.S(128, i0 * 6 + i1 * 3 + i2) vj = T.axis.S(128, j0 * 129 + j1) vk = T.axis.S(128, k0 * 43 + k1) T.where((i0 * 2 + i1) * 3 + i2 < 128 and j0 * 129 + j1 < 128 and k0 * 43 + k1 < 128) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_fuse_with_opaque_block(a: T.handle, b: T.handle) -> None: B = T.match_buffer(b, [128, 128, 128]) A = T.match_buffer(a, [128, 128, 128]) for i_j_k_fused in T.serial(0, 2097152): with T.block("opaque"): T.reads( [ A[ T.floordiv(i_j_k_fused, 16384), T.floordiv(T.floormod(i_j_k_fused, 16384), 128), T.floormod(i_j_k_fused, 128), ] ] ) T.writes( [ B[ T.floordiv(i_j_k_fused, 16384), T.floordiv(T.floormod(i_j_k_fused, 16384), 128), T.floormod(i_j_k_fused, 128), ] ] ) with T.block("B"): vi = T.axis.S(128, T.floordiv(i_j_k_fused, 16384)) vj = T.axis.S(128, T.floordiv(T.floormod(i_j_k_fused, 16384), 128)) vk = T.axis.S(128, T.floormod(i_j_k_fused, 128)) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def elementwise_split_with_opaque_block(a: T.handle, b: T.handle) -> None: B = T.match_buffer(b, [128, 128, 128]) A = T.match_buffer(a, [128, 128, 128]) for i0, i1, j, k in T.grid(8, 16, 128, 128): with T.block("opaque"): T.reads([A[i0 * 16 + i1, j, k]]) T.writes([B[i0 * 16 + i1, j, k]]) with T.block("B"): vi = 
T.axis.S(128, i0 * 16 + i1) vj, vk = T.axis.remap("SS", [j, k]) T.reads([A[vi, vj, vk]]) T.writes([B[vi, vj, vk]]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 @T.prim_func def opaque_access(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [16, 16], "float32") B = T.match_buffer(b, [16, 16], "float32") for i, j in T.grid(16, 16): with T.block("A"): vi, vj = T.axis.remap("SS", [i, j]) T.reads([]) T.writes([A[0:16, 0:16]]) A[vi, vj] = 1 for i, j in T.grid(16, 16): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.reads([]) T.writes([B[0:16, 0:16]]) T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, vi * 16 + vj, dtype="handle")) @T.prim_func def opaque_access_fused(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [16, 16]) B = T.match_buffer(b, [16, 16]) for i_j_fused in T.serial(0, 256): with T.block("A"): vi = T.axis.S(16, T.floordiv(i_j_fused, 16)) vj = T.axis.S(16, T.floormod(i_j_fused, 16)) T.reads([]) T.writes([A[0:16, 0:16]]) A[vi, vj] = 1 for i_j_fused in T.serial(0, 256): with T.block("B"): vi = T.axis.S(16, T.floordiv(i_j_fused, 16)) vj = T.axis.S(16, T.floormod(i_j_fused, 16)) T.reads([]) T.writes([B[0:16, 0:16]]) T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle")) @T.prim_func def opaque_access_split(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (16, 16)) B = T.match_buffer(b, (16, 16)) for i, j0, j1 in T.grid(16, 4, 4): with T.block("A"): vi = T.axis.S(16, i) vj = T.axis.S(16, j0 * 4 + j1) T.reads([]) T.writes([A[0:16, 0:16]]) A[vi, vj] = 1 for i, j0, j1 in T.grid(16, 4, 4): with T.block("B"): vi = T.axis.S(16, i) vj = T.axis.S(16, j0 * 4 + j1) T.reads([]) T.writes([B[0:16, 0:16]]) T.evaluate(T.tvm_fill_fragment(B.data, 16, 16, 16, 0, ((vi * 16) + vj), dtype="handle")) @T.prim_func def elementwise_not_affine(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (127, 128)) B = T.match_buffer(b, (127, 128)) for i in T.serial(0, 4): for j, k in T.grid(T.min(31, 126 - i * 32) + 1, 128): 
with T.block("B"): vi = T.axis.S(127, i * 32 + j) vj = T.axis.S(128, k) B[vi, vj] = A[vi, vj] @T.prim_func def elementwise_not_affine_fused(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [127, 128]) B = T.match_buffer(b, [127, 128]) for i in T.grid(4): for j_k_fused in T.serial(0, T.min(31, 126 - i * 32) * 128 + 128): with T.block("B"): vi = T.axis.S( 127, i * 32 + T.floordiv(j_k_fused, 128), ) vj = T.axis.S(128, T.floormod(j_k_fused, 128)) T.reads([A[vi, vj]]) T.writes([B[vi, vj]]) B[vi, vj] = A[vi, vj] # pylint: enable=no-member,invalid-name,unused-variable def test_fuse(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) sch.fuse(i, j, k) tvm.ir.assert_structural_equal(elementwise_fused, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_split(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) sch.split(i, factors=[2, 1, 64]) sch.split(j, factors=[4, 32]) sch.split(k, factors=[16, 8]) tvm.ir.assert_structural_equal(elementwise_split_case0, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_split_with_inferred_factor(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) sch.split(i, factors=[None, 1, 64]) sch.split(j, factors=[2, None, 64]) sch.split(k, factors=[2, 1, None]) tvm.ir.assert_structural_equal(elementwise_split_case1, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_split_with_predicate(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) sch.split(i, factors=[1000, 2, 3]) sch.split(j, factors=[None, 129]) sch.split(k, factors=[3, None]) tvm.ir.assert_structural_equal(elementwise_split_with_predicate, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_fuse_fail_not_only_child(): sch = 
tir.Schedule(elementwise_with_seq, debug_mask="all") block_b = sch.get_block("B") _, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.fuse(j, k) def test_fuse_split_fail_with_annotation(): sch = tir.Schedule(elementwise_with_anno, debug_mask="all") block_b = sch.get_block("B") _, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.fuse(j, k) with pytest.raises(tvm.tir.ScheduleError): sch.split(k, factors=[None, 10]) def test_fuse_split_fail_not_start_with_zero(): sch = tir.Schedule(elementwise_with_anno, debug_mask="all") block_b = sch.get_block("B") _, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.fuse(j, k) with pytest.raises(tvm.tir.ScheduleError): sch.split(k, factors=[None, 10]) def test_fuse_with_opaque_block(): sch = tir.Schedule(elementwise_with_opaque_block, debug_mask="all") block_opaque = sch.get_block("opaque") i, j, k = sch.get_loops(block_opaque) sch.fuse(i, j, k) tvm.ir.assert_structural_equal(elementwise_fuse_with_opaque_block, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_with_opaque_block) def test_fuse_with_opaque_access(): sch = tir.Schedule(opaque_access, debug_mask="all") block_a = sch.get_block("A") i, j = sch.get_loops(block_a) sch.fuse(i, j) block_b = sch.get_block("B") i, j = sch.get_loops(block_b) sch.fuse(i, j) tvm.ir.assert_structural_equal(opaque_access_fused, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=opaque_access) def test_split_with_opaque_block(): sch = tir.Schedule(elementwise_with_opaque_block, debug_mask="all") block_opaque = sch.get_block("opaque") i, _, _ = sch.get_loops(block_opaque) sch.split(i, factors=[None, 16]) tvm.ir.assert_structural_equal(elementwise_split_with_opaque_block, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_with_opaque_block) def test_split_with_opaque_access(): sch = tir.Schedule(opaque_access, debug_mask="all") block_a = sch.get_block("A") _, j = sch.get_loops(block_a) 
sch.split(j, factors=[None, 4]) block_b = sch.get_block("B") _, j = sch.get_loops(block_b) sch.split(j, factors=[None, 4]) tvm.ir.assert_structural_equal(opaque_access_split, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=opaque_access) def test_split_with_non_positive_factors(): sch = tir.Schedule(elementwise, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.split(i, factors=[-2, -64]) with pytest.raises(tvm.tir.ScheduleError): sch.split(j, factors=[0, None]) with pytest.raises(tvm.tir.ScheduleError): sch.split(k, factors=[None, -16]) def test_fuse_split_fail_with_thread_binding(): sch = tir.Schedule(elementwise_with_thread_binding, debug_mask="all") block_b = sch.get_block("B") _, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.fuse(j, k) with pytest.raises(tvm.tir.ScheduleError): sch.split(k, factors=[None, 10]) def test_fuse_symbolic(): sch = tir.Schedule(elementwise_symbolic, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) sch.fuse(i, j, k) tvm.ir.assert_structural_equal(elementwise_symbolic_fused, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_symbolic) def test_split_symbolic(): sch = tir.Schedule(elementwise_symbolic, debug_mask="all") block_b = sch.get_block("B") _, _, k = sch.get_loops(block_b) sch.split(k, factors=[10, None]) tvm.ir.assert_structural_equal(elementwise_symbolic_split, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_symbolic) def test_fuse_fail_with_dependent_loops(): sch = tir.Schedule(elementwise_dependent_loops, debug_mask="all") block_b = sch.get_block("B") i, j, _ = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError): sch.fuse(i, j) def test_fuse_not_affine(): sch = tir.Schedule(elementwise_not_affine, debug_mask="all") block_b = sch.get_block("B") _, j, k = sch.get_loops(block_b) sch.fuse(j, k) 
tvm.ir.assert_structural_equal(elementwise_not_affine_fused, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise_not_affine) def test_add_unit_loop_above_block(): @T.prim_func def zero_dim( A: T.Buffer((), "int32"), B: T.Buffer((), "int32"), C: T.Buffer((), "int32"), ) -> None: with T.block("C"): vi = T.axis.spatial(1, 0) C[()] = A[()] + B[()] @T.prim_func def zero_dim_added( A: T.Buffer((), "int32"), B: T.Buffer((), "int32"), C: T.Buffer((), "int32"), ) -> None: for u in range(1): with T.block("C"): vi = T.axis.spatial(1, 0) C[()] = A[()] + B[()] sch = tir.Schedule(zero_dim, debug_mask="all") block = sch.get_block("C") sch.add_unit_loop(block) tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"]) def test_add_unit_loop_above_loop(): @T.prim_func def zero_dim( A: T.Buffer((), "int32"), B: T.Buffer((), "int32"), C: T.Buffer((), "int32"), ) -> None: for u in range(1): with T.block("C"): vi = T.axis.spatial(1, 0) C[()] = A[()] + B[()] @T.prim_func def zero_dim_added( A: T.Buffer((), "int32"), B: T.Buffer((), "int32"), C: T.Buffer((), "int32"), ) -> None: for u1, u2 in T.grid(1, 1): with T.block("C"): vi = T.axis.spatial(1, 0) C[()] = A[()] + B[()] sch = tir.Schedule(zero_dim, debug_mask="all") block = sch.get_block("C") (loop,) = sch.get_loops(block) sch.add_unit_loop(loop) tvm.ir.assert_structural_equal(zero_dim_added, sch.mod["main"]) @pytest.mark.skip("Pending fix in affine analysis") def test_fuse_int64(): def _create_prim_func(): n = te.const(16, "int32") m = te.const(32, "int64") A = te.placeholder((n, m), name="A", dtype="int32") B = te.compute((n, m), lambda i, j: A[i, j] + 1, name="B") return te.create_prim_func([A, B]) mod = _create_prim_func() sch = tir.Schedule(mod, debug_mask="all") i, j = sch.get_loops(sch.get_block("B")) sch.fuse(i, j) verify_trace_roundtrip(sch=sch, mod=mod) def test_split_int64_extent_with_mixed_factors(): def _create_prim_func(): m = te.const(384, "int64") A = te.placeholder((m,), name="A", dtype="float32") B = 
te.compute((m,), lambda i: A[i] + 1, name="B") return te.create_prim_func([A, B]) mod = _create_prim_func() sch = tir.Schedule(mod, debug_mask="all") (i,) = sch.get_loops(sch.get_block("B")) sch.split( i, factors=[ te.const(1, "int64"), te.const(512, "int32"), ], ) def test_split_int64_extent_with_int32_factors(): def _create_prim_func(): m = te.const(12, "int64") A = te.placeholder((m,), name="A", dtype="float32") B = te.compute((m,), lambda i: A[i] + 1, name="B") return te.create_prim_func([A, B]) mod = _create_prim_func() sch = tir.Schedule(mod, debug_mask="all") (i,) = sch.get_loops(sch.get_block("B")) sch.split( i, factors=[ te.const(1, "int32"), te.const(1, "int32"), te.const(3, "int32"), te.const(1, "int32"), te.const(4, "int32"), ], ) def test_split_int64_factors(): sch = tir.Schedule(elementwise_symbolic, debug_mask="all") block_b = sch.get_block("B") _, _, k = sch.get_loops(block_b) sch.split(k, factors=[IntImm(dtype="int64", value=10), None]) tvm.ir.assert_structural_equal(elementwise_symbolic_split, sch.mod["main"]) if __name__ == "__main__": tvm.testing.main()
22,878
34.144393
100
py
tvm
tvm-main/tests/python/unittest/test_autotvm_index_tuner.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test index based tuners""" import multiprocessing from tvm.testing.autotvm import DummyRunner, get_sample_task from tvm import autotvm def test_grid_search_tuner(): """Test GridSearchTuner""" task, _ = get_sample_task() measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) # When no range index, range_length should be the length of config space tuner = autotvm.tuner.GridSearchTuner(task) assert tuner.begin_idx == 0 assert tuner.end_idx == 64 assert tuner.index == 0 assert tuner.range_length == 64 assert tuner.visited_max == 64 # With range index, range_length should be the length of the specified range tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15)) assert tuner.begin_idx == 8 assert tuner.end_idx == 16 assert tuner.index == 8 assert tuner.range_length == 8 assert tuner.visited_max == 8 # Tuner should only focus on the specified range tuner.tune(n_trial=8, measure_option=measure_option) assert len(tuner.visited) == 8 assert not tuner.has_next() # With multi-filter task, _ = get_sample_task() task.config_space.multi_filter( filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) tuner = 
autotvm.tuner.GridSearchTuner(task) assert tuner.begin_idx == 0 assert tuner.end_idx == 64 assert tuner.index == 5 assert tuner.range_length == 64 assert tuner.visited_max == 34 # With range index, range_length should be the length of the specified range tuner = autotvm.tuner.GridSearchTuner(task, range_idx=(8, 15)) assert tuner.begin_idx == 8 assert tuner.end_idx == 16 assert tuner.index == 12 assert tuner.range_length == 8 assert tuner.visited_max == 4 # Tuner should only focus on the specified range tuner.tune(n_trial=8, measure_option=measure_option) assert len(tuner.visited) == 4 assert not tuner.has_next() def grid_search_spawn(): assert multiprocessing.get_spawn_method(False) == "spawn" test_grid_search_tuner() def test_grid_search_tuner_spawn(): ctx = multiprocessing.get_context("spawn") p = ctx.Process(target=test_grid_search_tuner) p.start() p.join() def test_random_tuner(): """Test RandomTuner""" task, _ = get_sample_task() measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15)) assert tuner.begin_idx == 8 assert tuner.end_idx == 16 assert tuner.range_length == 8 assert tuner.visited_max == 8 # Tuner should only focus on the specified range and should visit all indices tuner.tune(n_trial=8, measure_option=measure_option) assert len(tuner.visited) == 8 assert not tuner.has_next() for idx in tuner.visited: assert 8 <= idx <= 15 # With multi-filter task, _ = get_sample_task() task.config_space.multi_filter( filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) tuner = autotvm.tuner.RandomTuner(task, range_idx=(8, 15)) assert tuner.begin_idx == 8 assert tuner.end_idx == 16 assert tuner.range_length == 8 assert tuner.visited_max == 4 # Tuner should only focus on the specified range and should visit all indices tuner.tune(n_trial=8, measure_option=measure_option) assert len(tuner.visited) == 4 assert not tuner.has_next() for idx 
in tuner.visited: assert 8 <= idx <= 15 if __name__ == "__main__": test_grid_search_tuner() test_grid_search_tuner_spawn() test_random_tuner()
4,492
33.037879
97
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_renormalize_split_pattern.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,redundant-keyword-arg @tvm.script.ir_module class Before: @T.prim_func def main(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) inputs_flat = T.Buffer([8192], dtype="float32", data=inputs.data) weight_flat = T.Buffer([2097152], dtype="float32", data=weight.data) conv2d_transpose_nhwc_flat = T.Buffer([16384], dtype="float32", data=conv2d_transpose_nhwc.data) # var definition threadIdx_x = T.env_thread("threadIdx.x") blockIdx_x = T.env_thread("blockIdx.x") # body T.launch_thread(blockIdx_x, 64) conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local") PadInput_shared = T.decl_buffer([768], "float32", scope="shared") weight_shared = T.decl_buffer([4096], "float32", scope="shared") T.launch_thread(threadIdx_x, 32) for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2): conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + 
i2_4_init] = T.float32(0) for i6_0 in T.serial(16): for ax0_ax1_ax2_ax3_fused_0 in T.serial(24): PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(128 <= ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x and ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x < 640 and 1 <= blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x) % 128 // 32 and blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x) % 128 // 32 < 5, inputs_flat[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32") for ax0_ax1_ax2_ax3_fused_0 in T.serial(32): weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] = weight_flat[T.ramp((ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4) // 256 * 131072 + i6_0 * 8192 + (ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4) % 256 // 8 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)] for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2): conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 * 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024] for ax1, ax2 in T.grid(2, 4): conv2d_transpose_nhwc_flat[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2] @tvm.script.ir_module class After: @T.prim_func def main(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) inputs_flat = T.Buffer([8192], 
dtype="float32", data=inputs.data) weight_flat = T.Buffer([2097152], dtype="float32", data=weight.data) conv2d_transpose_nhwc_flat = T.Buffer([16384], dtype="float32", data=conv2d_transpose_nhwc.data) # var definition threadIdx_x = T.env_thread("threadIdx.x") blockIdx_x = T.env_thread("blockIdx.x") # body T.launch_thread(blockIdx_x, 64) conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local") PadInput_shared = T.decl_buffer([768], "float32", scope="shared") weight_shared = T.decl_buffer([4096], "float32", scope="shared") T.launch_thread(threadIdx_x, 32) for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2): conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + i2_4_init] = T.float32(0) for i6_0 in T.serial(16): for ax0_ax1_ax2_ax3_fused_0 in T.serial(24): PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(1 <= (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) // 4 and (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) // 20 < 1 and 1 <= blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) % 4 and (blockIdx_x // 32 * 2 + (ax0_ax1_ax2_ax3_fused_0 + threadIdx_x // 32) % 4) // 5 < 1, inputs_flat[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32") for ax0_ax1_ax2_ax3_fused_0 in T.serial(32): weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] = weight_flat[T.ramp((ax0_ax1_ax2_ax3_fused_0 + threadIdx_x * 4 // 128) // 2 * 131072 + i6_0 * 8192 + (ax0_ax1_ax2_ax3_fused_0 * 16 + threadIdx_x * 4 // 8) % 32 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)] for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2): conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 
* 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024] for ax1, ax2 in T.grid(2, 4): conv2d_transpose_nhwc_flat[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2] @tvm.script.ir_module class After_simplified: @T.prim_func def main(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # var definition threadIdx_x = T.env_thread("threadIdx.x") blockIdx_x = T.env_thread("blockIdx.x") inputs_flat = T.Buffer([8192], dtype="float32", data=inputs.data) weight_flat = T.Buffer([2097152], dtype="float32", data=weight.data) conv2d_transpose_nhwc_flat = T.Buffer([16384], dtype="float32", data=conv2d_transpose_nhwc.data) # body T.launch_thread(blockIdx_x, 64) conv2d_transpose_nhwc_local = T.decl_buffer([8], "float32", scope="local") PadInput_shared = T.decl_buffer([768], "float32", scope="shared") weight_shared = T.decl_buffer([4096], "float32", scope="shared") T.launch_thread(threadIdx_x, 32) for i2_3_init, i1_4_init, i2_4_init in T.grid(2, 2, 2): conv2d_transpose_nhwc_local[i1_4_init * 4 + i2_3_init * 2 + i2_4_init] = T.float32(0) for i6_0 in T.serial(16): for ax0_ax1_ax2_ax3_fused_0 in T.serial(24): PadInput_shared[ax0_ax1_ax2_ax3_fused_0 * 32 + threadIdx_x] = T.if_then_else(4 <= ax0_ax1_ax2_ax3_fused_0 and ax0_ax1_ax2_ax3_fused_0 < 20 and 1 <= blockIdx_x // 32 * 2 + ax0_ax1_ax2_ax3_fused_0 % 4 and blockIdx_x // 32 * 2 + ax0_ax1_ax2_ax3_fused_0 % 4 < 5, inputs_flat[blockIdx_x // 32 * 1024 + ax0_ax1_ax2_ax3_fused_0 * 512 + i6_0 * 32 + threadIdx_x - 2560], T.float32(0), dtype="float32") for ax0_ax1_ax2_ax3_fused_0 in T.serial(32): weight_shared[T.ramp(ax0_ax1_ax2_ax3_fused_0 * 128 + threadIdx_x * 4, 1, 4)] 
= weight_flat[T.ramp(ax0_ax1_ax2_ax3_fused_0 // 2 * 131072 + i6_0 * 8192 + ax0_ax1_ax2_ax3_fused_0 % 2 * 4096 + threadIdx_x // 2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 2 * 4, 1, 4)] for i6_1, i2_3, i4_2, i5_2, i6_2, i1_4, i2_4 in T.grid(4, 2, 4, 4, 8, 2, 2): conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] = conv2d_transpose_nhwc_local[i1_4 * 4 + i2_3 * 2 + i2_4] + T.if_then_else((i1_4 + i4_2) % 2 == 0 and (i2_4 + i5_2) % 2 == 0, PadInput_shared[threadIdx_x // 8 * 128 + (i1_4 + i4_2) // 2 * 128 + (i2_4 + i5_2) // 2 * 32 + i2_3 * 32 + i6_1 * 8 + i6_2], T.float32(0), dtype="float32") * weight_shared[i6_1 * 64 + i6_2 * 8 + threadIdx_x % 8 + 3840 - i5_2 * 256 - i4_2 * 1024] for ax1, ax2 in T.grid(2, 4): conv2d_transpose_nhwc_flat[threadIdx_x // 8 * 4096 + ax1 * 2048 + blockIdx_x // 32 * 1024 + ax2 * 256 + blockIdx_x % 32 * 8 + threadIdx_x % 8] = conv2d_transpose_nhwc_local[ax1 * 4 + ax2] # pylint: enable=no-member,invalid-name,unused-variable,line-too-long,redefined-outer-name,redundant-keyword-arg # fmt: on def test_renormalize_split_pattern(): after = tvm.tir.transform.RenormalizeSplitPattern()(Before) tvm.ir.assert_structural_equal(after, After) after = tvm.tir.transform.Simplify()(after) tvm.ir.assert_structural_equal(after, After_simplified) @T.prim_func def impossible_equality(n: T.int32): # Prior to bugfix, this conditional defined the expression "2" as # equal to zero within the then_case. [min_value=2, max_value=0] if 2 == 0: # Then this expression evaluates n/2, using the min/max values # of "2", which is caught as a divide by zero error. if n // 2 >= 16: T.evaluate(0) @T.prim_func def impossible_inequality(n: T.int32): # Prior to bugfix, this conditional set up a range of possible # values for the expression "-2" as [0, kPosInf]. 
if -1 < -2: if n // (-2) >= 16: T.evaluate(0) integer_condition = tvm.testing.parameter( impossible_equality, impossible_inequality, ) def test_analyze_inside_integer_conditional(integer_condition): """Avoid crash occurring in ConstIntBoundAnalyzer. Crash occurred when simplifying some expressions with provably false integer expressions. If the expressions were renormalized before calling Simplify, conditional statements could assign a range of possible values to integers, as if they were variables. This would result in divide by zero throwing an exception, followed by a second exception during stack unwinding causing the program to crash. """ # Similar issue would occur in most transformations that subclass # IRMutatorWithAnalyzer. tir.transform.Simplify() is an # exception, as it rewrites the integer conditionals first. These # tests are written using RenormalizeSplitPattern as it is the # first case identified. transform = tvm.tir.transform.RenormalizeSplitPattern() # Issue would result in an error through while applying the transformation. mod = tvm.IRModule.from_expr(integer_condition) transform(mod) if __name__ == "__main__": tvm.testing.main()
11,891
65.066667
513
py
tvm
tvm-main/tests/python/unittest/test_subwarp_reduction_cuda.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing import numpy as np from tvm.script import tir as T @T.prim_func def reduce(a: T.handle, b: T.handle, d1: T.int32, d2: T.int32, d3: T.int32) -> None: A = T.match_buffer(a, [1, d1, d2, d3]) B = T.match_buffer(b, [1, d1, d2]) for i, j, k, l in T.grid(1, d1, d2, d3): with T.block("reduce"): vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l]) with T.init(): B[vi, vj, vk] = 0.0 B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl] @T.prim_func def reduce_max(a: T.handle, b: T.handle, d1: T.int32, d2: T.int32, d3: T.int32) -> None: A = T.match_buffer(a, [1, d1, d2, d3]) B = T.match_buffer(b, [1, d1, d2]) for i, j, k, l in T.grid(1, d1, d2, d3): with T.block("reduce"): vi, vj, vk, vl = T.axis.remap("SSSR", [i, j, k, l]) with T.init(): B[vi, vj, vk] = T.float32(-3.4028234663852886e38) B[vi, vj, vk] = T.max(B[vi, vj, vk], A[vi, vj, vk, vl]) @tvm.testing.requires_gpu @tvm.testing.requires_cuda def test_cuda_subwarp_reduction(): def check_sum(d1: int, d2: int, d3: int): _, _, _d1, _d2, _d3 = reduce.params mod = reduce.specialize({_d1: d1, _d2: d2, _d3: d3}) sch = tvm.tir.Schedule(mod) blk = sch.get_block("reduce") i, j, k, l = sch.get_loops(blk) sch.bind(i, "blockIdx.x") sch.bind(j, 
"threadIdx.z") sch.bind(k, "threadIdx.y") sch.bind(l, "threadIdx.x") f = tvm.build(sch.mod["main"], target="cuda") # prepare input and output array a_np = np.random.rand(1, d1, d2, d3).astype("float32") b_np = a_np.sum(axis=-1).astype("float32") a = tvm.nd.array(a_np, tvm.cuda(0)) b = tvm.nd.array(np.zeros_like(b_np), tvm.cuda(0)) # launch kernel f(a, b) tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-6, atol=1e-6) def check_max(d1: int, d2: int, d3: int): _, _, _d1, _d2, _d3 = reduce_max.params mod = reduce_max.specialize({_d1: d1, _d2: d2, _d3: d3}) sch = tvm.tir.Schedule(mod) blk = sch.get_block("reduce") i, j, k, l = sch.get_loops(blk) sch.bind(i, "blockIdx.x") sch.bind(j, "threadIdx.z") sch.bind(k, "threadIdx.y") sch.bind(l, "threadIdx.x") f = tvm.build(sch.mod["main"], target="cuda") # prepare input and output array a_np = -np.random.rand(1, d1, d2, d3).astype("float32") b_np = a_np.max(axis=-1).astype("float32") a = tvm.nd.array(a_np, tvm.cuda(0)) b = tvm.nd.array(np.zeros_like(b_np), tvm.cuda(0)) # launch kernel f(a, b) tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-6, atol=1e-6) for d1 in range(1, 5): for d2 in range(1, 5): for d3 in range(2, 33): check_sum(d1, d2, d3) check_max(d1, d2, d3) if __name__ == "__main__": test_cuda_subwarp_reduction()
3,804
35.238095
88
py
tvm
tvm-main/tests/python/unittest/test_tir_analysis_expr_deep_equal.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_equal_expr(): x = te.var("x") y = te.var("y") def func1(): return x + y + 1 def func2(): return te.exp(tvm.tir.truncdiv((x + y + 1) * y, 4)) assert tvm.tir.analysis.expr_deep_equal(func1(), func1()) assert tvm.tir.analysis.expr_deep_equal(func2(), func2()) assert not tvm.tir.analysis.expr_deep_equal(func2(), func1()) if __name__ == "__main__": test_equal_expr()
1,243
31.736842
65
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_postproc_rewrite_cooperative_fetch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring import tvm import tvm.testing from tvm import meta_schedule as ms from tvm import tir from tvm.meta_schedule.testing import te_workload from tvm.script import tir as T from tvm.target import Target from tvm.te import create_prim_func def _target() -> Target: return Target("cuda", host="llvm") def _create_context(mod, target) -> ms.TuneContext: ctx = ms.TuneContext( mod=mod, target=target, space_generator=ms.space_generator.PostOrderApply( sch_rules=[], postprocs=[ ms.postproc.RewriteCooperativeFetch(), ], mutator_probs={}, ), task_name="test", ) return ctx # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks @tvm.script.ir_module class AfterRewrite0: @T.prim_func def main(var_A: T.handle, var_B: T.handle, var_C: T.handle) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(var_A, [512, 512], dtype="float32") B = T.match_buffer(var_B, [512, 512], dtype="float32") C = T.match_buffer(var_C, [512, 512], dtype="float32") # body # with T.block("root") C_local 
= T.alloc_buffer([512, 512], dtype="float32", scope="local") A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared") B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared") for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"): for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"): for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"): for i2_0 in T.serial(0, 1): for ax0_ax1_fused_0 in T.serial(0, 32768): for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"): with T.block("A_shared"): v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) // 512) v1 = T.axis.spatial(512, (ax0_ax1_fused_0 * 8 + ax0_ax1_fused_1) % 512) T.reads([A[v0, v1]]) T.writes([A_shared[v0, v1]]) A_shared[v0, v1] = A[v0, v1] for ax0_ax1_fused_0 in T.serial(0, 1024): for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.x"): for ax0_ax1_fused_2 in T.vectorized(0, 2): with T.block("B_shared"): v0 = T.axis.spatial(512, (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) // 32) v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + (ax0_ax1_fused_0 * 16 + ax0_ax1_fused_1 * 2 + ax0_ax1_fused_2) % 32) T.reads([B[v0, v1]]) T.writes([B_shared[v0, v1]]) B_shared[v0, v1] = B[v0, v1] for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2): with T.block("C"): i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + i0_4) j = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4) k = T.axis.reduce(512, i2_0 * 512 + i2_1 * 32 + i2_2) T.reads([A_shared[i, k], B_shared[k, j]]) T.writes([C_local[i, j]]) with T.init(): C_local[i, j] = T.float32(0) C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j] for ax0, ax1 in T.grid(32, 4): with T.block("C_local"): v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0) v1 = T.axis.spatial(512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1) T.reads([C_local[v0, v1]]) T.writes([C[v0, v1]]) C[v0, v1] = C_local[v0, 
v1] @tvm.script.ir_module class WarpExecutionAfterRewrite: @T.prim_func def main( A: T.Buffer((512, 512), "float32"), B: T.Buffer((512, 512), "float32"), C: T.Buffer((512, 512), "float32"), ) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") C_local = T.alloc_buffer([512, 512], dtype="float32", scope="local") A_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared") B_shared = T.alloc_buffer([512, 512], dtype="float32", scope="shared") for i0_0_i1_0_fused in T.thread_binding(0, 16, thread="blockIdx.x"): for i0_1_i1_1_fused in T.thread_binding(0, 16, thread="vthread.x"): for i0_2_i1_2_fused in T.thread_binding(0, 8, thread="threadIdx.y"): for i2_0 in T.serial(0, 1): for ax0_ax1_fused_0 in T.serial(0, 1024): for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.y"): for ax0_ax1_fused_2 in T.thread_binding( 0, 32, thread="threadIdx.x" ): with T.block("A_shared"): v0 = T.axis.spatial( 512, ( ax0_ax1_fused_0 * 256 + ax0_ax1_fused_1 * 32 + ax0_ax1_fused_2 ) // 512, ) v1 = T.axis.spatial( 512, ( ax0_ax1_fused_0 * 256 + ax0_ax1_fused_1 * 32 + ax0_ax1_fused_2 ) % 512, ) T.reads([A[v0, v1]]) T.writes([A_shared[v0, v1]]) A_shared[v0, v1] = A[v0, v1] for ax0_ax1_fused_0 in T.serial(0, 32): for ax0_ax1_fused_1 in T.thread_binding(0, 8, thread="threadIdx.y"): for ax0_ax1_fused_2 in T.thread_binding( 0, 32, thread="threadIdx.x" ): for ax0_ax1_fused_3 in T.vectorized(0, 2): with T.block("B_shared"): v0 = T.axis.spatial( 512, ( ax0_ax1_fused_0 * 512 + ax0_ax1_fused_1 * 64 + ax0_ax1_fused_2 * 2 + ax0_ax1_fused_3 ) // 32, ) v1 = T.axis.spatial( 512, i0_0_i1_0_fused * 32 + ( ax0_ax1_fused_0 * 512 + ax0_ax1_fused_1 * 64 + ax0_ax1_fused_2 * 2 + ax0_ax1_fused_3 ) % 32, ) T.reads([B[v0, v1]]) T.writes([B_shared[v0, v1]]) B_shared[v0, v1] = B[v0, v1] for i2_1, i0_3, i1_3, i2_2, i0_4, i1_4 in T.grid(16, 2, 2, 32, 16, 2): with T.block("C"): i = T.axis.spatial(512, i0_1_i1_1_fused * 32 + i0_3 * 16 + 
i0_4) j = T.axis.spatial( 512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + i1_3 * 2 + i1_4, ) k = T.axis.reduce(512, i2_0 * 512 + i2_1 * 32 + i2_2) T.reads([A_shared[i, k], B_shared[k, j]]) T.writes([C_local[i, j]]) T.block_attr({"warp_execution": 1}) with T.init(): C_local[i, j] = T.float32(0) C_local[i, j] = C_local[i, j] + A_shared[i, k] * B_shared[k, j] for ax0, ax1 in T.grid(32, 4): with T.block("C_local"): v0 = T.axis.spatial(512, i0_1_i1_1_fused * 32 + ax0) v1 = T.axis.spatial( 512, i0_0_i1_0_fused * 32 + i0_2_i1_2_fused * 4 + ax1 ) T.reads([C_local[v0, v1]]) T.writes([C[v0, v1]]) C[v0, v1] = C_local[v0, v1] # pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks # fmt: on def test_rewrite_cooperative_fetch(): mod = create_prim_func(te_workload.matmul(n=512, m=512, k=512)) target = _target() ctx = _create_context(mod, target) sch = tir.Schedule(mod, debug_mask="all") # fmt: off # pylint: disable=line-too-long,invalid-name b0 = sch.get_block(name="C", func_name="main") b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local") l2, l3, l4 = sch.get_loops(block=b0) v5, v6, v7, v8, v9 = sch.sample_perfect_tile(loop=l2, n=5, max_innermost_factor=64, decision=[1, 16, 1, 2, 16]) l10, l11, l12, l13, l14 = sch.split(loop=l2, factors=[v5, v6, v7, v8, v9]) v15, v16, v17, v18, v19 = sch.sample_perfect_tile(loop=l3, n=5, max_innermost_factor=64, decision=[16, 1, 8, 2, 2]) l20, l21, l22, l23, l24 = sch.split(loop=l3, factors=[v15, v16, v17, v18, v19]) v25, v26, v27 = sch.sample_perfect_tile(loop=l4, n=3, max_innermost_factor=64, decision=[1, 16, 32]) l28, l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27]) sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24) l31 = sch.fuse(l10, l20) sch.bind(loop=l31, thread_axis="blockIdx.x") l32 = sch.fuse(l11, l21) sch.bind(loop=l32, thread_axis="vthread.x") l33 = sch.fuse(l12, l22) sch.bind(loop=l33, 
thread_axis="threadIdx.x") b34 = sch.cache_read(block=b0, read_buffer_index=0, storage_scope="shared") sch.compute_at(block=b34, loop=l28, preserve_unit_loops=True) _, _, _, _, l39, l40 = sch.get_loops(block=b34) l41 = sch.fuse(l39, l40) _, v43 = sch.sample_perfect_tile(loop=l41, n=2, max_innermost_factor=4, decision=[262144, 1]) sch.annotate(block_or_loop=b34, ann_key="meta_schedule.cooperative_fetch", ann_val=v43) b44 = sch.cache_read(block=b0, read_buffer_index=1, storage_scope="shared") sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True) _, _, _, _, l49, l50 = sch.get_loops(block=b44) l51 = sch.fuse(l49, l50) _, v53 = sch.sample_perfect_tile(loop=l51, n=2, max_innermost_factor=4, decision=[8192, 2]) sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v53) sch.reverse_compute_at(block=b1, loop=l33, preserve_unit_loops=True) # pylint: enable=line-too-long,invalid-name # fmt: on sch.enter_postproc() assert ctx.space_generator.postprocs[0].apply(sch) tvm.ir.assert_structural_equal(sch.mod, AfterRewrite0) def test_rewrite_warp_execution(): mod = create_prim_func(te_workload.matmul(n=512, m=512, k=512)) target = _target() ctx = _create_context(mod, target) sch = tir.Schedule(mod, debug_mask="all") # fmt: off # pylint: disable=line-too-long,invalid-name b0 = sch.get_block(name="C", func_name="main") b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="local") l2, l3, l4 = sch.get_loops(block=b0) sch.annotate(b0, "warp_execution", 1) v5, v6, v7, v8, v9 = sch.sample_perfect_tile(loop=l2, n=5, max_innermost_factor=64, decision=[1, 16, 1, 2, 16]) l10, l11, l12, l13, l14 = sch.split(loop=l2, factors=[v5, v6, v7, v8, v9]) v15, v16, v17, v18, v19 = sch.sample_perfect_tile(loop=l3, n=5, max_innermost_factor=64, decision=[16, 1, 8, 2, 2]) l20, l21, l22, l23, l24 = sch.split(loop=l3, factors=[v15, v16, v17, v18, v19]) v25, v26, v27 = sch.sample_perfect_tile(loop=l4, n=3, max_innermost_factor=64, decision=[1, 16, 32]) l28, 
l29, l30 = sch.split(loop=l4, factors=[v25, v26, v27]) sch.reorder(l10, l20, l11, l21, l12, l22, l28, l29, l13, l23, l30, l14, l24) l31 = sch.fuse(l10, l20) sch.bind(loop=l31, thread_axis="blockIdx.x") l32 = sch.fuse(l11, l21) sch.bind(loop=l32, thread_axis="vthread.x") l33 = sch.fuse(l12, l22) sch.bind(loop=l33, thread_axis="threadIdx.y") b34 = sch.cache_read(block=b0, read_buffer_index=0, storage_scope="shared") sch.compute_at(block=b34, loop=l28, preserve_unit_loops=True) _, _, _, _, l39, l40 = sch.get_loops(block=b34) l41 = sch.fuse(l39, l40) _, v43 = sch.sample_perfect_tile(loop=l41, n=2, max_innermost_factor=4, decision=[262144, 1]) sch.annotate(block_or_loop=b34, ann_key="meta_schedule.cooperative_fetch", ann_val=v43) b44 = sch.cache_read(block=b0, read_buffer_index=1, storage_scope="shared") sch.compute_at(block=b44, loop=l28, preserve_unit_loops=True) _, _, _, _, l49, l50 = sch.get_loops(block=b44) l51 = sch.fuse(l49, l50) _, v53 = sch.sample_perfect_tile(loop=l51, n=2, max_innermost_factor=4, decision=[8192, 2]) sch.annotate(block_or_loop=b44, ann_key="meta_schedule.cooperative_fetch", ann_val=v53) sch.reverse_compute_at(block=b1, loop=l33, preserve_unit_loops=True) # pylint: enable=line-too-long,invalid-name # fmt: on sch.enter_postproc() assert ctx.space_generator.postprocs[0].apply(sch) tvm.ir.assert_structural_equal(sch.mod, WarpExecutionAfterRewrite) if __name__ == "__main__": tvm.testing.main()
16,452
53.30033
156
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_lower_opaque_block.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te from tvm.script import tir as T def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) mod = tvm.tir.transform.LowerOpaqueBlock()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed, True) @T.prim_func def compacted_elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer([1, 16], "float32", scope="global") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[0, j]) B[0, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] * 2.0 @T.prim_func def transformed_elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in T.serial(0, 16): B_new = T.decl_buffer(shape=[1, 16], dtype="float32") for j in T.serial(0, 16): B_new[0, j] = A[i, j] + 1.0 for j in T.serial(0, 16): C[i, j] = B_new[0, j] * 2.0 @T.prim_func def compacted_gpu_func(a: T.handle, c: 
T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 4, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="threadIdx.x"): for i2 in T.thread_binding(0, 2, thread="vthread"): with T.block(): T.reads(A[i0 * 4 + i1 * 2 + i2, 0:16]) T.writes(C[i0 * 4 + i1 * 2 + i2, 0:16]) B = T.alloc_buffer([1, 16], "float32", scope="local") for j in range(0, 16): with T.block(): T.reads(A[i0 * 4 + i1 * 2 + i2, j]) T.writes(B[0, j]) B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[0, j]) T.writes(C[i0 * 4 + i1 * 2 + i2, j]) C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0 @T.prim_func def transformed_gpu_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") i0 = T.env_thread("blockIdx.x") i1 = T.env_thread("threadIdx.x") i2 = T.env_thread("vthread") T.launch_thread(i0, 4) T.launch_thread(i1, 2) T.launch_thread(i2, 2) B = T.decl_buffer(shape=[1, 16], dtype="float32", scope="local") for j in range(0, 16): B[0, j] = A[i0 * 4 + i1 * 2 + i2, j] + 1.0 for j in range(0, 16): C[i0 * 4 + i1 * 2 + i2, j] = B[0, j] * 2.0 @T.prim_func def compacted_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None: A = T.match_buffer(a, (n, m), "float32") C = T.match_buffer(c, (n, m), "float32") for i in range(0, n): with T.block(): T.reads(A[i, m]) T.writes(C[i, m]) B = T.alloc_buffer((m,), "float32", scope="global") for j in range(0, m): with T.block(): T.reads(A[i, j]) T.writes(B[j]) B[j] = A[i, j] + 1.0 for j in range(0, m): with T.block(): T.reads(B[j]) T.writes(C[i, j]) C[i, j] = B[j] * 2.0 @T.prim_func def transformed_symbolic_func(a: T.handle, c: T.handle, n: T.int32, m: T.int32) -> None: A = T.match_buffer(a, (n, m), "float32") C = T.match_buffer(c, (n, m), "float32") for i in range(0, n): B = T.decl_buffer(shape=[m], dtype="float32") for j in range(0, m): B[j] = A[i, j] + 1.0 for j 
in range(0, m): C[i, j] = B[j] * 2.0 @T.prim_func def compacted_predicate_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32), "float32") C = T.match_buffer(c, (32), "float32") for i, j in T.grid(5, 7): with T.block(): T.reads(A[i * 7 + j]) T.writes(C[i * 7 + j]) T.where(i * 7 + j < 32) C[i * 7 + j] = A[i * 7 + j] + 1.0 @T.prim_func def transformed_predicate_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32), "float32") C = T.match_buffer(c, (32), "float32") for i, j in T.grid(5, 7): if i * 7 + j < 32: C[i * 7 + j] = A[i * 7 + j] + 1.0 @T.prim_func def compacted_unit_loop_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32), "float32") C = T.match_buffer(c, (32), "float32") for x, y, z in T.grid(4, 1, 8): with T.block(): T.reads(A[x * 8 + y * 8 + z]) T.writes(C[x * 8 + y * 8 + z]) C[x * 8 + y * 8 + z] = A[x * 8 + y * 8 + z] + 1.0 @T.prim_func def transformed_unit_loop_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32), "float32") C = T.match_buffer(c, (32), "float32") for x, z in T.grid(4, 8): C[x * 8 + z] = A[x * 8 + z] + 1.0 @T.prim_func def compacted_multi_alloc_func(a: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (32), "float32") D = T.match_buffer(d, (32), "float32") for i in range(0, 32): with T.block(): T.reads(A[i]) T.writes(D[i]) B = T.alloc_buffer((32,), scope="global") C = T.alloc_buffer((32,), scope="global") B[i] = A[i] + 1.0 C[i] = A[i] + B[i] D[i] = C[i] * 2.0 @T.prim_func def transformed_multi_alloc_func(a: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (32), "float32") D = T.match_buffer(d, (32), "float32") for i in range(0, 32): B = T.decl_buffer(shape=(32,), dtype="float32") C = T.decl_buffer(shape=(32,), dtype="float32") B[i] = A[i] + 1.0 C[i] = A[i] + B[i] D[i] = C[i] * 2.0 @T.prim_func def compacted_strided_buffer_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in range(0, 4): with 
T.block(): T.reads(A[i0 * 4 : i0 * 4 + 4, 0:16]) T.writes(C[i0 * 4 : i0 * 4 + 4, 0:16]) B = T.alloc_buffer([4, 16], "float32", strides=[17, 1], scope="global") for i1 in range(0, 4): for j in range(0, 16): with T.block(): T.reads(A[i0 * 4 + i1, j]) T.writes(B[i1, j]) B[i1, j] = A[i0 * 4 + i1, j] + 1.0 for i1 in range(0, 4): for j in range(0, 16): with T.block(): T.reads(B[i1, j]) T.writes(C[i0 * 4 + i1, j]) C[i0 * 4 + i1, j] = B[i1, j] * 2.0 @T.prim_func def transformed_strided_buffer_func( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ) -> None: # body for i0 in T.serial(4): B_data = T.allocate([4, 17], "float32", "global") B = T.decl_buffer(shape=[4, 16], dtype="float32", strides=[17, 1], data=B_data) for i1, j in T.grid(4, 16): B[i1, j] = A[i0 * 4 + i1, j] + T.float32(1) for i1, j in T.grid(4, 16): C[i0 * 4 + i1, j] = B[i1, j] * T.float32(2) @T.prim_func def annotated_loops(a: T.handle) -> None: A = T.match_buffer(a, (16,), "float32") for i in range(0, 16, annotations={"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0}): A[i] = 0.0 @T.prim_func def boolean_handling_before(a: T.Buffer(10, "bool"), b: T.Buffer(10, "bool")) -> None: for i0 in T.serial(10): with T.block("b"): T.reads(a[i0]) T.writes(b[i0]) b[i0] = a[i0] @T.prim_func def boolean_handling_after(a: T.Buffer(10, "bool"), b: T.Buffer(10, "bool")) -> None: # body for i0 in T.serial(10): b[i0] = a[i0] def test_elementwise(): _check(compacted_elementwise_func, transformed_elementwise_func) def test_gpu_workload(): _check(compacted_gpu_func, transformed_gpu_func) def test_symbolic_shape(): _check(compacted_symbolic_func, transformed_symbolic_func) def test_predicate(): _check(compacted_predicate_func, transformed_predicate_func) def test_unit_loops(): _check(compacted_unit_loop_func, transformed_unit_loop_func) def test_multi_alloc(): _check(compacted_multi_alloc_func, transformed_multi_alloc_func) def test_strided_buffer(): _check(compacted_strided_buffer_func, 
transformed_strided_buffer_func) def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.LowerOpaqueBlock()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) # LowerOpaqueBlock should do nothing on TE def test_annotated_loops(): mod = tvm.IRModule.from_expr(annotated_loops) mod = tvm.tir.transform.LowerOpaqueBlock()(mod) attr1 = mod["main"].body attr2 = attr1.body attr3 = attr2.body assert attr1.attr_key == "pragma_1" and attr1.value == "str_value" assert attr2.attr_key == "pragma_2" tvm.ir.assert_structural_equal(attr2.value, tvm.tir.IntImm("int32", 1)) assert attr3.attr_key == "pragma_3" tvm.ir.assert_structural_equal(attr3.value, tvm.tir.FloatImm("float32", 0.0)) def test_annotated_block(): @T.prim_func def annotated_block() -> None: with T.block(): T.block_attr({"pragma_1": "str_value", "pragma_2": 1, "pragma_3": 0.0}) T.evaluate(0) mod = tvm.IRModule.from_expr(annotated_block) mod = tvm.tir.transform.LowerOpaqueBlock()(mod) attr1 = mod["main"].body attr2 = attr1.body attr3 = attr2.body assert attr1.attr_key == "pragma_1" and attr1.value == "str_value" assert attr2.attr_key == "pragma_2" tvm.ir.assert_structural_equal(attr2.value, tvm.tir.IntImm("int32", 1)) assert attr3.attr_key == "pragma_3" tvm.ir.assert_structural_equal(attr3.value, tvm.tir.FloatImm("float32", 0.0)) def test_preserved_annotations(): @T.prim_func def before(A: T.Buffer(8, "float32"), B: T.Buffer(8, "float32")): for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}): with T.block("block"): T.block_attr({"k_3": "oops"}) B[i] = A[i] + 1.0 @T.prim_func def after(A: T.Buffer(8, "float32"), B: T.Buffer(8, "float32")): for i in T.serial(8, annotations={"k_0": 1, "k_1": [2, 3], "k_2": 3.14}): B[i] = A[i] + 1.0 mod = tvm.IRModule.from_expr(before) mod = tvm.tir.transform.LowerOpaqueBlock()(mod) 
tvm.ir.assert_structural_equal(mod["main"], after) def test_boolean_handling(): _check(boolean_handling_before, boolean_handling_after) if __name__ == "__main__": tvm.testing.main()
12,286
32.479564
97
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_instrument_bound_checkers.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm import tvm.testing from tvm import te import numpy as np def collect_visit(stmt, f): ret = [] tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x))) return ret @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_llvm(index_a, index_b): n = te.size_var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i + index_a] + B[i + index_b], name="C") s = te.create_schedule(C.op) tgt = "llvm" tgt_host = "llvm" stmt = tvm.lower(s, [A, B, C], simple_mode=True) print(stmt) tgt = tvm.target.Target(tgt, tgt_host) fadd = tvm.build(s, [A, B, C], target=tgt, name="myadd") dev = tvm.device(tgt.kind.name, 0) a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=1024).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(1024, dtype=C.dtype), dev) fadd(a, b, c) @tvm.testing.requires_llvm def test_in_bounds_llvm(): n = te.size_var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda i: A[i] + B[i], name="C") s = te.create_schedule(C.op) tgt = "llvm" tgt_host = "llvm" stmt = tvm.lower(s, [A, B, C], 
simple_mode=True) tgt = tvm.target.Target(tgt, tgt_host) fadd = tvm.build(s, [A, B, C], target=tgt, name="myadd") dev = tvm.device(tgt.kind.name, 0) a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=1024).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(1024, dtype=C.dtype), dev) fadd(a, b, c) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_vectorize_llvm(nn, index_a, index_b): n = tvm.runtime.convert(nn) a = te.placeholder((n), name="a") b = te.placeholder((n), name="b") c = te.compute((n,), lambda i: a[i + index_a] + b[i + index_b], name="c") s = te.create_schedule(c.op) xo, xi = s[c].split(c.op.axis[0], factor=8) s[c].parallel(xo) s[c].vectorize(xi) tgt = "llvm" tgt_host = "llvm" stmt = tvm.lower(s, [a, b, c], simple_mode=True) tgt = tvm.target.Target(tgt, tgt_host) f = tvm.build(s, [a, b, c], target=tgt, name="myaddvec") dev = tvm.cpu(0) n = nn a = tvm.nd.array(np.random.uniform(size=(n)).astype(a.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n)).astype(a.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=c.dtype), dev) f(a, b, c) @tvm.testing.requires_llvm def test_in_bounds_vectorize_llvm(): n = 512 lanes = 2 A = te.placeholder((n,), name="A", dtype="float32x%d" % lanes) B = te.compute((n,), lambda i: A[i], name="B") C = te.compute((n,), lambda i: B[i] + tvm.tir.const(1, A.dtype), name="C") s = te.create_schedule(C.op) xo, xi = s[C].split(C.op.axis[0], nparts=2) _, xi = s[C].split(xi, factor=2) s[C].parallel(xo) s[C].vectorize(xi) s[B].compute_at(s[C], xo) xo, xi = s[B].split(B.op.axis[0], factor=2) s[B].vectorize(xi) # build and invoke the kernel. lowered_func = tvm.lower(s, [A, C], "llvm", simple_mode=False) f = tvm.build(s, [A, C], "llvm") dev = tvm.cpu(0) # launch the kernel. 
a = tvm.nd.empty((n,), A.dtype).copyfrom( np.random.uniform(size=[n] + ([] if lanes == 1 else [lanes])) ) c = tvm.nd.empty((n,), C.dtype, dev) f(a, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) @tvm.testing.requires_llvm def test_in_bounds_loop_partition_basic_llvm(): n = te.size_var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") T = te.compute((n,), lambda i: A[i] + B[i]) s = te.create_schedule(T.op) xo, xi = s[T].split(T.op.axis[0], factor=4) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(32,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(32,)).astype(B.dtype), dev) t = tvm.nd.empty((32,), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_loop_partition_basic_llvm(index_a, index_b): n = te.size_var("n") A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") T = te.compute((n,), lambda i: A[i + index_a] + B[i + index_b]) s = te.create_schedule(T.op) xo, xi = s[T].split(T.op.axis[0], factor=4) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(32,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(32,)).astype(B.dtype), dev) t = tvm.nd.empty((32,), T.dtype, dev) f(a, b, t) def test_in_bounds_const_loop_partition_ir(): def check_attr_stmt(x): if ( isinstance(x, tvm.tir.AttrStmt) and x.attr_key == "buffer_bound" and tvm.ir.structural_equal(x.value.args, [n]) ): return True return False def check_branch_stmt(x): if isinstance(x, tvm.tir.IfThenElse): return True return False def assert_bound_instrumentation(stmt, f, nums): count = 0 for i in collect_visit(stmt, f): if i is True: count = count + 1 assert count == nums def collect_branch_stmt(x): if isinstance(x, tvm.tir.IfThenElse): branch_collector.append(x) n = 21 A = 
te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") T = te.compute((n,), lambda i: A[i] + B[i]) s = te.create_schedule(T.op) xo, xi = s[T].split(T.op.axis[0], factor=4) with tvm.transform.PassContext( config={ "tir.instrument_bound_checkers": True, "tir.LoopPartition": {"partition_const_loop": True}, } ): mod = tvm.driver.lower(s, [A, B, T], name="main") stmt = mod["main"].body # after instrumentation assert_bound_instrumentation(stmt, check_attr_stmt, 2 * 3) assert_bound_instrumentation(stmt, check_branch_stmt, 2) branch_collector = list() collect_visit(stmt, collect_branch_stmt) assert len(branch_collector) == 2 @tvm.testing.requires_llvm def test_in_bounds_const_loop_partition_llvm(): with tvm.transform.PassContext( config={ "tir.instrument_bound_checkers": True, "tir.LoopPartition": {"partition_const_loop": True}, } ): n = 21 A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") T = te.compute((n,), lambda i: A[i] + B[i]) s = te.create_schedule(T.op) xo, xi = s[T].split(T.op.axis[0], factor=4) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev) t = tvm.nd.empty((n,), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_const_loop_partition_llvm(index_a, index_b): with tvm.transform.PassContext( config={ "tir.instrument_bound_checkers": True, "tir.LoopPartition": {"partition_const_loop": True}, } ): n = 21 A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") T = te.compute((n,), lambda i: A[i + index_a] + B[i + index_b]) s = te.create_schedule(T.op) xo, xi = s[T].split(T.op.axis[0], factor=4) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), dev) 
b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), dev) t = tvm.nd.empty((n,), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm def test_in_bounds_conv_llvm(loop_tiling=False): HSTR = WSTR = 1 in_channel = 128 kernel_height = kernel_width = 3 out_channel = 64 batch_size = 1 in_height = in_width = 64 out_height = out_width = in_height - kernel_height + 1 data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data") kernel = te.placeholder((kernel_height, kernel_width, in_channel, out_channel), name="kernel") ic = te.reduce_axis((0, in_channel), name="ic") kh = te.reduce_axis((0, kernel_height), name="kh") kw = te.reduce_axis((0, kernel_width), name="kw") conv = te.compute( (batch_size, out_channel, out_height, out_width), lambda n, oc, oh, ow: te.sum( data[n, ic, oh * HSTR + kh, ow * WSTR + kw] * kernel[kh, kw, ic, oc], axis=[ic, kh, kw] ), name="conv2d", ) s = te.create_schedule(conv.op) n, oc, oh, ow = conv.op.axis if loop_tiling: oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16) lowered_func = tvm.lower(s, [data, kernel, conv], simple_mode=True) dev = tvm.cpu(0) f = tvm.build(s, [data, kernel, conv], "llvm") data_input = tvm.nd.array( np.random.uniform(size=(batch_size, in_channel, in_height, in_width)).astype("float32"), dev ) kernel_input = tvm.nd.array( np.random.uniform(size=(kernel_height, kernel_width, in_channel, out_channel)).astype( "float32" ), dev, ) conv_out = tvm.nd.empty((batch_size, out_channel, out_height, out_width), "float32", dev) f(data_input, kernel_input, conv_out) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_conv_llvm(data_offsets, kernel_offsets, loop_tiling=False): HSTR = WSTR = 1 in_channel = 128 kernel_height = kernel_width = 3 out_channel = 64 batch_size = 1 in_height = in_width = 64 out_height = out_width = in_height - kernel_height + 1 data = te.placeholder((batch_size, in_channel, in_height, in_width), name="data") kernel = te.placeholder((kernel_height, kernel_width, 
in_channel, out_channel), name="kernel") ic = te.reduce_axis((0, in_channel), name="ic") kh = te.reduce_axis((0, kernel_height), name="kh") kw = te.reduce_axis((0, kernel_width), name="kw") conv = te.compute( (batch_size, out_channel, out_height, out_width), lambda n, oc, oh, ow: te.sum( data[ n + data_offsets[0], ic + data_offsets[1], oh * HSTR + kh + data_offsets[2], ow * WSTR + kw + data_offsets[3], ] * kernel[ kh + kernel_offsets[0], kw + kernel_offsets[1], ic + kernel_offsets[2], oc + kernel_offsets[3], ], axis=[ic, kh, kw], ), name="conv2d", ) s = te.create_schedule(conv.op) n, oc, oh, ow = conv.op.axis if loop_tiling: oho, owo, ohi, owi = s[conv].tile(oh, ow, 16, 16) lowered_func = tvm.lower(s, [data, kernel, conv], simple_mode=True) dev = tvm.cpu(0) f = tvm.build(s, [data, kernel, conv], "llvm") data_input = tvm.nd.array( np.random.uniform(size=(batch_size, in_channel, in_height, in_width)).astype("float32"), dev ) kernel_input = tvm.nd.array( np.random.uniform(size=(kernel_height, kernel_width, in_channel, out_channel)).astype( "float32" ), dev, ) conv_out = tvm.nd.empty((batch_size, out_channel, out_height, out_width), "float32", dev) f(data_input, kernel_input, conv_out) @tvm.testing.requires_llvm def test_in_bounds_tensors_with_same_shapes1D_llvm(): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = te.placeholder((n,), name="A") B = te.placeholder((k,), name="B") T = te.compute((m,), lambda i: A[i] * B[i]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(32,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(32,)).astype(B.dtype), dev) t = tvm.nd.empty((32,), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = 
te.placeholder((n,), name="A") B = te.placeholder((k,), name="B") T = te.compute((m,), lambda i: A[i] * B[i]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(a_shape,)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(b_shape,)).astype(B.dtype), dev) t = tvm.nd.empty((c_shape,), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm def test_in_bounds_tensors_with_same_shapes2D_llvm(): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = te.placeholder((n, n), name="A") B = te.placeholder((k, k), name="B") T = te.compute((m, m), lambda i, j: A[i][j] * B[i][j]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(32, 32)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(32, 32)).astype(B.dtype), dev) t = tvm.nd.empty((32, 32), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = te.placeholder((n, n), name="A") B = te.placeholder((k, k), name="B") T = te.compute((m, m), lambda i, j: A[i][j] * B[i][j]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(a_shape[0], a_shape[1])).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(b_shape[0], b_shape[1])).astype(B.dtype), dev) t = tvm.nd.empty((c_shape[0], c_shape[1]), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm def test_in_bounds_tensors_with_same_shapes3D_llvm(): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = te.placeholder((n, n, n), name="A") B = te.placeholder((k, k, 
k), name="B") T = te.compute((m, m, m), lambda i, j, p: A[i][j][p] * B[i][j][p]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array(np.random.uniform(size=(32, 32, 32)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(32, 32, 32)).astype(B.dtype), dev) t = tvm.nd.empty((32, 32, 32), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape): n = te.size_var("n") k = te.size_var("k") m = te.size_var("m") A = te.placeholder((n, n, n), name="A") B = te.placeholder((k, k, k), name="B") T = te.compute((m, m, m), lambda i, j, p: A[i][j][p] * B[i][j][p]) s = te.create_schedule(T.op) lowered_func = tvm.lower(s, [A, B, T], "llvm", simple_mode=False) dev = tvm.cpu(0) f = tvm.build(s, [A, B, T], "llvm") a = tvm.nd.array( np.random.uniform(size=(a_shape[0], a_shape[1], c_shape[2])).astype(A.dtype), dev ) b = tvm.nd.array( np.random.uniform(size=(b_shape[0], b_shape[1], b_shape[2])).astype(B.dtype), dev ) t = tvm.nd.empty((c_shape[0], c_shape[1], c_shape[2]), T.dtype, dev) f(a, b, t) @tvm.testing.requires_llvm @pytest.mark.xfail def test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm(): n = 64 A = te.placeholder((n,), name="A") scale = te.placeholder((), name="scale") k = te.reduce_axis((0, n), name="k") C = te.compute((), lambda: te.sum(A[k + k + k] * scale, axis=k), name="C") D = te.compute((), lambda: C + 1) s = te.create_schedule(D.op) stmt = tvm.lower(s, [A, scale, D], simple_mode=True) # build and invoke the kernel. f = tvm.build(s, [A, scale, D], "llvm") dev = tvm.cpu(0) # launch the kernel. 
a = tvm.nd.array(np.random.randint(0, 2, size=(n,)).astype(A.dtype), dev) sc = tvm.nd.array(np.random.randint(0, 2, size=()).astype(scale.dtype), dev) d = tvm.nd.empty((), D.dtype, dev) f(a, sc, d) d_np = np.sum(a.numpy()) * sc.numpy() + 1 tvm.testing.assert_allclose(d.numpy(), d_np) if __name__ == "__main__": with tvm.transform.PassContext( config={ "tir.instrument_bound_checkers": True, } ): # zero scale test_out_of_bounds_tensors_with_zero_shape_op_with_not_zero_shape_llvm() # in bound test_in_bounds_llvm() # upper bound test_out_of_bounds_llvm(1, 0) test_out_of_bounds_llvm(0, 1) test_out_of_bounds_llvm(1, 1) test_out_of_bounds_llvm(10000, 0) test_out_of_bounds_llvm(0, 10000) test_out_of_bounds_llvm(10000, 10000) # lower bound test_out_of_bounds_llvm(-1, 0) test_out_of_bounds_llvm(0, -1) test_out_of_bounds_llvm(-1, -1) test_out_of_bounds_llvm(-10000, 0) test_out_of_bounds_llvm(0, -10000) test_out_of_bounds_llvm(-10000, -10000) # vectorize in bound test_in_bounds_vectorize_llvm() # vectorization upper bound test_out_of_bounds_vectorize_llvm(1024, 1000, 0) test_out_of_bounds_vectorize_llvm(1024, 0, 10000) # vectorization lower bound test_out_of_bounds_vectorize_llvm(1024, -1000, 0) test_out_of_bounds_vectorize_llvm(1024, 0, -10000) test_in_bounds_const_loop_partition_llvm() test_out_of_bounds_const_loop_partition_llvm(1, 0) test_out_of_bounds_const_loop_partition_llvm(0, 1) test_out_of_bounds_const_loop_partition_llvm(-1, 0) test_out_of_bounds_const_loop_partition_llvm(0, -1) test_in_bounds_loop_partition_basic_llvm() test_out_of_bounds_loop_partition_basic_llvm(32, 0) test_out_of_bounds_loop_partition_basic_llvm(0, 32) test_out_of_bounds_loop_partition_basic_llvm(-32, 0) test_out_of_bounds_loop_partition_basic_llvm(0, -32) # conv test_in_bounds_conv_llvm() test_out_of_bounds_conv_llvm([1, 0, 0, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 1, 0, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 1, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 
1], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([-1, 0, 0, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, -1, 0, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, -1, 0], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, -1], [0, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [1, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 1, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 1, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, 1]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [-1, 0, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, -1, 0, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, -1, 0]) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, -1]) # loop tiling test_in_bounds_conv_llvm(True) test_out_of_bounds_conv_llvm([1, 0, 0, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 1, 0, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 1, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 1], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([-1, 0, 0, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, -1, 0, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, -1, 0], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, -1], [0, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [1, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 1, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 1, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, 1], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [-1, 0, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, -1, 0, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, -1, 0], True) test_out_of_bounds_conv_llvm([0, 0, 0, 0], [0, 0, 0, -1], True) # tensors with diff shapes basic operation such as mul test_out_of_bounds_tensors_with_diff_shapes1D_llvm(32, 64, 64) test_out_of_bounds_tensors_with_diff_shapes1D_llvm(64, 32, 64) 
test_out_of_bounds_tensors_with_diff_shapes2D_llvm([64, 64], [32, 32], [64, 64]) test_out_of_bounds_tensors_with_diff_shapes2D_llvm([32, 32], [64, 64], [64, 64]) test_out_of_bounds_tensors_with_diff_shapes3D_llvm([64, 64, 64], [32, 32, 32], [64, 64, 64]) test_out_of_bounds_tensors_with_diff_shapes3D_llvm([32, 32, 32], [64, 64, 64], [64, 64, 64]) # check tensors with the same shapes test_in_bounds_tensors_with_same_shapes1D_llvm() test_in_bounds_tensors_with_same_shapes2D_llvm() test_in_bounds_tensors_with_same_shapes3D_llvm() # ir tests test_in_bounds_const_loop_partition_ir()
22,873
36.68369
100
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_inject_double_buffer.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T, ir as I from tvm import te def test_double_buffer(): dtype = "int64" n = 100 m = 4 tx = te.thread_axis("threadIdx.x") ib = tvm.tir.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") ib.scope_attr(tx, "thread_extent", 1) with ib.for_range(0, n) as i: B = ib.allocate("float32", m, name="B", scope="shared") with ib.new_scope(): ib.scope_attr(B.asobject().data, "double_buffer_scope", 1) with ib.for_range(0, m) as j: B[j] = A[i * 4 + j] with ib.for_range(0, m) as j: C[j] = B[j] + 1 stmt = ib.get() mod = tvm.IRModule({"db": tvm.tir.PrimFunc([A.asobject(), C.asobject()], stmt)}) opt = tvm.transform.Sequential( [tvm.tir.transform.InjectDoubleBuffer(), tvm.tir.transform.Simplify()] ) with tvm.transform.PassContext(config={"tir.InjectDoubleBuffer": {"split_loop": 2}}): mod = opt(mod) stmt = mod["db"].body assert isinstance(stmt.body, tvm.tir.Allocate) assert list(stmt.body.extents) == [m * 2] f = tvm.tir.transform.ThreadSync("shared")(mod)["db"] count = [0] def count_sync(op): if isinstance(op, tvm.tir.Call) and op.op.same_as(tvm.ir.Op.get("tir.tvm_storage_sync")): count[0] += 1 
tvm.tir.stmt_functor.post_order_visit(f.body, count_sync) assert count[0] == 4 class TestDoubleBuffer(tvm.testing.CompareBeforeAfter): transform = tvm.ir.transform.Sequential( [ tvm.tir.transform.InjectDoubleBuffer(), tvm.tir.transform.Simplify(), ] ) def before(A: T.Buffer([16, 32], "float32"), B: T.Buffer(16, "float32")): for i in range(16): cache_data = T.allocate([32], "float32") cache = T.Buffer(32, "float32", data=cache_data) T.attr(cache_data, "double_buffer_scope", 1) for j in range(32): cache[j] = A[i, j] B[i] = 0.0 for j in range(32): B[i] = B[i] + cache[j] def expected(A: T.Buffer((16, 32), "float32"), B: T.Buffer((16,), "float32")): cache_data = T.allocate([64], "float32", "global") cache = T.Buffer(64, data=cache_data) for j in range(32): cache[j] = A[0, j] B[0] = T.float32(0) for j in range(32): B[0] = B[0] + cache[j] for i_outer in range(15): T.attr(cache_data, "double_buffer_write", 1) for j in range(32): cache[(i_outer + 1) % 2 * 32 + j] = A[i_outer + 1, j] B[i_outer + 1] = T.float32(0) for j in range(32): B[i_outer + 1] = B[i_outer + 1] + cache[(i_outer + 1) % 2 * 32 + j] class TestDoubleBufferWithDeclBuffer(tvm.testing.CompareBeforeAfter): """Like TestDoubleBuffer, but with a declared buffer object""" transform = tvm.ir.transform.Sequential( [ tvm.tir.transform.InjectDoubleBuffer(), tvm.tir.transform.Simplify(), ] ) def before(A: T.Buffer((16, 32), "float32"), B: T.Buffer(16, "float32")): for i in range(16): cache = T.decl_buffer(32, "float32") T.attr(cache.data, "double_buffer_scope", 1) for j in range(32): cache[j] = A[i, j] B[i] = 0.0 for j in range(32): B[i] = B[i] + cache[j] def expected(A: T.Buffer((16, 32), "float32"), B: T.Buffer(16, "float32")): cache = T.decl_buffer(64, "float32") for j in range(32): cache[j] = A[0, j] B[0] = T.float32(0) for j in range(32): B[0] = B[0] + cache[j] for i_outer in range(15): T.attr(cache.data, "double_buffer_write", 1) for j in range(32): cache[(i_outer + 1) % 2 * 32 + j] = A[i_outer + 1, j] B[i_outer + 
1] = T.float32(0) for j in range(32): B[i_outer + 1] = B[i_outer + 1] + cache[(i_outer + 1) % 2 * 32 + j] if __name__ == "__main__": tvm.testing.main()
4,985
31.802632
97
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_extract_constants.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm from tvm import tir from tvm.script import tir as T import tvm.testing @tvm.script.ir_module class Module4: @T.prim_func def constant1(a: T.handle) -> None: A = T.match_buffer(a, (10), "int32") B = T.alloc_buffer((10), "int32") K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10]) K = T.Buffer(shape=(10), dtype="int32", data=K_data) for x in T.serial(0, 10): B[x] = A[x] + K[x] @T.prim_func def constant2(a: T.handle) -> None: A = T.match_buffer(a, (10), "int32") B = T.alloc_buffer((10), "int32") K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10]) K = T.Buffer(shape=(10), dtype="int32", data=K_data) for x in T.serial(0, 10): B[x] = A[x] + K[x] @T.prim_func def constant3(a: T.handle) -> None: A = T.match_buffer(a, (10), "int32") B = T.alloc_buffer((10), "int32") K_data = T.allocate_const([1, 2, 3, 1, 1, 1, 1, 1, 1, 1], "int32", [10]) K = T.Buffer(shape=(10), dtype="int32", data=K_data) for x in T.serial(0, 10): B[x] = A[x] + K[x] def test_const_extraction(): mod = tvm.tir.transform.ExtractPrimFuncConstants()(Module4) constants = mod.attrs["constants"] assert len(constants) == 2 def _visit(stmt): if isinstance(stmt, 
tvm.tir.AllocateConst): assert np.array_equal(stmt.data.numpy(), constants[int(stmt.irmod_storage_idx)].numpy()) for n, f in mod.functions.items(): tvm.tir.stmt_functor.post_order_visit(f.body, _visit) tvm.lower(mod) if __name__ == "__main__": tvm.testing.main()
2,495
34.15493
100
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_parser_ir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Unittests for tvm.script.parser.ir""" import pytest import inspect import tvm.testing from tvm.script.parser import ir_module from tvm.ir import IRModule def test_ir_base(): @ir_module class BlankIRModule: pass assert isinstance(BlankIRModule, IRModule) and len(BlankIRModule.functions.items()) == 0 assert BlankIRModule.__name__ == "BlankIRModule" if __name__ == "__main__": tvm.testing.main()
1,217
31.918919
92
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_byoc_tensorrt.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Test Meta Schedule Builder """ # pylint: disable=missing-docstring import sys from typing import List import pytest import tvm import tvm.testing from tvm import relay from tvm.meta_schedule.arg_info import TensorInfo from tvm.meta_schedule.builder import BuilderInput, LocalBuilder from tvm.meta_schedule.runner import EvaluatorConfig, LocalRunner, RunnerInput from tvm.meta_schedule.testing.custom_builder_runner import ( build_relay, build_relay_with_tensorrt, run_with_graph_executor, ) from tvm.meta_schedule.testing.relay_workload import get_network from tvm.relay import testing from tvm.relay.op.contrib import tensorrt from tvm.target import Target from tvm.tir import FloatImm has_tensorrt_codegen = pytest.mark.skipif( not tvm.get_global_func("relay.ext.tensorrt", True), reason="TensorRT codegen not available", ) has_tensorrt_runtime = pytest.mark.skipif( not tensorrt.is_tensorrt_runtime_enabled(), reason="TensorRT runtime not available", ) # conv2d+relu network def get_conv2d_relu( data_shape, out_channels, kernel_size, strides, padding, dilation, groups, data_layout, kernel_layout, dtype, ): data = relay.var("data", relay.TensorType(data_shape, dtype)) weight = relay.var("weight") net = 
relay.nn.conv2d( data=data, weight=weight, # conv kernel strides=strides, padding=padding, dilation=dilation, groups=groups, channels=out_channels, kernel_size=kernel_size, data_layout=data_layout, kernel_layout=kernel_layout, ) net = relay.add(net, net) net = relay.nn.relu(net) inputs = relay.analysis.free_vars(net) return relay.Function(inputs, net) def verify_meta_schedule_with_tensorrt( mod, params, data_shape, use_trt: bool = True, ): # Build builder = LocalBuilder( f_build=build_relay_with_tensorrt if use_trt else build_relay, timeout_sec=1000, ) builder_input = BuilderInput(mod, Target("cuda"), params) builder_result = builder.build([builder_input])[0] assert builder_result.error_msg is None, builder_result.error_msg assert builder_result.artifact_path is not None # Run runner_input = RunnerInput( builder_result.artifact_path, device_type="cuda", args_info=[TensorInfo("float32", data_shape)], ) runner = LocalRunner( evaluator_config=EvaluatorConfig( number=5, repeat=2, min_repeat_ms=0, enable_cpu_cache_flush=False, ), f_run_evaluator=run_with_graph_executor, ) # Run the module runner_future = runner.run([runner_input])[0] runner_result = runner_future.result() assert runner_result is not None assert runner_result.error_msg is None, runner_result.error_msg assert runner_result.run_secs is not None for result in runner_result.run_secs: if isinstance(result, FloatImm): result = result.value assert isinstance(result, float) assert result >= 0.0 @has_tensorrt_codegen def test_conv2d_relu(): data_shape = (1, 1280, 14, 14) out_channels = 256 kernel_size, strides, padding, dilation, groups = (1, 1), (1, 1), (0, 0, 0, 0), (1, 1), 1 data_layout, kernel_layout = "NCHW", "OIHW" dtype = "float32" f = get_conv2d_relu( data_shape, out_channels, kernel_size, strides, padding, dilation, groups, data_layout, kernel_layout, dtype, ) mod, params = testing.create_workload(f) verify_meta_schedule_with_tensorrt(mod, params, data_shape) @has_tensorrt_codegen 
@pytest.mark.parametrize("model_name", ["resnet_50"]) @pytest.mark.parametrize("input_shape", [[1, 3, 224, 224]]) @pytest.mark.parametrize("use_trt", [True, False]) def test_relay_model(model_name: str, input_shape: List[int], use_trt: bool): mod, params, _ = get_network(model_name, input_shape) verify_meta_schedule_with_tensorrt( mod, params, input_shape, use_trt, ) if __name__ == "__main__": tvm.testing.main()
4,995
27.878613
93
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_task_scheduler.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Test Meta Schedule Task Scheduler """ import random import weakref from typing import Set import pytest import tvm import tvm.testing from tvm import meta_schedule as ms from tvm.meta_schedule.testing.dummy_object import DummyBuilder, DummyRunner from tvm.script import tir as T from tvm.tir import Schedule # pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring @tvm.script.ir_module class MatmulModule: @T.prim_func def main( # type: ignore a: T.handle, b: T.handle, c: T.handle, ) -> None: # pylint: disable=no-self-argument T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, (1024, 1024), "float32") B = T.match_buffer(b, (1024, 1024), "float32") C = T.match_buffer(c, (1024, 1024), "float32") for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 # type: ignore C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] @tvm.script.ir_module class MatmulReluModule: @T.prim_func def main( # type: ignore a: T.handle, b: T.handle, d: T.handle, ) -> None: # pylint: disable=no-self-argument T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = 
T.match_buffer(a, (1024, 1024), "float32") B = T.match_buffer(b, (1024, 1024), "float32") D = T.match_buffer(d, (1024, 1024), "float32") C = T.alloc_buffer((1024, 1024), "float32") for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 # type: ignore C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] for i, j in T.grid(1024, 1024): with T.block("relu"): vi, vj = T.axis.remap("SS", [i, j]) D[vi, vj] = T.max(C[vi, vj], 0.0) # type: ignore @tvm.script.ir_module class BatchMatmulModule: @T.prim_func def main( # type: ignore a: T.handle, b: T.handle, c: T.handle, ) -> None: # pylint: disable=no-self-argument T.func_attr({"global_symbol": "main", "tir.noalias": True}) A = T.match_buffer(a, [16, 128, 128]) B = T.match_buffer(b, [16, 128, 128]) C = T.match_buffer(c, [16, 128, 128]) for n, i, j, k in T.grid(16, 128, 128, 128): with T.block("matmul"): vn, vi, vj, vk = T.axis.remap("SSSR", [n, i, j, k]) with T.init(): C[vn, vi, vj] = 0.0 # type: ignore C[vn, vi, vj] = C[vn, vi, vj] + A[vn, vi, vk] * B[vn, vj, vk] # pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks def _schedule_matmul(sch: Schedule): block = sch.get_block("matmul") i, j, k = sch.get_loops(block=block) i_0, i_1, i_2, i_3 = sch.split(loop=i, factors=[2, 4, 64, 2]) j_0, j_1, j_2, j_3 = sch.split(loop=j, factors=[4, 64, 2, 2]) k_0, k_1 = sch.split(loop=k, factors=[32, 32]) sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3) def _schedule_batch_matmul(sch: Schedule): block = sch.get_block("matmul") i, j, k, t = sch.get_loops(block=block) i_0, i_1, i_2, i_3 = sch.split(loop=i, factors=[2, 2, 2, 2]) j_0, j_1, j_2, j_3 = sch.split(loop=j, factors=[2, 4, 64, 2]) k_0, k_1 = sch.split(loop=k, factors=[32, 32]) t_0, t_1 = sch.split(loop=t, factors=[2, 512]) sch.reorder(i_0, j_0, i_1, j_1, k_0, i_2, j_2, k_1, i_3, j_3, t_0, t_1) @ms.derived_object class MyTaskScheduler(ms.task_scheduler.PyTaskScheduler): 
done: Set = set() def next_task_id(self) -> int: tasks = self._outer().tasks_ while len(self.done) != len(tasks): x = random.randint(0, len(tasks) - 1) task = tasks[x] if not task.is_terminated: """Calling base func via following route: Python side: PyTaskScheduler does not have `_touch_task` Call TaskScheduler's `touch_task`, which calls ffi C++ side: The ffi calls TaskScheduler's `touch_task` But it is overridden in PyTaskScheduler PyTaskScheduler checks if the function is overridden in python If not, it returns the TaskScheduler's vtable, calling TaskScheduler::TouchTask """ if task.runner_futures is not None: self.join_running_task(x) return x self.done.add(x) return -1 def test_meta_schedule_task_scheduler_single(): num_trials_per_iter = 3 max_trials_per_task = 10 database = ms.database.MemoryDatabase() round_robin = ms.task_scheduler.RoundRobin() round_robin.tune( [ ms.TuneContext( MatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="Test", rand_state=42, ) ], [1.0], max_trials_global=num_trials_per_iter, max_trials_per_task=max_trials_per_task, num_trials_per_iter=64, builder=DummyBuilder(), runner=DummyRunner(), database=database, measure_callbacks=[ms.measure_callback.AddToDatabase()], cost_model=None, ) assert len(database) == max_trials_per_task def test_meta_schedule_task_scheduler_multiple(): num_trials_per_iter = 6 max_trials_per_task = 101 tasks = [ ms.TuneContext( MatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="Matmul", rand_state=42, ), ms.TuneContext( MatmulReluModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="MatmulRelu", rand_state=0xDEADBEEF, ), ms.TuneContext( BatchMatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_batch_matmul, 
search_strategy=ms.search_strategy.ReplayTrace(), task_name="BatchMatmul", rand_state=0x114514, ), ] database = ms.database.MemoryDatabase() round_robin = ms.task_scheduler.RoundRobin() round_robin.tune( tasks, [1.0, 1.0, 1.0], builder=DummyBuilder(), runner=DummyRunner(), database=database, measure_callbacks=[ms.measure_callback.AddToDatabase()], max_trials_global=max_trials_per_task * len(tasks), max_trials_per_task=max_trials_per_task, num_trials_per_iter=num_trials_per_iter, cost_model=None, ) assert len(database) == max_trials_per_task * len(tasks) for task in tasks: assert ( len( database.get_top_k( database.commit_workload(task.mod), 100000, ) ) == max_trials_per_task ) def test_meta_schedule_task_scheduler_NIE(): # pylint: disable=invalid-name @ms.derived_object class NIETaskScheduler(ms.task_scheduler.PyTaskScheduler): pass with pytest.raises(ValueError, match="next_task_id is not defined"): scheduler = NIETaskScheduler() scheduler.next_task_id() def test_meta_schedule_task_scheduler_avoid_cyclic(): # pylint: disable=invalid-name scheduler = MyTaskScheduler() test = weakref.ref(scheduler) # test if it can be destructed successfully del scheduler assert test() is None def test_meta_schedule_task_scheduler_override_next_task_id_only(): # pylint: disable=invalid-name max_trials_per_task = 101 tasks = [ ms.TuneContext( MatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="Matmul", rand_state=42, ), ms.TuneContext( MatmulReluModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="MatmulRelu", rand_state=0xDEADBEEF, ), ms.TuneContext( BatchMatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_batch_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="BatchMatmul", rand_state=0x114514, ), ] database = ms.database.MemoryDatabase() scheduler = MyTaskScheduler() 
scheduler.tune( tasks, task_weights=[1.0] * len(tasks), builder=DummyBuilder(), runner=DummyRunner(), database=database, measure_callbacks=[ms.measure_callback.AddToDatabase()], max_trials_global=max_trials_per_task * len(tasks), max_trials_per_task=max_trials_per_task, num_trials_per_iter=6, cost_model=None, ) assert len(database) == max_trials_per_task * len(tasks) for task in tasks: assert ( len( database.get_top_k( database.commit_workload(task.mod), 100000, ) ) == max_trials_per_task ) def test_meta_schedule_task_scheduler_multiple_gradient_based(): max_trials_per_task = 101 tasks = [ ms.TuneContext( MatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="Matmul", rand_state=42, ), ms.TuneContext( MatmulReluModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="MatmulRelu", rand_state=0xDEADBEEF, ), ms.TuneContext( BatchMatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_batch_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="BatchMatmul", rand_state=0x114514, ), ] database = ms.database.MemoryDatabase() gradient_based = ms.task_scheduler.GradientBased() gradient_based.tune( tasks, task_weights=[1.0, 1.0, 1.0], builder=DummyBuilder(), runner=DummyRunner(), database=database, measure_callbacks=[ms.measure_callback.AddToDatabase()], max_trials_global=max_trials_per_task * len(tasks), max_trials_per_task=max_trials_per_task, num_trials_per_iter=6, cost_model=None, ) assert len(database) == max_trials_per_task * len(tasks) for task in tasks: assert ( len(database.get_top_k(database.commit_workload(task.mod), 10000)) == max_trials_per_task ) def test_meta_schedule_task_scheduler_gradient_based_with_null_search_strategy(): """ When search strategy of one task returns empty list of candidates or None, the scheduler should continue working as normal for other 
tasks """ @ms.derived_object class NullSearchStrategy(ms.search_strategy.PySearchStrategy): def __init__(self, rounds_with_empty_candidates): self.rounds_with_empty_candidates = rounds_with_empty_candidates def _initialize_with_tune_context(self, context: "TuneContext") -> None: pass def pre_tuning(self, *args, **kwargs): pass def post_tuning(self): pass def generate_measure_candidates(self): """ Returns empty list to indicate there is no result from search, while the search isn't ended. """ if self.rounds_with_empty_candidates: self.rounds_with_empty_candidates -= 1 return [] return None def notify_runner_results(self, *args, **kwargs): pass def clone(self): return NullSearchStrategy(n=self.n) tasks = [ ms.TuneContext( MatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=NullSearchStrategy(rounds_with_empty_candidates=5), task_name="Matmul", rand_state=42, ), ms.TuneContext( BatchMatmulModule, target=tvm.target.Target("llvm"), space_generator=_schedule_batch_matmul, search_strategy=NullSearchStrategy(rounds_with_empty_candidates=0), task_name="BatchMatmul", rand_state=0x114514, ), ms.TuneContext( MatmulReluModule, target=tvm.target.Target("llvm"), space_generator=_schedule_matmul, search_strategy=ms.search_strategy.ReplayTrace(), task_name="MatmulRelu", rand_state=0xDEADBEEF, ), ] database = ms.database.MemoryDatabase() gradient_based = ms.task_scheduler.GradientBased() gradient_based.tune( tasks, task_weights=[1.0, 1.0, 1.0], builder=DummyBuilder(), runner=DummyRunner(), database=database, measure_callbacks=[ms.measure_callback.AddToDatabase()], max_trials_global=30, max_trials_per_task=10, num_trials_per_iter=6, cost_model=None, ) assert len(database) == 10 assert len(database.get_top_k(database.commit_workload(MatmulModule), 100)) == 0 assert len(database.get_top_k(database.commit_workload(BatchMatmulModule), 100)) == 0 assert len(database.get_top_k(database.commit_workload(MatmulReluModule), 100)) == 10 if __name__ 
== "__main__": test_meta_schedule_task_scheduler_single() test_meta_schedule_task_scheduler_multiple() test_meta_schedule_task_scheduler_NIE() test_meta_schedule_task_scheduler_avoid_cyclic() test_meta_schedule_task_scheduler_override_next_task_id_only() test_meta_schedule_task_scheduler_multiple_gradient_based() test_meta_schedule_task_scheduler_gradient_based_with_null_search_strategy()
15,714
34.1566
99
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_decompose_padding.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import numpy as np import tvm import tvm.testing from tvm import tir from tvm.script import tir as T # pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg def check_decompose_padding(origin, scheduled, expected, check_run=False): tvm.ir.assert_structural_equal(scheduled, expected) if check_run: in_buffer = origin.buffer_map[origin.params[0]] out_buffer = origin.buffer_map[origin.params[1]] in_shape = [int(_) for _ in in_buffer.shape] out_shape = [int(_) for _ in out_buffer.shape] x = tvm.nd.array(np.random.uniform(0, 64, in_shape).astype(in_buffer.dtype)) y0 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype)) y1 = tvm.nd.array(np.zeros(out_shape).astype(out_buffer.dtype)) f_origin = tvm.build(origin) f_scheduled = tvm.build(scheduled) f_origin(x, y0) f_scheduled(x, y1) tvm.testing.assert_allclose(y0.numpy(), y1.numpy()) def test_int64_indices_batch_decompose_padding(): @T.prim_func def before_decompose( x: T.Buffer((T.int64(1), T.int64(128), T.int64(128)), "int32"), y: T.Buffer((T.int64(1), T.int64(140), T.int64(128)), "int32"), ): for b, i, j in T.grid(T.int64(1), T.int64(140), T.int64(128)): with 
T.block("block"): vb, vi, vj = T.axis.remap("SSS", [b, i, j]) y[vb, vi, vj] = T.if_then_else(vi < T.int64(128), x[vb, vi, vj], 0) @T.prim_func def after_decompose( x: T.Buffer((T.int64(1), T.int64(128), T.int64(128)), "int32"), y: T.Buffer((T.int64(1), T.int64(140), T.int64(128)), "int32"), ): # with T.block("root"): for b, i in T.grid(T.int64(1), T.int64(140)): for j in range(T.int64(128)): with T.block("block_pad_const"): vb = T.axis.spatial(T.int64(1), T.int64(0)) vi, vj = T.axis.remap("SS", [i, j]) T.reads() T.writes(y[vb, vi, vj]) y[vb, vi, vj] = 0 for j in range(T.int64(128)): with T.block("block"): vb = T.axis.spatial(T.int64(1), T.int64(0)) vi = T.axis.spatial(T.int64(128), i) vj = T.axis.spatial(T.int64(128), j) T.where(i < T.int64(128)) T.reads(x[vb, vi, vj]) T.writes(y[vb, vi, vj]) y[vb, vi, vj] = x[vb, vi, vj] sch = tir.Schedule(before_decompose, debug_mask="all") block = sch.get_block("block") sch.decompose_padding(block, sch.get_loops(block)[2]) check_decompose_padding(before_decompose, sch.mod["main"], after_decompose, check_run=False) def test_1d_decompose_padding(): @T.prim_func def before_decompose(x: T.Buffer(128, "int32"), y: T.Buffer(140, "int32")): for i in range(140): with T.block("block"): vi = T.axis.remap("S", [i]) y[vi] = T.if_then_else(vi >= 6 and vi < 134, x[vi - 6], 0, dtype="int32") @T.prim_func def after_decompose(x: T.Buffer(128, "int32"), y: T.Buffer(140, "int32")): for i in T.serial(140): with T.block("block_pad_const"): vi = T.axis.spatial(140, i) T.reads() T.writes(y[vi]) y[vi] = 0 for i in T.serial(128): with T.block("block"): vi = T.axis.spatial(128, i) T.reads(x[vi]) T.writes(y[vi + 6]) y[vi + 6] = x[vi] sch = tir.Schedule(before_decompose, debug_mask="all") block = sch.get_block("block") sch.decompose_padding(block, sch.get_loops(block)[0]) check_decompose_padding(before_decompose, sch.mod["main"], after_decompose, check_run=False) @T.prim_func def sum_pool_2d( x: T.Buffer((1, 16, 225, 225), "int8"), tensor: T.Buffer((1, 16, 
225, 225), "int8") ): pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8") for i0, i1, i2, i3 in T.grid(1, 16, 231, 231): with T.block("pad_temp"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[ax0, ax1, ax2, ax3] = T.if_then_else( 3 <= ax2 and ax2 < 228 and 3 <= ax3 and ax3 < 228, x[ax0, ax1, ax2 - 3, ax3 - 3], T.int8(0), dtype="int8", ) for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7): with T.block("tensor"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): tensor[ax0, ax1, ax2, ax3] = T.int8(0) tensor[ax0, ax1, ax2, ax3] = ( tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1] ) def test_decompose_hw_padding_direct(): """Case 0. direct decompose""" @T.prim_func def pooling_decompose_0( x: T.Buffer((1, 16, 225, 225), "int8"), tensor: T.Buffer((1, 16, 225, 225), "int8") ): pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8") for i0, i1, i2, i3 in T.grid(1, 16, 231, 231): with T.block("pad_temp_pad_const"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[ax0, ax1, ax2, ax3] = T.int8(0) for i0, i1, i2, i3 in T.grid(1, 16, 225, 225): with T.block("pad_temp"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[ax0, ax1, ax2 + 3, ax3 + 3] = x[ax0, ax1, ax2, ax3] for i0, i1, i2, i3, i4, i5 in T.grid(1, 16, 225, 225, 7, 7): with T.block("tensor"): ax0, ax1, ax2, ax3, rv0, rv1 = T.axis.remap("SSSSRR", [i0, i1, i2, i3, i4, i5]) with T.init(): tensor[ax0, ax1, ax2, ax3] = T.int8(0) tensor[ax0, ax1, ax2, ax3] = ( tensor[ax0, ax1, ax2, ax3] + pad_temp[ax0, ax1, ax2 + rv0, ax3 + rv1] ) sch = tir.Schedule(sum_pool_2d, debug_mask="all") pad = sch.get_block("pad_temp") sch.decompose_padding(pad, sch.get_loops(pad)[0]) check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_0, check_run=True) def test_decompose_hw_padding_tiled(): """Case 1. 
tiling and then decompose""" @T.prim_func def pooling_decompose_1( x: T.Buffer((1, 16, 225, 225), "int8"), tensor: T.Buffer((1, 16, 225, 225), "int8") ) -> None: pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8") for i0, i2_0, i3_0 in T.grid(1, 3, 3): for ax0, ax1, ax2 in T.grid(16, 81, 81): with T.block("pad_temp_pad_const"): ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(16, ax0) ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1) ax3 = T.axis.spatial(231, i3_0 * 75 + ax2) T.reads() T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3]) pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0) for ax0, ax1, ax2 in T.grid(16, 81, 81): with T.block("pad_temp"): ax0_2 = T.axis.spatial(1, 0) ax1_2 = T.axis.spatial(16, ax0) ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3) ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3) T.where( 3 <= i2_0 * 75 + ax1 and i2_0 * 75 + ax1 < 228 and 3 <= i3_0 * 75 + ax2 and i3_0 * 75 + ax2 < 228 ) T.reads(x[ax0_2, ax1_2, ax2_2, ax3]) T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3]) pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3] for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7): with T.block("tensor"): ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1]) ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1) ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]) T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3]) with T.init(): tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0) tensor[ax0_3, ax1_3, ax2_3, ax3] = ( tensor[ax0_3, ax1_3, ax2_3, ax3] + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1] ) sch = tir.Schedule(sum_pool_2d, debug_mask="all") block = sch.get_block("tensor") pad = sch.get_block("pad_temp") n, c, h, w, kh, kw = sch.get_loops(block) ho, hi = sch.split(h, [3, 75]) wo, wi = sch.split(w, [3, 75]) sch.reorder(n, ho, wo, c, hi, wi, kh, kw) sch.compute_at(sch.get_block("pad_temp"), wo) sch.decompose_padding(pad, sch.get_loops(pad)[3]) 
check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_1, check_run=True) def test_decompose_hw_padding_tiled_and_lift_pad(): """Case 2. tiling and then decompose, lift const pad values to outer loop""" @T.prim_func def pooling_decompose_2( x: T.Buffer((1, 16, 225, 225), "int8"), tensor: T.Buffer((1, 16, 225, 225), "int8") ) -> None: pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8") for i0, i2_0, i3_0, ax0, ax1, ax2 in T.grid(1, 3, 3, 16, 81, 81): with T.block("pad_temp_pad_const"): ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(16, ax0) ax2_1 = T.axis.spatial(231, i2_0 * 75 + ax1) ax3 = T.axis.spatial(231, i3_0 * 75 + ax2) T.reads() T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3]) pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0) for i0, i2_0, i3_0 in T.grid(1, 3, 3): for ax0, ax1, ax2 in T.grid(16, 81, 81): with T.block("pad_temp"): ax0_2 = T.axis.spatial(1, 0) ax1_2 = T.axis.spatial(16, ax0) ax2_2 = T.axis.spatial(225, i2_0 * 75 + ax1 - 3) ax3 = T.axis.spatial(225, i3_0 * 75 + ax2 - 3) T.where( 3 <= i2_0 * 75 + ax1 and i2_0 * 75 + ax1 < 228 and 3 <= i3_0 * 75 + ax2 and i3_0 * 75 + ax2 < 228 ) T.reads(x[ax0_2, ax1_2, ax2_2, ax3]) T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3]) pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3] for i1, i2_1, i3_1, i4, i5 in T.grid(16, 75, 75, 7, 7): with T.block("tensor"): ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1]) ax2_3 = T.axis.spatial(225, i2_0 * 75 + i2_1) ax3 = T.axis.spatial(225, i3_0 * 75 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]) T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3]) with T.init(): tensor[ax0_3, ax1_3, ax2_3, ax3] = T.int8(0) tensor[ax0_3, ax1_3, ax2_3, ax3] = ( tensor[ax0_3, ax1_3, ax2_3, ax3] + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1] ) sch = tir.Schedule(sum_pool_2d, debug_mask="all") block = sch.get_block("tensor") pad = sch.get_block("pad_temp") n, c, h, w, kh, kw = sch.get_loops(block) ho, hi = 
sch.split(h, [3, 75]) wo, wi = sch.split(w, [3, 75]) sch.reorder(n, ho, wo, c, hi, wi, kh, kw) sch.compute_at(sch.get_block("pad_temp"), wo) sch.decompose_padding(pad, sch.get_loops(pad)[0]) check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_2, check_run=True) def test_decompose_hw_padding_non_perfect_tiled(): """Case 3. non-perfect tiling and then decompose""" @T.prim_func def pooling_decompose_3( x: T.Buffer((1, 16, 225, 225), "int8"), tensor: T.Buffer((1, 16, 225, 225), "int8") ) -> None: pad_temp = T.alloc_buffer([1, 16, 231, 231], dtype="int8") for i0, i2_0, i3_0 in T.grid(1, 3, 3): for ax0, ax1, ax2 in T.grid(16, 86, 86): with T.block("pad_temp_pad_const"): ax0_1 = T.axis.spatial(1, 0) ax1_1 = T.axis.spatial(16, ax0) ax2_1 = T.axis.spatial(231, i2_0 * 80 + ax1) ax3 = T.axis.spatial(231, i3_0 * 80 + ax2) T.where(i2_0 * 80 + ax1 < 231 and i3_0 * 80 + ax2 < 231) T.reads() T.writes(pad_temp[ax0_1, ax1_1, ax2_1, ax3]) pad_temp[ax0_1, ax1_1, ax2_1, ax3] = T.int8(0) for ax0, ax1, ax2 in T.grid(16, 86, 86): with T.block("pad_temp"): ax0_2 = T.axis.spatial(1, 0) ax1_2 = T.axis.spatial(16, ax0) ax2_2 = T.axis.spatial(225, i2_0 * 80 + ax1 - 3) ax3 = T.axis.spatial(225, i3_0 * 80 + ax2 - 3) T.where( 3 <= i2_0 * 80 + ax1 and i2_0 * 80 + ax1 < 228 and 3 <= i3_0 * 80 + ax2 and i3_0 * 80 + ax2 < 228 and i2_0 * 80 + ax1 < 231 and i3_0 * 80 + ax2 < 231 ) T.reads(x[ax0_2, ax1_2, ax2_2, ax3]) T.writes(pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3]) pad_temp[ax0_2, ax1_2, ax2_2 + 3, ax3 + 3] = x[ax0_2, ax1_2, ax2_2, ax3] for i1, i2_1, i3_1, i4, i5 in T.grid(16, 80, 80, 7, 7): with T.block("tensor"): ax0_3, ax1_3 = T.axis.remap("SS", [i0, i1]) ax2_3 = T.axis.spatial(225, i2_0 * 80 + i2_1) ax3 = T.axis.spatial(225, i3_0 * 80 + i3_1) rv0, rv1 = T.axis.remap("RR", [i4, i5]) T.where(i2_0 * 80 + i2_1 < 225 and i3_0 * 80 + i3_1 < 225) T.reads(pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1]) T.writes(tensor[ax0_3, ax1_3, ax2_3, ax3]) with T.init(): tensor[ax0_3, ax1_3, 
ax2_3, ax3] = T.int8(0) tensor[ax0_3, ax1_3, ax2_3, ax3] = ( tensor[ax0_3, ax1_3, ax2_3, ax3] + pad_temp[ax0_3, ax1_3, ax2_3 + rv0, ax3 + rv1] ) sch = tir.Schedule(sum_pool_2d, debug_mask="all") block = sch.get_block("tensor") pad = sch.get_block("pad_temp") n, c, h, w, kh, kw = sch.get_loops(block) ho, hi = sch.split(h, [None, 80]) wo, wi = sch.split(w, [None, 80]) sch.reorder(n, ho, wo, c, hi, wi, kh, kw) sch.compute_at(sch.get_block("pad_temp"), wo) sch.decompose_padding(pad, sch.get_loops(pad)[3]) check_decompose_padding(sum_pool_2d, sch.mod["main"], pooling_decompose_3, check_run=True) def test_decompose_wrt_single_child_subtree(): """Test the case when the decompose position is under the single child subtree""" @T.prim_func def pad_op( x: T.Buffer((1, 16, 225, 225), "int8"), y: T.Buffer((1, 16, 231, 231), dtype="int8"), ): for i0, i1, i2, i3 in T.grid(1, 16, 231, 231): with T.block("pad_temp"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) y[ax0, ax1, ax2, ax3] = T.if_then_else( 3 <= ax2 and ax2 < 228 and 3 <= ax3 and ax3 < 228, x[ax0, ax1, ax2 - 3, ax3 - 3], T.int8(0), dtype="int8", ) @T.prim_func def pad_op_after( x: T.Buffer((1, 16, 225, 225), "int8"), y: T.Buffer((1, 16, 231, 231), "int8") ): for i0, i1 in T.grid(1, 16): for i2, i3 in T.grid(231, 231): with T.block("pad_temp_pad_const"): ax0 = T.axis.spatial(1, 0) ax1, ax2, ax3 = T.axis.remap("SSS", [i1, i2, i3]) y[ax0, ax1, ax2, ax3] = T.int8(0) for i2, i3 in T.grid(225, 225): with T.block("pad_temp"): ax0 = T.axis.spatial(1, 0) ax1, ax2, ax3 = T.axis.remap("SSS", [i1, i2, i3]) y[ax0, ax1, ax2 + 3, ax3 + 3] = x[ax0, ax1, ax2, ax3] sch = tir.Schedule(pad_op, debug_mask="all") pad = sch.get_block("pad_temp") _, _, h, _ = sch.get_loops(pad) sch.decompose_padding(pad, h) check_decompose_padding(pad_op, sch.mod["main"], pad_op_after, check_run=True) def test_not_to_decompose_trivial_predicate(): """Test the case when the padding condition is trivial""" @T.prim_func def trivial_pad( x: 
T.Buffer((1, 16, 225, 225), "int8"), y: T.Buffer([1, 16, 225, 225], dtype="int8") ): for i0, i1, i2, i3 in T.grid(1, 16, 225, 225): with T.block("pad_temp"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) y[ax0, ax1, ax2, ax3] = T.if_then_else( 0 <= ax2 and ax2 < 225 and 0 <= ax3 and ax3 < 225, x[ax0, ax1, ax2, ax3], T.int8(0), dtype="int8", ) sch = tir.Schedule(trivial_pad, debug_mask="all") pad = sch.get_block("pad_temp") _, _, h, _ = sch.get_loops(pad) assert not sch.can_decompose_padding(pad, h) if __name__ == "__main__": tvm.testing.main()
18,913
44.140811
96
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_error_report.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import inspect import re import pytest import tvm import tvm.testing from tvm import tir from tvm.ir.diagnostics import override_renderer from tvm.script import from_source from tvm.script import tir as T def check_error(func, rel_lineno): check_error_re = re.compile(r"^.*# check_error: (.+)$") """check if TIR script throws error""" # Override the default renderer to accumulate errors errors = [] def render(e): for d in e.diagnostics: errors.append(d) override_renderer(render) # The diagnostic context throws an exception when it gets an error try: source_code = inspect.getsource(func) indent = len(re.match(r"^\s*", source_code).group(0)) source_code = "@T.prim_func\n" + "\n".join( line[indent:] for line in source_code.splitlines() ) from_source(source_code) except tvm.error.DiagnosticError as e: pass assert len(errors) == 1, errors if rel_lineno is None: return error = errors[0] assert ( error.span.line - 1 == rel_lineno or error.span.line == rel_lineno ), f"Expected error to be on line {rel_lineno}, but it was on {error.span.line - 1}" error_line = source_code.split("\n")[rel_lineno] m = check_error_re.match(error_line) if m: expected_error_text = m.group(1) error = error.message assert ( 
expected_error_text == error ), f'check_error expects "{expected_error_text} in str(errors): {error}' def test_buffer_bind(): def buffer_bind_missing_args(a: T.handle) -> None: A = T.match_buffer((16, 16), "float32") # error check_error(buffer_bind_missing_args, 2) def test_undefined_buffer(): def undefined_buffer(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") T.attr(A, "realize_scope", "") T.realize(C[0:16, 0:16], "") # error for i in T.serial(16): for j in T.serial(0, 16): A[i, j] = 0.0 check_error(undefined_buffer, 5) def test_unsupported_stmt(): def unsupported_stmt(a: T.int32) -> None: if a > 0: print("I love tvm") # error check_error(unsupported_stmt, 3) def test_unsupported_function_call(): def unsupported_function_call(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") T.attr(A, "realize_scope", "") T.realize(A[0:16, 0:16], "") for i in T.const_range(16): # error for j in T.serial(0, 16): A[i, j] = 0.0 check_error(unsupported_function_call, 6) def test_missing_type_annotation(): def missing_type_annotation(a) -> None: # error T.evaluate(0.0) check_error(missing_type_annotation, 1) def test_invalid_for_function(): def invalid_for_function(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") for i in T.evaluate(0.0): # error for j in T.serial(0, 16): A[i, j] = 0.0 check_error(invalid_for_function, 4) def test_invalid_block_function(): def invalid_block_function(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") with T.evaluate(0.0): # error T.evaluate(1.0) check_error(invalid_block_function, 4) def test_return_not_allowed(): def return_not_allowed(a: T.handle) -> None: return T.evaluate(0) # error check_error(return_not_allowed, 2) def test_no_body(): def no_body(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") T.realize(A, "") # error check_error(no_body, 3) def test_allocate_with_buffers(): def allocate_with_buffers() -> None: with T.allocate([1], "float32", "") as [A, B]: # error 
T.evaluate(1.0) check_error(allocate_with_buffers, 2) def test_inconsistent_binding(): def inconsistent_binding_value() -> None: for i, j in T.grid(16, 16): vi, vj = T.axis.remap("SS", [i]) # error T.evaluate(1.0) def inconsistent_binding_type() -> None: for i, j in T.grid(16, 16): vi, vj = T.axis.remap("S", [i, j]) # error T.evaluate(1.0) check_error(inconsistent_binding_value, 3) check_error(inconsistent_binding_type, 3) def test_error_remap_args(): def error_remap_type() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("TT", [i, j]) # error T.evaluate(1.0) def error_remap_value() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("SS", [i + j, j]) # error T.evaluate(1.0) check_error(error_remap_type, 4) check_error(error_remap_value, 4) def test_invalid_block_axes(): def invalid_block_axes(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") for i, j in T.grid(16, 16): with T.block(): vi = T.axis.S(i, A) # error T.evaluate(1.0) check_error(invalid_block_axes, 5) def test_duplicate_block_axes(): def duplicate_block_axes() -> None: for i, j in T.grid(16, 16): with T.block(): vi = T.axis.S(16, i) vi = T.axis.S(16, j) # error T.evaluate(1.0) def duplicate_block_axes_remap() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vi = T.axis.remap("SS", [i, j]) # error T.evaluate(1.0) check_error(duplicate_block_axes, 5) check_error(duplicate_block_axes_remap, 4) def test_miss_block_bind(): def miss_block_bind_value() -> None: for i, j in T.grid(128, 128): with T.block(): vi = T.axis.S(i) # error T.evaluate(1.0) check_error(miss_block_bind_value, 4) def test_invalid_loop_var(): def invalid_loop_var() -> None: for i, j in range(0, 16): # error T.evaluate(1.0) check_error(invalid_loop_var, 2) def test_inconsistent_grid(): def inconsistent_grid() -> None: for i in T.grid(16, 16): # error T.evaluate(1.0) check_error(inconsistent_grid, 2) def test_invalid_match_buffer_region(): def 
invalid_match_buffer_region() -> None: for i, j in T.grid(128, 128): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) A = T.match_buffer(vi) # error T.evaluate(1.0) check_error(invalid_match_buffer_region, 5) def test_duplicate_buffer(): def duplicate_buffer() -> None: A = T.alloc_buffer((128, 128), "float32") A = T.alloc_buffer((128, 128), "float32") # error check_error(duplicate_buffer, 3) def test_duplicate_block_signature(): def duplicate_reads() -> None: A = T.alloc_buffer((128, 128), "float32") for i, j in T.grid(128, 128): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[0:8, 0:8]) T.reads(A[0:16, 0:16]) # error T.evaluate(1.0) def duplicate_writes() -> None: A = T.alloc_buffer((128, 128), "float32") for i, j in T.grid(128, 128): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) T.writes(A[0:8, 0:8]) T.writes(A[0:16, 0:16]) # error T.evaluate(1.0) def duplicate_predicate() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) T.where(1) T.where(0) # error def duplicate_annotations() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) T.block_attr({}) T.block_attr({}) # error def duplicate_init() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) with T.init(): T.evaluate(1.0) with T.init(): # error T.evaluate(1.0) def duplicate_axes() -> None: for i, j in T.grid(16, 16): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) vi = T.axis.S(i, 16) # error T.evaluate(1.0) check_error(duplicate_reads, 7) check_error(duplicate_writes, 7) check_error(duplicate_predicate, 6) check_error(duplicate_annotations, 6) check_error(duplicate_init, 7) check_error(duplicate_axes, 5) def test_opaque_access_during_complete(): def opaque_access_during_complete(a: T.handle) -> None: # error A = T.match_buffer(a, (16, 16), "float32") for i, j in T.grid(16, 16): with T.block(): T.evaluate(T.call_extern("dummy_extern_function", A.data, dtype="int32")) 
check_error(opaque_access_during_complete, None) def test_convert_slice_to_bufferload(): def convert_slice_to_bufferload() -> None: A = T.alloc_buffer((128, 128), "float32") for i, j in T.grid(128, 128): with T.block(): vi, vj = T.axis.remap("SS", [i, j]) A[vi, vj] = A[vi : vi + 2, vj] + 1 # error check_error(convert_slice_to_bufferload, 6) def test_tvm_exception_catch(): def special_stmt_except() -> None: A = T.alloc_buffer("(128, 128)", "float32") # error T.evaluate(1.0) def scope_handler_except() -> None: for i in T.serial("1", "1"): # error T.evaluate(1) def intrin_except_unassign(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") T.evaluate(A) # error def intrin_except_assign(a: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") A[0, 0] = A[A] # error check_error(special_stmt_except, 2) check_error(scope_handler_except, 2) check_error(intrin_except_unassign, 3) check_error(intrin_except_assign, 3) def test_match_buffer_shape_mismatch(): def buffer_shape_mismatch(a: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 2): with T.block(): T.reads([]) T.writes([A[i, j * 4 : j * 4 + 4]]) sub_A = T.match_buffer( A[i, j * 4 : j * 4 + 4], (5) ) # error: shape mismatched between 4 and 5 for jj in range(0, 4): sub_A[i, j * 4 + jj] = 1 check_error(buffer_shape_mismatch, 7) def test_high_dim_store(): def high_dim_store() -> None: with T.block("root"): B = T.allocate([256], "float32", "global") for i, j in T.grid(16, 16): B[i, j] = 1.0 # error: Store is only allowed with one index check_error(high_dim_store, 5) def test_block_has_option_vars(): def block_has_option_vars() -> None: with T.block("root") as x: # error: block does not support option_vars T.evaluate(0.0) check_error(block_has_option_vars, 2) def test_implicit_root_has_attrs(): def implicit_root_has_read(): T.reads([]) # error: implicit root does not support reads T.evaluate(0.0) def implicit_root_has_write(): T.writes([]) # error: implicit root does not support writes 
T.evaluate(0.0) def implicit_root_has_attrs(): T.block_attr({}) # error: implicit root does not support block_attr T.evaluate(0.0) def implicit_root_has_predicate(): T.where(True) # error: implicit root does not support predicate T.evaluate(0.0) def implicit_root_has_axes(): v = T.axis.S(0, 0) # error: implicit root does not support axis define T.evaluate(0.0) check_error(implicit_root_has_read, 2) check_error(implicit_root_has_write, 2) check_error(implicit_root_has_attrs, 2) check_error(implicit_root_has_predicate, 2) check_error(implicit_root_has_axes, 2) @T.prim_func def elementwise_not_affine(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128, 128)) B = T.match_buffer(b, (128, 128, 128, 128)) for i, j, k, l in T.grid(128, 128, 128, 8): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) vl = T.axis.S(128, l * 16) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 @T.prim_func def elementwise_non_single_branch(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.alloc_buffer((128, 128, 128)) B = T.match_buffer(b, (128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = C[vi, vj, vk] * 2.0 def test_reorder_fail_block(): sch = tir.Schedule(elementwise_not_affine, debug_mask="all") block_b = sch.get_block("B") i, j, k, l = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError) as execinfo: sch.reorder(l, i) expected_sub_error_message = ( " # tir.Block#0\n" ' with T.block("B"):\n' " ^^^^^^^^^^^^^^^^^^\n" ) assert expected_sub_error_message in str(execinfo.value) def test_reorder_fail_nested_loop_inner(): sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError) as execinfo: 
sch.reorder(k, i) expected_sub_error_message = ( " for i in range(128):\n" " # tir.For#0\n" " for j in range(128):\n" " ^^^^^^^^^^^^^^^^^^^^\n" ) assert expected_sub_error_message in str(execinfo.value) def test_fuse_fail_nested_loop_outer(): sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all") block_b = sch.get_block("B") i, j, k = sch.get_loops(block_b) with pytest.raises(tvm.tir.ScheduleError) as execinfo: sch.fuse(k, i) expected_sub_error_message = ( " # tir.For#1\n" " for i in range(128):\n" " ^^^^^^^^^^^^^^^^^^^^\n" " for j in range(128):\n" ) assert expected_sub_error_message in str(execinfo.value) def test_report_error_root_block(): sch = tir.Schedule(elementwise_non_single_branch, debug_mask="all") root = sch.get_block("root") with pytest.raises(tvm.tir.ScheduleError) as execinfo: sch.compute_inline(root) expected_sub_error_message = ( " # tir.Block#0\n" ' with T.block("root"):\n' " ^^^^^^^^^^^^^^^^^^^^^\n" ) assert expected_sub_error_message in str(execinfo.value) def test_load_var(): def load_var_multiple() -> None: d = T.float32() d[2] = d[2, 1] # error cannot provide two indices to load check_error(load_var_multiple, 3) def test_store_var(): def store_var_multiple() -> None: d = T.float32() d[2, 1] = d[1] # error cannot provide two indices to store check_error(store_var_multiple, 3) def test_load_handle(): def load_handle(h: T.handle) -> None: h_ = T.match_buffer(h, [1]) h_[0] = h[0] # error cannot load from handle check_error(load_handle, 3) def test_store_handle(): def store_handle(h: T.handle) -> None: h_ = T.match_buffer(h, [1]) h[0] = h_[0] # error cannot store to handle check_error(store_handle, 3) def test_binop_bad_ast_type(): def binop_bad_ast_type(h: T.handle): h_ = T.match_buffer(h, [1]) h_[0] = h + [2] # error rhs should be a primexpr check_error(binop_bad_ast_type, 3) def test_binop_bad_type(): def binop_bad_type(h: T.handle): h_ = T.match_buffer(h, [1]) h_[0] = h + 2 # error lhs and rhs should be the same type 
check_error(binop_bad_type, 3) def test_non_integer_typed_block_iter(): def non_integer_typed_block_iter(): with T.block(): i = T.axis.S(0.1, 0.1) # error IterVar requires an integer dtype check_error(non_integer_typed_block_iter, 3) def test_illegal_buffer_slice(): def strided_buffer_region(A: T.handle): # do not allow stride in buffer region A = T.match_buffer((128, 128), "int32") with T.block(): T.reads([]) T.writes([A[0:128:2, 0:128:3]]) # error T.evaluate(T.call_extern("strided_compute", dtype="")) def access_reversed_slice(A: T.handle): # do not allow reversed slice step A = T.match_buffer((128,), "int32") A[0:128:-1] = T.broadcast(1, 128) # error def access_non_const_slice_length(A: T.handle): # do not allow non-constant slice length A = T.match_buffer((128,), "int32") for i in range(4): T.evaluate(A[0:i:1]) # error check_error(strided_buffer_region, 3) check_error(access_reversed_slice, 3) check_error(access_non_const_slice_length, 3) def test_syntax_sugar_fail(): def loop_syntax_sugar_fail(a: T.handle) -> None: A = T.match_buffer(a, (128,)) for i in T.thread_binding(128, 128): A[i] = A[i] * 2.0 check_error(loop_syntax_sugar_fail, 3) if __name__ == "__main__": tvm.testing.main()
18,638
29.859272
89
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_extern.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te import numpy as np import tvm.testing @tvm.testing.uses_gpu def test_add_pipeline(): nn = 64 max_threads = 4 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") def extern_generator(ins, outs): """Manually write the IR for the extern function, add pipeline""" ib = tvm.tir.ir_builder.create() with ib.for_range(0, (n + 1) // 2) as i: ib.emit( outs[0].vstore( i * 2, ins[0].vload(i * 2, "float32x2") + tvm.tir.const(1, "float32x2") ) ) return ib.get() def extern_generator_gpu(ins, outs): """Manually write the IR for the extern function, add pipeline""" ib = tvm.tir.ir_builder.create() bx = te.thread_axis("blockIdx.x") tx = te.thread_axis("threadIdx.x") ib.scope_attr(bx, "thread_extent", (nn + max_threads - 1) // max_threads) ib.scope_attr(tx, "thread_extent", max_threads) idx = bx.var * max_threads + tx.var with ib.if_scope(ib.likely(idx < n)): ib.emit( outs[0].vstore( idx * 2, ins[0].vload(idx * 2, "float32x2") + tvm.tir.const(1, "float32x2") ) ) return ib.get() C_cpu = te.extern(A.shape, [A], extern_generator, name="C") C_gpu = te.extern(A.shape, [A], extern_generator_gpu, name="C") s_cpu = te.create_schedule(C_cpu.op) s_gpu = te.create_schedule(C_gpu.op) 
print(tvm.lower(s_cpu, [A, C_cpu], simple_mode=True)) print(tvm.lower(s_gpu, [A, C_gpu], simple_mode=True)) def check_target(target): if not tvm.testing.device_enabled(target): return s = s_gpu if target in ["opencl", "cuda"] else s_cpu C = C_gpu if target in ["opencl", "cuda"] else C_cpu # build and invoke the kernel. f = tvm.build(s, [A, C], target) dev = tvm.device(target, 0) # launch the kernel. n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_target("llvm") check_target("opencl") check_target("cuda") def test_pack_buffer_simple(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") def extern_generator(ins, outs): """Manually write the IR for the extern function, add pipeline.""" return tvm.tir.call_packed("my_extern_array_func1", ins[0], outs[0]) C = te.extern(A.shape, [A], extern_generator, name="C") s = te.create_schedule(C.op) @tvm.register_func def my_extern_array_func1(aa, bb): aa.copyto(bb) def check_target(target): if not tvm.testing.device_enabled(target): return # build and invoke the kernel. f = tvm.build(s, [A, C], target) dev = tvm.cpu(0) # launch the kernel. 
n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) f(a, c) tvm.testing.assert_allclose(c.numpy(), a.numpy()) check_target("stackvm") check_target("llvm") def test_pack_buffer_intermediate(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") B = te.compute((n,), lambda i: A[i] + 1, name="B") def extern_generator(ins, outs): """Manually write the IR for the extern function, add pipeline.""" return tvm.tir.call_packed("my_extern_array_func2", ins[0], outs[0]) C = te.extern(B.shape, [B], extern_generator, name="C") s = te.create_schedule(C.op) def check_target(target): if not tvm.testing.device_enabled(target): return # build and invoke the kernel. f = tvm.build(s, [A, C], target) dev = tvm.cpu(0) # launch the kernel. n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) @tvm.register_func def my_extern_array_func2(aa, bb): assert aa.shape == a.shape tvm.testing.assert_allclose(aa.numpy(), a.numpy() + 1) aa.copyto(bb) f(a, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_target("llvm") if __name__ == "__main__": test_pack_buffer_simple() test_pack_buffer_intermediate() test_add_pipeline()
5,352
32.879747
95
py
tvm
tvm-main/tests/python/unittest/test_te_schedule_ops.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm from tvm import te from tvm.driver.build_module import schedule_to_module def test_const(): x = tvm.te.const(1, "int32") assert x.dtype == "int32" assert isinstance(x, tvm.tir.IntImm) def test_schedule0(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") s = te.create_schedule(A1.op) mod = schedule_to_module(s, [A, A1]) assert isinstance(mod["main"], tvm.tir.PrimFunc) def test_schedule1(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") s = te.create_schedule(A1.op) xo, xi = s[A1].split(A1.op.axis[0], 8) s[A1].pragma(xo, "auto_unroll_max_step", 10) mod = schedule_to_module(s, [A, A1]) assert isinstance(mod["main"], tvm.tir.PrimFunc) def test_schedule2(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) xo, xi = s[A2].split(A2.op.axis[0], 8) s[A1].compute_at(s[A2], xo) mod = schedule_to_module(s, [A, A2]) assert isinstance(mod["main"], tvm.tir.PrimFunc) 
def test_schedule_scan(): m = te.var("m") n = te.var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: x[0, i]) s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i]) res = tvm.te.scan(s_init, s_update, s_state) assert tuple(res.shape) == (m, n) s = te.create_schedule(res.op) s = s.normalize() ir = tvm.lower(s, [s_state], simple_mode=True) bounds = tvm.te.schedule.InferBound(s) assert bounds[res.op.scan_axis].min.value == 1 stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_inline_multi_reduce(): def argmax_comp(x, y): idx = tvm.tir.Select((x[1] >= y[1]), x[0], y[0]) val = tvm.tir.Select((x[1] >= y[1]), x[1], y[1]) return idx, val def argmax_init(idx_typ, val_typ): return tvm.tir.const(-1, idx_typ), tvm.te.min_value(val_typ) argmax = te.comm_reducer(argmax_comp, argmax_init, name="argmax") m = te.var("m") n = te.var("n") val = te.placeholder((m, n), name="val", dtype="float32") val1 = te.compute((m, n), lambda i, j: val[i, j] + 1, name="val1") val2 = te.compute((m, n), lambda i, j: te.exp(val1[i, j]), name="val2") k = te.reduce_axis((0, n), "k") T_idx, T_val = te.compute((m,), lambda i: argmax((k.var, val2[i, k]), axis=k), name="T") s = te.create_schedule(T_idx.op) s[val1].compute_inline() s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_auto_inline(): def elemwise(): m = te.var("m") n = te.var("n") A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.placeholder((m, n), name="C") T1 = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="T1") T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2") return te.create_schedule(T2.op), T1 def broadcast(): m = te.var("m") n = te.var("n") A = te.placeholder((1,), name="A") B = te.placeholder((m, n), name="B") C = te.placeholder((m, n), name="C") T1 = te.compute((m, n), lambda i, j: A(0) * B(i, 
j), name="T1", tag="broadcast") T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2") return te.create_schedule(T2.op), T1 def injective(): m = te.var("m") n = te.var("n") A = te.placeholder((m,), name="A") B = te.placeholder((m, n), name="B") C = te.placeholder((m, n), name="C") T1 = te.compute((m, n), lambda i, j: A(i) * B(i, j), name="T1") T2 = te.compute((m, n), lambda i, j: T1(i, j) + C(i, j), name="T2") return te.create_schedule(T2.op), T1 def check_auto_inline(schedule_func, auto_inline_func): s, T1 = schedule_func() # before auto inline the attach type is AttachType.kGroupRoot assert s[T1].attach_type == 1 auto_inline_func(s) # after auto inline the attach type is AttachType.kInline assert s[T1].attach_type == 2 s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) check_auto_inline(elemwise, tvm.te.schedule.AutoInlineElemWise) check_auto_inline(broadcast, tvm.te.schedule.AutoInlineBroadcast) check_auto_inline(injective, tvm.te.schedule.AutoInlineInjective) def test_schedule_const_bound(): n = 128 A = te.placeholder((n,), name="A") A1 = te.compute((n,), lambda i: A[i] + 1, name="A1") s = te.create_schedule(A1.op) xo, xi = s[A1].split(A1.op.axis[0], 8) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_inline_mixed(): n = te.var("n") A = te.placeholder((n,), name="A") A1 = te.compute(A.shape, lambda *i: A(*i) + 1, name="A1") A2 = te.compute(A.shape, lambda *i: A1(*i) + 2, name="A2") C = te.compute((n,), lambda i: A2[i] + A1[i], name="C") s = te.create_schedule(C.op) xo, xi = s[C].split(C.op.axis[0], factor=8) s[A1].compute_at(s[C], xo) s[A2].compute_inline() s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def check(x): if isinstance(x, tvm.tir.Call): assert x.func != A2 tvm.tir.stmt_functor.post_order_visit(s[C].op.body[0], check) def test_scan_inline1(): 
m = te.var("m") n = te.var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") s_state1 = te.placeholder((m, n)) s_state2 = te.placeholder((m, n)) s_init1 = te.compute((1, n), lambda _, i: x[0, i]) s_init2 = te.compute((1, n), lambda _, i: x[0, i]) s_x1 = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="x1") s_x2 = te.compute((m, n), lambda t, i: s_state2[t - 1, i] + 1, name="x2") s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1") s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2") res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2]) s = te.create_schedule(res1.op) s[s_x1].compute_inline() stmt = tvm.lower(s, [x, res1, res2]) def test_scan_inline2(): m = te.var("m") n = te.var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") s_state1 = te.placeholder((m, n)) s_state2 = te.placeholder((m, n)) s_init1 = te.compute((1, n), lambda _, i: x[0, i]) s_init2 = te.compute((1, n), lambda _, i: x[0, i]) s_xx = te.compute((m, n), lambda t, i: s_state1[t - 1, i] + x[t, i], name="xx") s_x1 = te.compute((m, n), lambda t, i: s_xx[t, i] + 1, name="x1") s_x2 = te.compute((m, n), lambda t, i: s_xx[t, i] + s_state2[t - 1, 2], name="x2") s_update1 = te.compute((m, n), lambda t, i: s_x1[t, i], "u1") s_update2 = te.compute((m, n), lambda t, i: s_x2[t, i], "u2") res1, res2 = tvm.te.scan([s_init1, s_init2], [s_update1, s_update2], [s_state1, s_state2]) s = te.create_schedule(res1.op) s[s_xx].compute_inline() s[s_x1].compute_inline() s[s_x2].compute_inline() stmt = tvm.lower(s, [x, res1, res2]) def test_schedule_cache(): m = te.var("m") n = te.var("n") A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C") s = te.create_schedule(C.op) AA = s.cache_read(A, "shared", readers=[C]) CC = s.cache_write(C, "shared") s[AA].compute_at(s[CC], CC.op.axis[0]) bounds = 
tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_schedule_middle_cache(): m = te.var("m") n = te.var("n") A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C") D = te.compute((m, n), lambda i, j: C(i, j), name="D") s = te.create_schedule(D.op) AA = s.cache_read(A, "local", readers=[C]) BB = s.cache_read(B, "local", readers=[C]) CC = s.cache_read(C, "local", readers=[D]) DD = s.cache_write(D, "local") # s[AA].compute_at(s[CC], CC.op.axis[0]) bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_schedule_cache_relayout1(): m = te.var("m") n = te.var("n") A = te.placeholder((m, n), name="A") B = te.placeholder((m, n), name="B") C = te.compute((m, n), lambda i, j: A(i, j) * B(i, j), name="C") s = te.create_schedule(C.op) s[C].reorder(C.op.axis[1], C.op.axis[0]) CC = s.cache_write(C, "global") bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_schedule_cache_relayout2(): m = te.var("m") n = te.var("n") A = te.placeholder((m * 4, n), name="A") B = te.placeholder((m * 4, n), name="B") C = te.compute(A.shape, lambda i, j: A(i, j) * B(i, j), name="C") s = te.create_schedule(C.op) x, y = C.op.axis xo, xi = s[C].split(x, factor=4) s[C].reorder(xo, y, xi) CC = s.cache_write(C, "global") s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_schedule_cache_relayout3(): m = te.var("m") n = te.var("n") A = te.placeholder((m * 4, n), name="A") B = te.placeholder((m * 4, n), name="B") k = te.reduce_axis((0, n), "k") C = te.compute((A.shape[0],), lambda i: te.sum(A(i, k) * B(i, k), axis=k), name="C") s = te.create_schedule(C.op) x = C.op.axis[0] xo, xi = s[C].split(x, factor=4) CC = s.cache_write(C, "global") s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def 
test_schedule_cache_relayout4(): def _compute(*indice): return A(*indice) + 1, B(*indice) / 2 m = te.var("m") n = te.var("n") A = te.placeholder((m * 4, n), name="A") B = te.placeholder((m * 4, n), name="B") C1, C2 = te.compute(A.shape, _compute, name="C") s = te.create_schedule([C1.op, C2.op]) C1_cache, C2_cache = s.cache_write([C1, C2], "local") s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def intrin_gemv(m, n): w = te.placeholder((m, n), name="w") x = te.placeholder((n,), name="x") k = te.reduce_axis((0, n), name="k") z = te.compute((m,), lambda i: te.sum(w[i, k] * x[k], axis=k), name="z") Wb = tvm.tir.decl_buffer( w.shape, w.dtype, name="W", offset_factor=16, strides=[te.var("ldw"), 1] ) def intrin_func(ins, outs): ww, xx = ins zz = outs[0] ww_ptr = ww.access_ptr("r") xx_ptr = xx.access_ptr("r") zz_ptr = zz.access_ptr("w") body = tvm.tir.call_packed("gemm", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0]) reset = tvm.tir.call_packed("fill_zero", zz_ptr, n) update = tvm.tir.call_packed("gemv_add", ww_ptr, xx_ptr, zz_ptr, n, ww.strides[0]) return body, reset, update buffer_params = {"data_alignment": 16, "offset_factor": 16} return te.decl_tensor_intrin( z.op, intrin_func, binds={w: Wb}, default_buffer_params=buffer_params ) def test_schedule_tensor_compute1(): # basic: split, reorder, tile M, N, L = 2048, 1024, 512 factor, rfactor = 16, 16 A = te.placeholder((N // factor, L // rfactor, factor, rfactor), name="A") B = te.placeholder((M, L // rfactor, rfactor), name="B") k = te.reduce_axis((0, L // rfactor), name="k") gemv = intrin_gemv(factor, rfactor) C = te.compute( (N, M // factor, factor), lambda i, j: gemv(A[i, k, 0:factor, 0:factor], B[j, k, 0:rfactor], reduce_axis=k), name="C", ) s = te.create_schedule(C.op) ai, aj, ax = s[C].op.axis aio, aii = s[C].split(ai, 16) s[C].reorder(aio, aj, aii) aioo, ajo, aioi, aji = s[C].tile(aio, aj, 16, 4) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = 
tvm.te.schedule.ScheduleOps(s, bounds) def intrin_vadd(n, cache_read=False, cache_write=False): scope_ubuf = "local" dtype = "float32" x = te.placeholder((n,), dtype=dtype, name="vx") y = te.placeholder((n,), dtype=dtype, name="vy") z = te.compute(x.shape, lambda i: x[i] + y[i], name="z") s = te.create_schedule(z.op) def create_buffer(t): return tvm.tir.decl_buffer( t.shape, t.dtype, name="W" + t.name, scope=scope_ubuf, offset_factor=16 ) binds = {} if cache_read: binds[x] = create_buffer(x) binds[y] = create_buffer(y) if cache_write: binds[z] = create_buffer(z) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_extern( outs[0].dtype, "vadd", ins[0].access_ptr("r"), ins[1].access_ptr("r"), outs[0].access_ptr("wr"), ) ) return ib.get() return te.decl_tensor_intrin( z.op, intrin_func, binds=binds, default_buffer_params={"offset_factor": 16} ) def test_schedule_tensor_compute2(): # cache_read, cache_write M = 1024 factor = 16 dtype = "float32" scope_ubuf = "local" A = te.placeholder((M // factor, factor), name="A", dtype=dtype) B = te.placeholder((M // factor, factor), name="B", dtype=dtype) vadd = intrin_vadd(factor, True, True) C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], B[i, 0:factor]), name="C") s = te.create_schedule(C.op) AL = s.cache_read(A, scope_ubuf, C) BL = s.cache_read(B, scope_ubuf, C) CL = s.cache_write(C, scope_ubuf) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_schedule_tensor_compute3(): # compute_at M = 1024 factor = 16 dtype = "float32" A = te.placeholder((M // factor, factor), name="A", dtype=dtype) B = te.placeholder((M // factor, factor), name="B", dtype=dtype) Bi = te.compute((M // factor, factor), lambda i, j: B[i, j] + 5, name="Bi") vadd = intrin_vadd(factor) C = te.compute((M // factor, factor), lambda i: vadd(A[i, 0:factor], Bi[i, 0:factor]), name="C") s = te.create_schedule(C.op) s[Bi].compute_at(s[C], C.op.axis[0]) s = 
s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_loop_dep_reduce(): X = te.placeholder(shape=(10,), name="x") def f(n): rv = te.reduce_axis((0, n)) return te.sum(X[rv], axis=rv) Y = te.compute(X.shape, f, name="y") s = te.create_schedule([Y.op]) f = tvm.build(s, [X, Y]) def test_loop_dep_reduce_cache_write(): X = te.placeholder(shape=(10,), name="x") def f(n): rv = te.reduce_axis((0, n)) init = lambda dtype: tvm.tir.Select(n > 1, tvm.tir.const(0, dtype), n.astype(dtype)) sum = te.comm_reducer(lambda x, y: tvm.te.max(x + y, n.astype("float32")), init, name="sum") return sum(X[rv], axis=rv) Y = te.compute(X.shape, f, name="y") s = te.create_schedule([Y.op]) s.cache_write(Y, "local") f = tvm.build(s, [X, Y]) def test_reduction_and_dummy_fuse_split(): n = 10 X = te.placeholder(shape=(n,), dtype="int32", name="X") k = te.reduce_axis((0, n)) Y = te.compute((), lambda: te.sum(X[k], k), name="Y") s = te.create_schedule([Y.op]) ax = s[Y.op].fuse(*Y.op.axis) axo, axi = s[Y.op].split(ax, nparts=20) f = tvm.build(s, [Y, X]) args = [tvm.nd.empty((), "int32")] + [tvm.nd.array(np.ones((n,), dtype="int32"))] f(*args) assert args[0].numpy() == n n = 10 X = te.placeholder(shape=(n,), dtype="int32", name="X") k = te.reduce_axis((0, n)) Y = te.compute((n,), lambda i: te.sum(X[k], k), name="Y") s = te.create_schedule([Y.op]) ax = s[Y.op].fuse(*(list(Y.op.axis) + list(Y.op.reduce_axis))) f = tvm.build(s, [Y, X]) args = [tvm.nd.array(np.ones((n,), dtype="int32"))] + [ tvm.nd.array(np.ones((n,), dtype="int32")) ] f(*args) assert np.all(args[0].numpy() == n) def test_schedule_compute_inline(): shape = [10, 1024] A = te.placeholder(shape, name="A") B = te.placeholder(shape, name="B") C = te.compute(shape, lambda *index: A(*index) + B(*index), name="C") def _compute(*index): return C(*index), C(*index) * B(*index) F, E = te.compute(shape, _compute, name="F") s = te.create_schedule([F.op, E.op]) AL = s.cache_read(A, "local", [C]) BL = 
s.cache_read(B, "local", [C, E]) CL = s.cache_write(C, "local") FL, EL = s.cache_write([F, E], "local") s[C].compute_inline() s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) def test_local_stage_predicate(): m = 1 n = 3 p = 2 A = tvm.te.placeholder((m, n, p), name="A") B = tvm.te.compute((m, n, p), lambda bi, bj, bk: A[bi, bj, bk], name="B") C = tvm.te.compute((m, n, p), lambda ci, cj, ck: B[ci, cj, ck], name="C") by = tvm.te.thread_axis("blockIdx.y") tx = tvm.te.thread_axis("threadIdx.x") vx = tvm.te.thread_axis("vthread") def schedule(thread_tag, mem_scope): s = tvm.te.create_schedule(C.op) s[B].compute_at(s[C], s[C].op.axis[0]) s[B].set_scope(mem_scope) bno, bni = s[B].split(s[B].op.axis[1], n) bx = tvm.te.thread_axis("blockIdx.x") s[C].bind(s[C].op.axis[0], bx) s[C].bind(s[C].op.axis[1], thread_tag) s[B].bind(bni, thread_tag) return s def collect_visit(stmt, f): ret = [] tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x))) return ret # local vs. threadIdx s = schedule(tx, "local") lowered_body = tvm.lower(s, [A, C])["main"].body assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse))) # local vs. vthread s = schedule(vx, "local") lowered_body = tvm.lower(s, [A, C])["main"].body assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse))) # shared vs. 
blockIdx s = schedule(by, "shared") lowered_body = tvm.lower(s, [A, C])["main"].body assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse))) def test_local_stage_predicate2(): A = tvm.te.placeholder((128,), name="A") B = tvm.te.compute((128,), lambda bi: A[bi] + 1, name="B") C = tvm.te.compute((128,), lambda ci: B[ci] + 2, name="C") s = tvm.te.create_schedule(C.op) AA = s.cache_read(A, "local", [B]) s[B].set_scope("shared") block_x = tvm.te.thread_axis("blockIdx.x") thread_x = tvm.te.thread_axis((0, 32), "threadIdx.x") oc, ic = s[C].split(s[C].op.axis[0], factor=64) ooc, ioc = s[C].split(oc, factor=2) oic, iic = s[C].split(ic, factor=32) s[C].bind(ooc, block_x) s[C].bind(iic, thread_x) s[B].compute_at(s[C], ioc) ob, ib = s[B].split(s[B].op.axis[0], factor=32) s[B].bind(ib, thread_x) s[AA].compute_root() s[AA].compute_at(s[C], ooc) oaa, iaa = s[AA].split(s[AA].op.axis[0], factor=32) s[AA].bind(iaa, thread_x) lowered_body = tvm.lower(s, [A, C])["main"].body def collect_visit(stmt, f): ret = [] tvm.tir.stmt_functor.post_order_visit(stmt, lambda x: ret.append(f(x))) return ret def visit_stmt(op): if isinstance(op, tvm.tir.Allocate): return op.extents[0].value == 97 return False assert not any(collect_visit(lowered_body, lambda x: isinstance(x, tvm.tir.IfThenElse))) assert any(collect_visit(lowered_body, visit_stmt)) def test_schedule_record_gemm(): with tvm.transform.PassContext(config={"te.keep_schedule_record": True}): M, K, N = 1024, 1024, 1024 k = te.reduce_axis((0, K), "k") A = te.placeholder((M, K), name="A") B = te.placeholder((K, N), name="B") C = te.compute((M, N), lambda m, n: te.sum(A[m, k] * B[k, n], axis=k), name="C") s = te.create_schedule(C.op) # currently there are no other applied primitives # size of schedule record is expected to be 1 (vanilla schedule) assert len(s.schedule_record) == 1 # apply sequential optimizatoin primitives block_size, factor = 32, 8 # tile -> split + split + reorder mo, no, mi, ni = 
s[C].tile(C.op.axis[0], C.op.axis[1], block_size, block_size) ko, ki = s[C].split(k, factor=factor) s[C].reorder(mo, ko, no, mi, ki, ni) s[C].vectorize(ni) s[C].parallel(mo) assert len(s.schedule_record) == 8 # compare primitive names expected_names = [ "vanilla", "split", "split", "reorder", "split", "reorder", "vectorize", "parallel", ] for i in range(len(s.schedule_record)): assert s.primitive_record[i] == expected_names[i] def test_schedule_record_misc(): s = te.create_schedule([]) # size of schedule record is expected to be 0 (no storing behavior) assert len(s.schedule_record) == 0 with tvm.transform.PassContext(config={"te.keep_schedule_record": True}): s = te.create_schedule([]) # size of schedule record is expected to be 1 (vanilla schedule) assert len(s.schedule_record) == 1 stg = te.compute((), lambda *args: 0, name="empty_op") s = te.create_schedule(stg.op) # size of schedule record is expected to be 1 (vanilla schedule) assert len(s.schedule_record) == 1 if __name__ == "__main__": test_loop_dep_reduce() test_loop_dep_reduce_cache_write() test_schedule_middle_cache() test_inline_multi_reduce() test_schedule_cache_relayout4() test_schedule_cache_relayout3() test_schedule_cache_relayout2() test_schedule_cache_relayout1() test_schedule_const_bound() test_scan_inline1() test_scan_inline2() test_inline_mixed() test_auto_inline() test_schedule_scan() test_schedule0() test_schedule1() test_schedule2() test_schedule_cache() test_schedule_tensor_compute1() test_schedule_tensor_compute2() test_schedule_tensor_compute3() test_reduction_and_dummy_fuse_split() test_schedule_compute_inline() test_local_stage_predicate() test_local_stage_predicate2() test_schedule_record_gemm() test_schedule_record_misc()
23,954
33.418103
100
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_vm_basic.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te from tvm.script import tir as T, ir as I import numpy as np def run_jit(fapi, check): for target in ["llvm", "stackvm"]: if not tvm.testing.device_enabled(target): continue f = tvm.driver.build(fapi, target=target) s = f.get_source() check(f) def test_stack_vm_basic(): a = tvm.nd.array(np.zeros(10, dtype="float32")) @tvm.register_func def tvm_call_back_get_shape(shape0): print(shape0) assert shape0 == a.shape[0] n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), "float32") stmt = tvm.tir.Evaluate(tvm.tir.call_packed("tvm_call_back_get_shape", Ab.shape[0])) mod = tvm.IRModule.from_expr( tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "print_shape") ) run_jit(mod, lambda f: f(a)) @tvm.register_func def tvm_stack_vm_print(*x): print(x) def test_stack_vm_loop(): dtype = "int64" n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), dtype) i = te.size_var("i") ib = tvm.tir.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n - 1, "i") as i: A[i + 1] = A[i] + 1 ib.emit(tvm.tir.call_packed("tvm_stack_vm_print", i)) stmt = ib.get() mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "ramp")) a = 
tvm.nd.array(np.zeros(10, dtype=dtype)) def check(f): f(a) np.testing.assert_equal(a.numpy(), np.arange(a.shape[0])) run_jit(mod, check) def test_stack_vm_cond(): dtype = "int64" n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), dtype) ib = tvm.tir.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n - 1, "i") as i: with ib.if_scope(tvm.tir.EQ(i, 4)): A[i + 1] = A[i] + 1 with ib.else_scope(): A[i + 1] = A[i] + 2 stmt = ib.get() mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test")) def check(f): a = tvm.nd.array(np.zeros(10, dtype=dtype)) f(a) y = np.arange(a.shape[0]) * 2 y[5:] -= 1 np.testing.assert_equal(a.numpy(), y) run_jit(mod, check) def test_vm_parallel(): dtype = "int64" n = te.size_var("n") Ab = tvm.tir.decl_buffer((n,), dtype) i = te.size_var("i") ib = tvm.tir.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n, "i", kind="parallel") as i: A[i] = A[i] + 1 stmt = ib.get() mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([Ab], stmt).with_attr("global_symbol", "test")) def check(f): a = tvm.nd.array(np.zeros(10, dtype=dtype)) f(a) np.testing.assert_equal(a.numpy(), np.ones(a.shape[0])) run_jit(mod, check) def test_codegen_decl_buffer(): """The codegen should accept DeclBuffer nodes in its input""" @I.ir_module class mod: @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr({"global_symbol": "kernel"}) A_buf = T.decl_buffer([256], dtype="float32", scope="global", data=A_data) target = tvm.target.Target("stackvm") stackvm_codegen = tvm.get_global_func("target.build.stackvm") stackvm_codegen(mod, target) if __name__ == "__main__": tvm.testing.main()
4,136
27.729167
97
py
tvm
tvm-main/tests/python/unittest/test_tvm_testing_features.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import sys import pytest import tvm.testing # This file tests features in tvm.testing, such as verifying that # cached fixtures are run an appropriate number of times. As a # result, the order of the tests is important. Use of --last-failed # or --failed-first while debugging this file is not advised. If # these tests are distributed/parallelized using pytest-xdist or # similar, all tests in this file should run sequentially on the same # node. 
(See https://stackoverflow.com/a/59504228) class TestTargetAutoParametrization: targets_used = [] devices_used = [] enabled_targets = [target for target, dev in tvm.testing.enabled_targets()] enabled_devices = [dev for target, dev in tvm.testing.enabled_targets()] def test_target_parametrization(self, target): assert target in self.enabled_targets self.targets_used.append(target) def test_device_parametrization(self, dev): assert dev in self.enabled_devices self.devices_used.append(dev) def test_all_targets_used(self): assert sorted(self.targets_used) == sorted(self.enabled_targets) def test_all_devices_used(self): sort_key = lambda dev: (dev.device_type, dev.device_id) assert sorted(self.devices_used, key=sort_key) == sorted(self.enabled_devices, key=sort_key) targets_with_explicit_list = [] @tvm.testing.parametrize_targets("llvm") def test_explicit_list(self, target): assert target == "llvm" self.targets_with_explicit_list.append(target) def test_no_repeats_in_explicit_list(self): if tvm.testing.device_enabled("llvm"): assert self.targets_with_explicit_list == ["llvm"] else: assert self.targets_with_explicit_list == [] targets_with_exclusion = [] @tvm.testing.exclude_targets("llvm") def test_exclude_target(self, target): assert "llvm" not in target self.targets_with_exclusion.append(target) def test_all_nonexcluded_targets_ran(self): assert sorted(self.targets_with_exclusion) == sorted( [target for target in self.enabled_targets if not target.startswith("llvm")] ) run_targets_with_known_failure = [] @tvm.testing.known_failing_targets("llvm") def test_known_failing_target(self, target): # This test runs for all targets, but intentionally fails for # llvm. The behavior is working correctly if this test shows # up as an expected failure, xfail. 
self.run_targets_with_known_failure.append(target) assert "llvm" not in target def test_all_targets_ran(self): assert sorted(self.run_targets_with_known_failure) == sorted(self.enabled_targets) @tvm.testing.known_failing_targets("llvm") @tvm.testing.parametrize_targets("llvm") def test_known_failing_explicit_list(self, target): assert target != "llvm" class TestJointParameter: param1_vals = [1, 2, 3] param2_vals = ["a", "b", "c"] independent_usages = 0 param1 = tvm.testing.parameter(*param1_vals) param2 = tvm.testing.parameter(*param2_vals) joint_usages = 0 joint_param_vals = list(zip(param1_vals, param2_vals)) joint_param_ids = ["apple", "pear", "banana"] joint_param1, joint_param2 = tvm.testing.parameters(*joint_param_vals, ids=joint_param_ids) def test_using_independent(self, param1, param2): type(self).independent_usages += 1 def test_independent(self): assert self.independent_usages == len(self.param1_vals) * len(self.param2_vals) def test_using_joint(self, joint_param1, joint_param2): type(self).joint_usages += 1 assert (joint_param1, joint_param2) in self.joint_param_vals def test_joint(self): assert self.joint_usages == len(self.joint_param_vals) def test_joint_test_id(self, joint_param1, joint_param2, request): param_string = ( request.node.name.replace(request.node.originalname, "") .replace("[", "") .replace("]", "") ) assert param_string in self.joint_param_ids class TestFixtureCaching: param1_vals = [1, 2, 3] param2_vals = ["a", "b", "c"] param1 = tvm.testing.parameter(*param1_vals) param2 = tvm.testing.parameter(*param2_vals) uncached_calls = 0 cached_calls = 0 @tvm.testing.fixture def uncached_fixture(self, param1): type(self).uncached_calls += 1 return 2 * param1 def test_use_uncached(self, param1, param2, uncached_fixture): assert 2 * param1 == uncached_fixture def test_uncached_count(self): assert self.uncached_calls == len(self.param1_vals) * len(self.param2_vals) @tvm.testing.fixture(cache_return_value=True) def cached_fixture(self, param1): 
type(self).cached_calls += 1 return 3 * param1 def test_use_cached(self, param1, param2, cached_fixture): assert 3 * param1 == cached_fixture def test_cached_count(self): cache_disabled = bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0"))) if cache_disabled: assert self.cached_calls == len(self.param1_vals) * len(self.param2_vals) else: assert self.cached_calls == len(self.param1_vals) class TestCachedFixtureIsCopy: param = tvm.testing.parameter(1, 2, 3, 4) @tvm.testing.fixture(cache_return_value=True) def cached_mutable_fixture(self): return {"val": 0} def test_modifies_fixture(self, param, cached_mutable_fixture): assert cached_mutable_fixture["val"] == 0 # The tests should receive a copy of the fixture value. If # the test receives the original and not a copy, then this # will cause the next parametrization to fail. cached_mutable_fixture["val"] = param class TestBrokenFixture: # Tests that use a fixture that throws an exception fail, and are # marked as setup failures. The tests themselves are never run. # This behavior should be the same whether or not the fixture # results are cached. 
num_uses_broken_uncached_fixture = 0 num_uses_broken_cached_fixture = 0 @tvm.testing.fixture def broken_uncached_fixture(self): raise RuntimeError("Intentionally broken fixture") @pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True) def test_uses_broken_uncached_fixture(self, broken_uncached_fixture): type(self).num_uses_broken_fixture += 1 def test_num_uses_uncached(self): assert self.num_uses_broken_uncached_fixture == 0 @tvm.testing.fixture(cache_return_value=True) def broken_cached_fixture(self): raise RuntimeError("Intentionally broken fixture") @pytest.mark.xfail(True, reason="Broken fixtures should result in a failing setup", strict=True) def test_uses_broken_cached_fixture(self, broken_cached_fixture): type(self).num_uses_broken_cached_fixture += 1 def test_num_uses_cached(self): assert self.num_uses_broken_cached_fixture == 0 class TestAutomaticMarks: @staticmethod def check_marks(request, target): decorators = tvm.testing.plugin._target_to_requirement(target) required_marks = [decorator.mark for decorator in decorators] applied_marks = list(request.node.iter_markers()) for required_mark in required_marks: assert required_mark in applied_marks def test_automatic_fixture(self, request, target): self.check_marks(request, target) @tvm.testing.parametrize_targets def test_bare_parametrize(self, request, target): self.check_marks(request, target) @tvm.testing.parametrize_targets("llvm", "cuda", "vulkan") def test_explicit_parametrize(self, request, target): self.check_marks(request, target) @pytest.mark.parametrize("target", ["llvm", "cuda", "vulkan"]) def test_pytest_mark(self, request, target): self.check_marks(request, target) @pytest.mark.parametrize("target,other_param", [("llvm", 0), ("cuda", 1), ("vulkan", 2)]) def test_pytest_mark_covariant(self, request, target, other_param): self.check_marks(request, target) @pytest.mark.skipif( bool(int(os.environ.get("TVM_TEST_DISABLE_CACHE", "0"))), reason="Cannot test 
cache behavior while caching is disabled", ) class TestCacheableTypes: class EmptyClass: pass @tvm.testing.fixture(cache_return_value=True) def uncacheable_fixture(self): return self.EmptyClass() def test_uses_uncacheable(self, request): # Normally the num_tests_use_this_fixture would be set before # anything runs. For this test case only, because we are # delaying the use of the fixture, we need to manually # increment it. self.uncacheable_fixture.num_tests_use_this_fixture[0] += 1 with pytest.raises(TypeError): request.getfixturevalue("uncacheable_fixture") class ImplementsReduce: def __reduce__(self): return super().__reduce__() @tvm.testing.fixture(cache_return_value=True) def fixture_with_reduce(self): return self.ImplementsReduce() def test_uses_reduce(self, fixture_with_reduce): pass class ImplementsDeepcopy: def __deepcopy__(self, memo): return type(self)() @tvm.testing.fixture(cache_return_value=True) def fixture_with_deepcopy(self): return self.ImplementsDeepcopy() def test_uses_deepcopy(self, fixture_with_deepcopy): pass if __name__ == "__main__": tvm.testing.main()
10,430
34.359322
100
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_mlt_tc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring,line-too-long,invalid-name,too-many-locals,too-many-statements,too-many-nested-blocks,too-many-branches,too-many-lines,chained-comparison import pytest import tvm import tvm.testing from tvm import meta_schedule as ms from tvm import te from tvm.meta_schedule.testing import te_workload from tvm.meta_schedule.testing.space_generation import ( check_sketches, generate_design_space, get_rules, print_sketches, ) from tvm.script import tir as T from tvm.tir.tensor_intrin.cuda import get_wmma_intrin_group def multi_level_tiling_tensor_core( *, read_reuse_scope="shared", write_reuse_scope="shared", in_dtype="float16", out_dtype="float32", trans_b=False, use_software_pipeline=False, ) -> ms.schedule_rule.ScheduleRule: assert read_reuse_scope in ["shared", "shared.dyn"] assert write_reuse_scope in ["shared", "shared.dyn", "global"] if not isinstance(in_dtype, list): in_dtype = [in_dtype] if not isinstance(out_dtype, list): out_dtype = [out_dtype] if not isinstance(trans_b, list): trans_b = [trans_b] return ms.schedule_rule.MultiLevelTilingTensorCore( intrin_groups=[ get_wmma_intrin_group( read_reuse_scope, 
write_reuse_scope, _in_dtype, _out_dtype, _trans_b ) for _in_dtype in in_dtype for _out_dtype in out_dtype for _trans_b in trans_b ], structure="SSSRRSRS", tile_binds=["blockIdx.y", "blockIdx.x", "threadIdx.y"], max_innermost_factor=4, # 64 // tensor intrin size vector_load_lens=[1, 2, 3, 4, 8, 16], reuse_read=ms.schedule_rule.ReuseType( req="must", levels=[4], scope=read_reuse_scope, ), reuse_write=ms.schedule_rule.ReuseType( req="must" if write_reuse_scope.startswith("shared") else "no", levels=[2], scope=write_reuse_scope, ), use_software_pipeline=use_software_pipeline, ) @pytest.mark.parametrize("shared_scope", ["shared", "shared.dyn"]) def test_matmul_relu(shared_scope): intrin_suffix = shared_scope.replace(".", "_") # fmt: off @T.prim_func def matmul_relu_0(A: T.Buffer((128, 128), "float16"), B: T.Buffer((128, 128), "float16"), compute: T.Buffer((128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) # with T.block("root"): C_reindex_shared = T.alloc_buffer((4, 8, 2, 1, 16, 16), scope=shared_scope) C_reindex_shared_wmma_accumulator = T.alloc_buffer((4, 8, 2, 1, 16, 16), scope="wmma.accumulator") A_reindex_shared = T.alloc_buffer((128, 128), "float16", scope=shared_scope) B_reindex_shared = T.alloc_buffer((128, 128), "float16", scope=shared_scope) A_reindex_shared_wmma_matrix_a = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_a") B_reindex_shared_wmma_matrix_b = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_b") for ax0_0_0_ax1_0_0_fused in T.thread_binding(8, thread="blockIdx.y"): for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"): for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_0_0 in range(1): for ax0_ax1_fused in range(4096): with T.block("A_reindex_shared"): v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused // 2 * 32 + ax0_ax1_fused // 128) v1 = T.axis.spatial(128, ax0_ax1_fused % 128) T.reads(A[v0, v1]) T.writes(A_reindex_shared[v0, v1]) 
T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 8}) A_reindex_shared[v0, v1] = A[v0, v1] for ax0_ax1_fused in range(4096): with T.block("B_reindex_shared"): v0 = T.axis.spatial(128, ax0_ax1_fused // 32) v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32) T.reads(B[v0, v1]) T.writes(B_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 1}) B_reindex_shared[v0, v1] = B[v0, v1] for ax2_0_1 in range(4): for ax0_0, ax1_0 in T.grid(2, 2): with T.block("A_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0) v1_o = T.axis.spatial(8, ax2_0_1 * 2 + ax1_0) T.reads(A_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_a_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("A_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0, ax1_0 in T.grid(2, 1): with T.block("B_reindex_shared_wmma.matrix_b_o"): v0_o = T.axis.spatial(8, ax2_0_1 * 2 + ax0_0) v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0) T.reads(B_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_b_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("B_reindex_shared_wmma.matrix_b"): 
v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 2, 2, 1): with T.block("C_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0_3 * 2 + ax0_0_4) v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0_3 + ax1_0_4) v2_o = T.axis.reduce(8, ax2_0_0 * 8 + ax2_0_1 * 2 + ax2_0_2) T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v2_o * 16:v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16:v2_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax0_1, ax1_1 in T.grid(16, 16): with T.block("C_init"): v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads() T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i_init, v1_i_init]) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i_init, v1_i_init] = T.float32(0) for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16): with T.block("C"): v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1]) T.reads(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, 
v0_i, v1_i] = C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i] + T.Cast("float32", A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i]) * T.Cast("float32", B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) for ax2 in range(2): for ax0_ax1_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 1): with T.block("C_reindex_shared_wmma.accumulator_o"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused // 2) v1 = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_ax1_fused) v2 = T.axis.spatial(2, ax2 + ax2_1) v3 = T.axis.spatial(1, ax3) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, 0:16, 0:16]) T.writes(C_reindex_shared[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_store_16x16x16_f32_{intrin_suffix}"}) for ax4, ax5 in T.grid(16, 16): with T.block("C_reindex_shared_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i]) T.writes(C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i]) C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i] = C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(512): with T.block("C_reindex_shared"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused // 2) v1 = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_ax1_ax3_ax4_ax5_fused // 256) v2 = T.axis.spatial(2, ax2) v3 = T.axis.spatial(1, 0) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 256 // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(C_reindex_shared[v0, v1, v2, v3, v4, v5]) T.writes(compute[v4 + v2 * 16 + v0 * 32, v5 + v1 * 16]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) compute[v4 + v2 * 16 + v0 * 32, v5 + v1 * 16] = T.max(C_reindex_shared[v0, v1, v2, v3, v4, v5], T.float32(0)) # fmt: on decision_0 = [ 
("SamplePerfectTile", [4, 1, 1, 1, 2]), ("SamplePerfectTile", [2, 2, 2, 1, 1]), ("SamplePerfectTile", [1, 4, 2]), ("SampleCategorical", 3), ("SampleCategorical", 3), ("SampleCategorical", 0), ] mod = te.create_prim_func( te_workload.matmul_relu( n=128, m=128, k=128, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[ multi_level_tiling_tensor_core( read_reuse_scope=shared_scope, write_reuse_scope=shared_scope ), ] + get_rules(kind="cuda", types=ms.schedule_rule.AutoInline), ) check_sketches( mod, sketches=actual, expected_mods=[matmul_relu_0], expected_decisions=[decision_0], ) def test_matmul_relu_with_fallback(): # fmt: off @T.prim_func def matmul_relu_fallback_0(A: T.Buffer((128, 128), "float16"), B: T.Buffer((128, 128), "float16"), compute: T.Buffer((128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) # with T.block("root"): C_reindex_shared = T.alloc_buffer((4, 2, 2, 4, 16, 16), scope="shared") C_reindex_shared_wmma_accumulator = T.alloc_buffer((4, 2, 2, 4, 16, 16), scope="wmma.accumulator") A_reindex_shared = T.alloc_buffer((128, 128), "float16", scope="shared") B_reindex_shared = T.alloc_buffer((128, 128), "float16", scope="shared") A_reindex_shared_wmma_matrix_a = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_a") B_reindex_shared_wmma_matrix_b = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_b") for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"): for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"): for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_0_0 in range(2): for ax0_ax1_fused in range(2048): with T.block("A_reindex_shared"): v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused // 64) v1 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused % 64) T.reads(A[v0, v1]) 
T.writes(A_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 4}) A_reindex_shared[v0, v1] = A[v0, v1] for ax0_ax1_fused in range(8192): with T.block("B_reindex_shared"): v0 = T.axis.spatial(128, ax2_0_0 * 64 + ax0_ax1_fused // 128) v1 = T.axis.spatial(128, ax0_ax1_fused % 128) T.reads(B[v0, v1]) T.writes(B_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 2}) B_reindex_shared[v0, v1] = B[v0, v1] for ax2_0_1 in range(1): for ax0_0, ax1_0 in T.grid(2, 4): with T.block("A_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0) v1_o = T.axis.spatial(8, ax2_0_0 * 4 + ax1_0) T.reads(A_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_a_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("A_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0, ax1_0 in T.grid(4, 4): with T.block("B_reindex_shared_wmma.matrix_b_o"): v0_o = T.axis.spatial(8, ax2_0_0 * 4 + ax0_0) v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused * 4 + ax1_0) T.reads(B_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_b_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("B_reindex_shared_wmma.matrix_b"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) 
T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 4, 2, 4): with T.block("C_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_3 * 2 + ax0_0_4) v1_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused * 4 + ax1_0_3 * 4 + ax1_0_4) v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 4 + ax2_0_2) T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v2_o * 16:v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16:v2_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax0_1, ax1_1 in T.grid(16, 16): with T.block("C_init"): v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads() T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i_init, v1_i_init]) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i_init, v1_i_init] = T.float32(0) for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16): with T.block("C"): v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1]) T.reads(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i, v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i, v1_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i, 
v1_i] = C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 4, v0_o % 2, v1_o % 4, v0_i, v1_i] + T.Cast("float32", A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i]) * T.Cast("float32", B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) for ax2 in range(2): for ax0_ax1_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 4): with T.block("C_reindex_shared_wmma.accumulator_o"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused * 2 + ax0_0_1_ax1_0_1_fused) v1 = T.axis.spatial(2, ax0_ax1_fused) v2 = T.axis.spatial(2, ax2 + ax2_1) v3 = T.axis.spatial(4, ax3) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, 0:16, 0:16]) T.writes(C_reindex_shared[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_store_16x16x16_f32_shared"}) for ax4, ax5 in T.grid(16, 16): with T.block("C_reindex_shared_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i]) T.writes(C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i]) C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i] = C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(2048): with T.block("C_reindex_shared"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused * 2 + ax0_0_1_ax1_0_1_fused) v1 = T.axis.spatial(2, ax0_ax1_ax3_ax4_ax5_fused // 1024) v2 = T.axis.spatial(2, ax2) v3 = T.axis.spatial(4, ax0_ax1_ax3_ax4_ax5_fused % 1024 // 256) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 256 // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(C_reindex_shared[v0, v1, v2, v3, v4, v5]) T.writes(compute[v4 + v2 * 16 + v0 * 32, v5 + v3 * 16 + v1 * 64]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) compute[v4 + v2 * 16 + v0 * 32, v5 + v3 * 16 + v1 * 64] = T.max(C_reindex_shared[v0, v1, v2, v3, v4, v5], T.float32(0)) # fmt: on decision_0 = [ 
("SamplePerfectTile", [2, 2, 1, 1, 2]), ("SamplePerfectTile", [1, 1, 2, 1, 4]), ("SamplePerfectTile", [2, 1, 4]), ("SampleCategorical", 3), ("SampleCategorical", 2), ("SampleCategorical", 1), ] mod = te.create_prim_func( te_workload.matmul_relu( n=128, m=128, k=128, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[ multi_level_tiling_tensor_core(), ] + get_rules( "cuda", ( ms.schedule_rule.MultiLevelTiling, ms.schedule_rule.AutoInline, ), ), ) check_sketches( mod, sketches=actual, expected_mods=[matmul_relu_fallback_0], expected_decisions=[decision_0], ) @pytest.mark.parametrize("shared_scope", ["shared", "shared.dyn"]) def test_conv2d(shared_scope): intrin_suffix = shared_scope.replace(".", "_") # fmt: off @T.prim_func def conv2d_0(inputs: T.Buffer((1, 16, 16, 32), "float16"), weight: T.Buffer((3, 3, 32, 32), "float16"), conv2d_nhwc: T.Buffer((1, 16, 16, 32), "float32")): T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) # with T.block("root"): PadInput = T.alloc_buffer((1, 18, 18, 32), "float16") conv2d_nhwc_reindex_shared_dyn = T.alloc_buffer((16, 2, 1, 1, 16, 16), scope=shared_scope) conv2d_nhwc_reindex_shared_dyn_wmma_accumulator = T.alloc_buffer((16, 2, 1, 1, 16, 16), scope="wmma.accumulator") PadInput_reindex_shared_dyn = T.alloc_buffer((256, 288), "float16", scope=shared_scope) weight_reindex_shared_dyn = T.alloc_buffer((288, 32), "float16", scope=shared_scope) PadInput_reindex_shared_dyn_wmma_matrix_a = T.alloc_buffer((256, 288), "float16", scope="wmma.matrix_a") weight_reindex_shared_dyn_wmma_matrix_b = T.alloc_buffer((288, 32), "float16", scope="wmma.matrix_b") for i0, i1, i2, i3 in T.grid(1, 18, 18, 32): with T.block("PadInput"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = 
T.if_then_else(1 <= v_i1 and v_i1 < 17 and 1 <= v_i2 and v_i2 < 17, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float16(0)) for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"): for ax0_0_1_ax1_0_1_fused in T.thread_binding(16, thread="blockIdx.x"): for ax0_0_2_ax1_0_2_fused in T.thread_binding(1, thread="threadIdx.y"): for ax2_0_0 in range(1): for ax0_ax1_fused in range(4608): with T.block("PadInput_reindex_shared.dyn"): v0 = T.axis.spatial(256, ax0_0_1_ax1_0_1_fused * 16 + ax0_ax1_fused // 288) v1 = T.axis.spatial(288, ax0_ax1_fused % 288) T.reads(PadInput[0, v0 // 16 + v1 // 96, v0 % 16 + v1 % 96 // 32, v1 % 32]) T.writes(PadInput_reindex_shared_dyn[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 2}) PadInput_reindex_shared_dyn[v0, v1] = PadInput[0, v0 // 16 + v1 // 96, v0 % 16 + v1 % 96 // 32, v1 % 32] for ax0_ax1_fused in range(4608): with T.block("weight_reindex_shared.dyn"): v0 = T.axis.spatial(288, ax0_ax1_fused // 16) v1 = T.axis.spatial(32, ax0_0_0_ax1_0_0_fused * 16 + ax0_ax1_fused % 16) T.reads(weight[v0 // 96, v0 % 96 // 32, v0 % 32, v1]) T.writes(weight_reindex_shared_dyn[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 8}) weight_reindex_shared_dyn[v0, v1] = weight[v0 // 96, v0 % 96 // 32, v0 % 32, v1] for ax2_0_1 in range(18): for ax0_0, ax1_0 in T.grid(1, 1): with T.block("PadInput_reindex_shared.dyn_wmma.matrix_a_o"): v0_o = T.axis.spatial(16, ax0_0_1_ax1_0_1_fused + ax0_0) v1_o = T.axis.spatial(18, ax2_0_1 + ax1_0) T.reads(PadInput_reindex_shared_dyn[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_a_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("PadInput_reindex_shared.dyn_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) 
T.reads(PadInput_reindex_shared_dyn[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = PadInput_reindex_shared_dyn[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0, ax1_0 in T.grid(1, 1): with T.block("weight_reindex_shared.dyn_wmma.matrix_b_o"): v0_o = T.axis.spatial(18, ax2_0_1 + ax0_0) v1_o = T.axis.spatial(2, ax0_0_0_ax1_0_0_fused + ax1_0) T.reads(weight_reindex_shared_dyn[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(weight_reindex_shared_dyn_wmma_matrix_b[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_b_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("weight_reindex_shared.dyn_wmma.matrix_b"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(weight_reindex_shared_dyn[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(weight_reindex_shared_dyn_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) weight_reindex_shared_dyn_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = weight_reindex_shared_dyn[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 1, 1, 1): with T.block("conv2d_nhwc_o"): v0_o = T.axis.spatial(16, ax0_0_1_ax1_0_1_fused + ax0_0_3 + ax0_0_4) v1_o = T.axis.spatial(2, ax0_0_0_ax1_0_0_fused + ax1_0_3 + ax1_0_4) v2_o = T.axis.reduce(18, ax2_0_0 * 18 + ax2_0_1 + ax2_0_2) T.reads(PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v2_o * 16:v2_o * 16 + 16], weight_reindex_shared_dyn_wmma_matrix_b[v2_o * 16:v2_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax0_1, ax1_1 in 
T.grid(16, 16): with T.block("conv2d_nhwc_init"): v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads() T.writes(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i_init, v1_i_init]) conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i_init, v1_i_init] = T.float32(0) for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16): with T.block("conv2d_nhwc"): v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1]) T.reads(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i, v1_i], PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], weight_reindex_shared_dyn_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) T.writes(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i, v1_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i, v1_i] = conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0_o, v1_o, 0, 0, v0_i, v1_i] + T.Cast("float32", PadInput_reindex_shared_dyn_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i]) * T.Cast("float32", weight_reindex_shared_dyn_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) for ax2 in range(1): for ax0_ax1_fused in T.thread_binding(1, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 1): with T.block("conv2d_nhwc_reindex_shared.dyn_wmma.accumulator_o"): v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0_0_1_ax1_0_1_fused, ax0_0_0_ax1_0_0_fused, ax2_1, ax3]) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0, v1, v2, v3, 0:16, 0:16]) T.writes(conv2d_nhwc_reindex_shared_dyn[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_store_16x16x16_f32_{intrin_suffix}"}) for ax4, ax5 in T.grid(16, 16): with T.block("conv2d_nhwc_reindex_shared.dyn_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0, v1, v2, v3, 
v4_i, v5_i]) T.writes(conv2d_nhwc_reindex_shared_dyn[v0, v1, v2, v3, v4_i, v5_i]) conv2d_nhwc_reindex_shared_dyn[v0, v1, v2, v3, v4_i, v5_i] = conv2d_nhwc_reindex_shared_dyn_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(256): with T.block("conv2d_nhwc_reindex_shared.dyn"): v0, v1, v2 = T.axis.remap("SSS", [ax0_0_1_ax1_0_1_fused, ax0_0_0_ax1_0_0_fused, ax2]) v3 = T.axis.spatial(1, 0) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(conv2d_nhwc_reindex_shared_dyn[v0, v1, v2, v3, v4, v5]) T.writes(conv2d_nhwc[0, (v4 + v0 * 16) // 16, (v4 + v0 * 16) % 16, v5 + v1 * 16]) T.block_attr({"meta_schedule.cooperative_fetch": 3}) conv2d_nhwc[0, (v4 + v0 * 16) // 16, (v4 + v0 * 16) % 16, v5 + v1 * 16] = conv2d_nhwc_reindex_shared_dyn[v0, v1, v2, v3, v4, v5] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 16, 1, 1, 1]), ("SamplePerfectTile", [2, 1, 1, 1, 1]), ("SamplePerfectTile", [1, 18, 1]), ("SampleCategorical", 2), ("SampleCategorical", 1), ("SampleCategorical", 3), ] mod = te.create_prim_func( te_workload.conv2d_nhwc( N=1, H=16, W=16, CI=32, CO=32, kernel_size=3, stride=1, padding=1, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[ multi_level_tiling_tensor_core( read_reuse_scope=shared_scope, write_reuse_scope=shared_scope ), ], ) check_sketches( mod, sketches=actual, expected_mods=[conv2d_0], expected_decisions=[decision_0], ) # Test adding inapplicable tensor intrinsics doesn't change the search space # This test case uses the same workload, decision and the expected sketch as above actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[ multi_level_tiling_tensor_core( read_reuse_scope=shared_scope, write_reuse_scope=shared_scope, in_dtype="float16", out_dtype=["float16", 
"float32"], ), ], ) check_sketches( mod, sketches=actual, expected_mods=[conv2d_0], expected_decisions=[decision_0], ) @pytest.mark.parametrize("shared_scope", ["shared", "shared.dyn"]) def test_matmul_relu_pipeline(shared_scope): intrin_suffix = shared_scope.replace(".", "_") # fmt: off @T.prim_func def matmul_relu_pipeline_0(A: T.Buffer((128, 128), "float16"), B: T.Buffer((128, 128), "float16"), compute: T.Buffer((128, 128), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") C = T.alloc_buffer((128, 128)) C_reindex_shared = T.alloc_buffer((4, 4, 2, 2, 16, 16), scope=shared_scope) C_reindex_shared_wmma_accumulator = T.alloc_buffer((4, 4, 2, 2, 16, 16), scope="wmma.accumulator") A_reindex_shared = T.alloc_buffer((128, 128), "float16", scope=shared_scope) B_reindex_shared = T.alloc_buffer((128, 128), "float16", scope=shared_scope) A_reindex_shared_wmma_matrix_a = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_a") B_reindex_shared_wmma_matrix_b = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_b") for ax0_0_0_ax1_0_0_fused in T.thread_binding(1, thread="blockIdx.y"): for ax0_0_1_ax1_0_1_fused in T.thread_binding(16, thread="blockIdx.x"): for ax0_0_2_ax1_0_2_fused in T.thread_binding(1, thread="threadIdx.y"): for ax2_0_0 in T.serial(4, annotations={"software_pipeline_order": [0, 3, 1, 4, 5, 2, 6], "software_pipeline_stage": [0, 0, 0, 0, 0, 1, 1]}): for ax0_ax1_fused in range(1024): with T.block("A_reindex_shared"): v0 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused // 4 * 32 + ax0_ax1_fused // 32) v1 = T.axis.spatial(128, ax2_0_0 * 32 + ax0_ax1_fused % 32) T.reads(A[v0, v1]) T.writes(A_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "double_buffer_scope": 0, "meta_schedule.cooperative_fetch": 4, "tir.manifest_shared_memory_local_stage": 1}) A_reindex_shared[v0, v1] = A[v0, v1] for ax0_ax1_fused in range(1024): with T.block("B_reindex_shared"): v0 
= T.axis.spatial(128, ax2_0_0 * 32 + ax0_ax1_fused // 32) v1 = T.axis.spatial(128, ax0_0_1_ax1_0_1_fused % 4 * 32 + ax0_ax1_fused % 32) T.reads(B[v0, v1]) T.writes(B_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "double_buffer_scope": 0, "meta_schedule.cooperative_fetch": 2, "tir.manifest_shared_memory_local_stage": 1}) B_reindex_shared[v0, v1] = B[v0, v1] for ax2_0_1 in T.serial(2, annotations={"software_pipeline_order": [0, 1, 2], "software_pipeline_stage": [0, 0, 1]}): for ax0_0, ax1_0 in T.grid(2, 1): with T.block("A_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused // 4 * 2 + ax0_0) v1_o = T.axis.spatial(8, ax2_0_0 * 2 + ax2_0_1 + ax1_0) T.reads(A_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_a_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("A_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0, ax1_0 in T.grid(1, 2): with T.block("B_reindex_shared_wmma.matrix_b_o"): v0_o = T.axis.spatial(8, ax2_0_0 * 2 + ax2_0_1 + ax0_0) v1_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused % 4 * 2 + ax1_0) T.reads(B_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_load_16x16x16_f16_b_{intrin_suffix}"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("B_reindex_shared_wmma.matrix_b"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(B_reindex_shared[v0_o * 
16 + v0_i, v1_o * 16 + v1_i]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 1, 2, 2): with T.block("C_o"): v0_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused // 4 * 2 + ax0_0_3 * 2 + ax0_0_4) v1_o = T.axis.spatial(8, ax0_0_1_ax1_0_1_fused % 4 * 2 + ax1_0_3 * 2 + ax1_0_4) v2_o = T.axis.reduce(8, ax2_0_0 * 2 + ax2_0_1 + ax2_0_2) T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v2_o * 16:v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16:v2_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax0_1, ax1_1 in T.grid(16, 16): with T.block("C_init"): v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads() T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, v0_i_init, v1_i_init]) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, v0_i_init, v1_i_init] = T.float32(0) for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16): with T.block("C"): v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1]) T.reads(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, v0_i, v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, v0_i, v1_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o // 2, v0_o % 2, v1_o % 2, v0_i, v1_i] = C_reindex_shared_wmma_accumulator[v0_o // 2, 
v1_o // 2, v0_o % 2, v1_o % 2, v0_i, v1_i] + T.Cast("float32", A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i]) * T.Cast("float32", B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) for ax2 in range(2): for ax0_ax1_fused in T.thread_binding(1, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 2): with T.block("C_reindex_shared_wmma.accumulator_o"): v0 = T.axis.spatial(4, ax0_0_1_ax1_0_1_fused // 4) v1 = T.axis.spatial(4, ax0_0_1_ax1_0_1_fused % 4) v2 = T.axis.spatial(2, ax2 + ax2_1) v3 = T.axis.spatial(2, ax3) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, 0:16, 0:16]) T.writes(C_reindex_shared[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": f"wmma_store_16x16x16_f32_{intrin_suffix}"}) for ax4, ax5 in T.grid(16, 16): with T.block("C_reindex_shared_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i]) T.writes(C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i]) C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i] = C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(512): with T.block("C_reindex_shared"): v0 = T.axis.spatial(4, ax0_0_1_ax1_0_1_fused // 4) v1 = T.axis.spatial(4, ax0_0_1_ax1_0_1_fused % 4) v2 = T.axis.spatial(2, ax2) v3 = T.axis.spatial(2, ax0_ax1_ax3_ax4_ax5_fused // 256) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 256 // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(C_reindex_shared[v0, v1, v2, v3, v4, v5]) T.writes(C[v4 + v2 * 16 + v0 * 32, v5 + v3 * 16 + v1 * 32]) T.block_attr({"meta_schedule.cooperative_fetch": 3}) C[v4 + v2 * 16 + v0 * 32, v5 + v3 * 16 + v1 * 32] = C_reindex_shared[v0, v1, v2, v3, v4, v5] for i0, i1 in T.grid(128, 128): with T.block("compute"): v_i0, v_i1 = T.axis.remap("SS", [i0, i1]) T.reads(C[v_i0, v_i1]) T.writes(compute[v_i0, v_i1]) compute[v_i0, 
v_i1] = T.max(C[v_i0, v_i1], T.float32(0)) # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 4, 1, 1, 2]), ("SamplePerfectTile", [1, 4, 1, 1, 2]), ("SamplePerfectTile", [4, 2, 1]), ("SampleCategorical", 2), ("SampleCategorical", 2), ("SampleCategorical", 1), ] mod = te.create_prim_func( te_workload.matmul_relu( n=128, m=128, k=128, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[ multi_level_tiling_tensor_core( read_reuse_scope=shared_scope, write_reuse_scope=shared_scope, use_software_pipeline=True, ), ], ) check_sketches( mod, sketches=actual, expected_mods=[matmul_relu_pipeline_0], expected_decisions=[decision_0], ) def test_matmul_relu_non_tensorizable(): # expected to do nothing on non-tensorizable workloads mod = te.create_prim_func( te_workload.matmul_relu( # dtype doesn't match tensor intrin n=128, m=128, k=128, ) ) (sch,) = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="shared")] + get_rules("cuda", ms.schedule_rule.AutoInline), ) tvm.ir.assert_structural_equal(mod, sch.mod["main"]) def test_padded_matmul_relu(): # fmt: off @T.prim_func def padded_matmul_relu_0(A: T.Buffer((127, 127), "float16"), B: T.Buffer((127, 127), "float16"), compute: T.Buffer((127, 127), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) C_reindex_shared = T.alloc_buffer((4, 8, 2, 1, 16, 16), scope="shared") C_reindex_shared_wmma_accumulator = T.alloc_buffer((4, 8, 2, 1, 16, 16), scope="wmma.accumulator") A_reindex_shared = T.alloc_buffer((128, 128), "float16", scope="shared") B_reindex_shared = T.alloc_buffer((128, 128), "float16", scope="shared") A_reindex_shared_wmma_matrix_a = T.alloc_buffer((128, 128), "float16", scope="wmma.matrix_a") B_reindex_shared_wmma_matrix_b = T.alloc_buffer((128, 128), "float16", 
scope="wmma.matrix_b") for ax0_0_0_ax1_0_0_fused in T.thread_binding(8, thread="blockIdx.y"): for ax0_0_1_ax1_0_1_fused in T.thread_binding(2, thread="blockIdx.x"): for ax0_0_2_ax1_0_2_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_0_0 in range(1): for ax0_ax1_fused in range(4096): with T.block("A_reindex_shared"): v0 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused // 2 * 32 + ax0_ax1_fused // 128) v1 = T.axis.spatial(128, ax0_ax1_fused % 128) T.reads(A[v0, v1]) T.writes(A_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 8}) A_reindex_shared[v0, v1] = T.if_then_else(v0 < 127 and v1 < 127, A[v0, v1], T.float16(0)) for ax0_ax1_fused in range(4096): with T.block("B_reindex_shared"): v0 = T.axis.spatial(128, ax0_ax1_fused // 32) v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused % 2 * 64 + ax0_0_1_ax1_0_1_fused * 32 + ax0_ax1_fused % 32) T.reads(B[v0, v1]) T.writes(B_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 1}) B_reindex_shared[v0, v1] = T.if_then_else(v0 < 127 and v1 < 127, B[v0, v1], T.float16(0)) for ax2_0_1 in range(4): for ax0_0, ax1_0 in T.grid(2, 2): with T.block("A_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0) v1_o = T.axis.spatial(8, ax2_0_1 * 2 + ax1_0) T.reads(A_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_a_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("A_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(A_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = A_reindex_shared[v0_o * 16 + v0_i, v1_o 
* 16 + v1_i] for ax0_0, ax1_0 in T.grid(2, 1): with T.block("B_reindex_shared_wmma.matrix_b_o"): v0_o = T.axis.spatial(8, ax2_0_1 * 2 + ax0_0) v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0) T.reads(B_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_b_shared"}) for ax0_1, ax1_1 in T.grid(16, 16): with T.block("B_reindex_shared_wmma.matrix_b"): v0_i, v1_i = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads(B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) B_reindex_shared_wmma_matrix_b[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = B_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(1, 1, 2, 2, 1): with T.block("C_o"): v0_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused // 2 * 2 + ax0_0_3 * 2 + ax0_0_4) v1_o = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_0_2_ax1_0_2_fused + ax1_0_3 + ax1_0_4) v2_o = T.axis.reduce(8, ax2_0_0 * 8 + ax2_0_1 * 2 + ax2_0_2) T.reads(A_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v2_o * 16:v2_o * 16 + 16], B_reindex_shared_wmma_matrix_b[v2_o * 16:v2_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax0_1, ax1_1 in T.grid(16, 16): with T.block("C_init"): v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1]) T.reads() T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i_init, v1_i_init]) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, 
v0_i_init, v1_i_init] = T.float32(0) for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16): with T.block("C"): v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1]) T.reads(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i], A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) T.writes(C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i] = C_reindex_shared_wmma_accumulator[v0_o // 2, v1_o, v0_o % 2, 0, v0_i, v1_i] + T.Cast("float32", A_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v2_o * 16 + v2_i]) * T.Cast("float32", B_reindex_shared_wmma_matrix_b[v2_o * 16 + v2_i, v1_o * 16 + v1_i]) for ax2 in range(2): for ax0_ax1_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 1): with T.block("C_reindex_shared_wmma.accumulator_o"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused // 2) v1 = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_ax1_fused) v2 = T.axis.spatial(2, ax2 + ax2_1) v3 = T.axis.spatial(1, ax3) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, 0:16, 0:16]) T.writes(C_reindex_shared[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_store_16x16x16_f32_shared"}) for ax4, ax5 in T.grid(16, 16): with T.block("C_reindex_shared_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i]) T.writes(C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i]) C_reindex_shared[v0, v1, v2, v3, v4_i, v5_i] = C_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(512): with T.block("C_reindex_shared"): v0 = T.axis.spatial(4, ax0_0_0_ax1_0_0_fused // 
2) v1 = T.axis.spatial(8, ax0_0_0_ax1_0_0_fused % 2 * 4 + ax0_0_1_ax1_0_1_fused * 2 + ax0_ax1_ax3_ax4_ax5_fused // 256) v2 = T.axis.spatial(2, ax2) v3 = T.axis.spatial(1, 0) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 256 // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(C_reindex_shared[v0, v1, v2, v3, v4, v5]) T.writes(compute[v4 + v2 * 16 + v0 * 32, v5 + v1 * 16]) T.block_attr({"meta_schedule.cooperative_fetch": 4}) if v0 * 32 + v2 * 16 + v4 < 127 and v1 * 16 + v5 < 127: compute[v4 + v2 * 16 + v0 * 32, v5 + v1 * 16] = T.max(C_reindex_shared[v0, v1, v2, v3, v4, v5], T.float32(0)) # fmt: on decision_0 = [ ("SamplePerfectTile", [4, 1, 1, 1, 2]), ("SamplePerfectTile", [2, 2, 2, 1, 1]), ("SamplePerfectTile", [1, 4, 2]), ("SampleCategorical", 3), ("SampleCategorical", 3), ("SampleCategorical", 0), ] mod = te.create_prim_func( te_workload.matmul_relu( n=127, m=127, k=127, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="shared")] + get_rules("cuda", ms.schedule_rule.AutoInline), ) check_sketches( mod, sketches=actual, expected_mods=[padded_matmul_relu_0], expected_decisions=[decision_0], ) def test_conv_1x1(): # fmt: off @T.prim_func def conv2d_1x1_0(inputs: T.Buffer((1, 16, 16, 64), "float16"), weight: T.Buffer((1, 1, 64, 64), "float16"), conv2d_nhwc: T.Buffer((1, 16, 16, 64), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) conv2d_nhwc_reindex_shared = T.alloc_buffer((16, 4, 1, 1, 16, 16), scope="shared") conv2d_nhwc_reindex_shared_wmma_accumulator = T.alloc_buffer((16, 4, 1, 1, 16, 16), scope="wmma.accumulator") PadInput_reindex_shared = T.alloc_buffer((256, 64), "float16", scope="shared") weight_reindex_shared = T.alloc_buffer((1, 1, 64, 64), "float16", scope="shared") PadInput_reindex_shared_wmma_matrix_a = T.alloc_buffer((256, 
64), "float16", scope="wmma.matrix_a") weight_reindex_shared_wmma_matrix_b = T.alloc_buffer((1, 1, 64, 64), "float16", scope="wmma.matrix_b") for ax2_0_0_ax3_0_0_fused in T.thread_binding(16, thread="blockIdx.y"): for ax2_0_1_ax3_0_1_fused in T.thread_binding(2, thread="blockIdx.x"): for ax2_0_2_ax3_0_2_fused in T.thread_binding(2, thread="threadIdx.y"): for ax0_0, ax1_0, ax4_0_0 in T.grid(1, 1, 1): for ax0_ax1_fused in range(1024): with T.block("PadInput_reindex_shared"): v0 = T.axis.spatial(256, ax2_0_0_ax3_0_0_fused // 2 * 32 + ax2_0_1_ax3_0_1_fused * 16 + ax0_ax1_fused // 64) v1 = T.axis.spatial(64, ax0_ax1_fused % 64) T.reads(inputs[0, v0 // 16, v0 % 16, v1]) T.writes(PadInput_reindex_shared[v0, v1]) T.block_attr({"buffer_dim_align": [[0, 0, 32, 8]], "meta_schedule.cooperative_fetch": 1}) PadInput_reindex_shared[v0, v1] = inputs[0, v0 // 16, v0 % 16, v1] for ax0_ax1_ax2_ax3_fused in range(2048): with T.block("weight_reindex_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(1, 0) v2 = T.axis.spatial(64, ax0_ax1_ax2_ax3_fused // 32) v3 = T.axis.spatial(64, ax2_0_0_ax3_0_0_fused % 2 * 32 + ax0_ax1_ax2_ax3_fused % 32) T.reads(weight[v0, v1, v2, v3]) T.writes(weight_reindex_shared[v0, v1, v2, v3]) T.block_attr({"buffer_dim_align": [[0, 2, 32, 8]], "meta_schedule.cooperative_fetch": 4}) weight_reindex_shared[v0, v1, v2, v3] = weight[v0, v1, v2, v3] for ax0_1, ax1_1, ax4_0_1 in T.grid(1, 1, 1): for ax0_0_1, ax1_0_1 in T.grid(1, 4): with T.block("PadInput_reindex_shared_wmma.matrix_a_o"): v0_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused // 2 * 2 + ax2_0_1_ax3_0_1_fused + ax0_0_1) v1_o = T.axis.spatial(4, ax1_0_1) T.reads(PadInput_reindex_shared[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16:v0_o * 16 + 16, v1_o * 16:v1_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_a_shared"}) for ax0_1_1, ax1_1_1 in T.grid(16, 16): with 
T.block("PadInput_reindex_shared_wmma.matrix_a"): v0_i, v1_i = T.axis.remap("SS", [ax0_1_1, ax1_1_1]) T.reads(PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) T.writes(PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i]) PadInput_reindex_shared_wmma_matrix_a[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = PadInput_reindex_shared[v0_o * 16 + v0_i, v1_o * 16 + v1_i] for ax0, ax1, ax2_0, ax3_0 in T.grid(1, 1, 4, 1): with T.block("weight_reindex_shared_wmma.matrix_b_o"): v0, v1, v2_o = T.axis.remap("SSS", [ax0, ax1, ax2_0]) v3_o = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax2_0_2_ax3_0_2_fused + ax3_0) T.reads(weight_reindex_shared[v0, v1, v2_o * 16:v2_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.writes(weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16:v2_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_load_16x16x16_f16_b_shared"}) for ax2_1, ax3_1 in T.grid(16, 16): with T.block("weight_reindex_shared_wmma.matrix_b"): v2_i, v3_i = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads(weight_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) T.writes(weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i]) weight_reindex_shared_wmma_matrix_b[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] = weight_reindex_shared[v0, v1, v2_o * 16 + v2_i, v3_o * 16 + v3_i] for ax2_0_3, ax3_0_3, ax0_2, ax1_2, ax4_0_2, ax2_0_4, ax3_0_4 in T.grid(1, 1, 1, 1, 4, 1, 1): with T.block("conv2d_nhwc_o"): v0 = T.axis.reduce(1, ax0_0 + ax0_1 + ax0_2) v1 = T.axis.reduce(1, ax1_0 + ax1_1 + ax1_2) v2_o = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused // 2 * 2 + ax2_0_1_ax3_0_1_fused + ax2_0_3 + ax2_0_4) v3_o = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax2_0_2_ax3_0_2_fused + ax3_0_3 + ax3_0_4) v4_o = T.axis.reduce(4, ax4_0_0 * 4 + ax4_0_1 * 4 + ax4_0_2) T.reads(PadInput_reindex_shared_wmma_matrix_a[v2_o * 16:v2_o * 16 + 16, v4_o * 16:v4_o * 16 + 16], weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 
16:v4_o * 16 + 16, v3_o * 16:v3_o * 16 + 16]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init": "wmma_fill_16x16x16_f32", "warp_execution": 1}) with T.init(): for ax2_1, ax3_1 in T.grid(16, 16): with T.block("conv2d_nhwc_init"): v2_i_init, v3_i_init = T.axis.remap("SS", [ax2_1, ax3_1]) T.reads() T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i_init, v3_i_init]) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i_init, v3_i_init] = T.float32(0) for ax2_1, ax3_1, ax4_1 in T.grid(16, 16, 16): with T.block("conv2d_nhwc"): v2_i, v3_i, v4_i = T.axis.remap("SSR", [ax2_1, ax3_1, ax4_1]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i, v3_i], PadInput_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i], weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 16 + v4_i, v3_o * 16 + v3_i]) T.writes(conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i, v3_i]) T.block_attr({"meta_schedule.tiling_structure": "SSSRRSRS"}) conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i, v3_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v2_o, v3_o, 0, 0, v2_i, v3_i] + T.Cast("float32", PadInput_reindex_shared_wmma_matrix_a[v2_o * 16 + v2_i, v4_o * 16 + v4_i]) * T.Cast("float32", weight_reindex_shared_wmma_matrix_b[v0, v1, v4_o * 16 + v4_i, v3_o * 16 + v3_i]) for ax2 in range(1): for ax0_ax1_fused in T.thread_binding(2, thread="threadIdx.y"): for ax2_1, ax3 in T.grid(1, 1): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator_o"): v0 = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused // 2 * 2 + ax2_0_1_ax3_0_1_fused) v1 = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax0_ax1_fused) v2, v3 = T.axis.remap("SS", [ax2_1, ax3]) v4_o = T.axis.spatial(1, 0) v5_o = T.axis.spatial(1, 0) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0, v1, v2, 
v3, 0:16, 0:16]) T.writes(conv2d_nhwc_reindex_shared[v0, v1, v2, v3, 0:16, 0:16]) T.block_attr({"meta_schedule.auto_tensorize": "wmma_store_16x16x16_f32_shared"}) for ax4, ax5 in T.grid(16, 16): with T.block("conv2d_nhwc_reindex_shared_wmma.accumulator"): v4_i, v5_i = T.axis.remap("SS", [ax4, ax5]) T.reads(conv2d_nhwc_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i]) T.writes(conv2d_nhwc_reindex_shared[v0, v1, v2, v3, v4_i, v5_i]) conv2d_nhwc_reindex_shared[v0, v1, v2, v3, v4_i, v5_i] = conv2d_nhwc_reindex_shared_wmma_accumulator[v0, v1, v2, v3, v4_i, v5_i] for ax0_ax1_ax3_ax4_ax5_fused in range(512): with T.block("conv2d_nhwc_reindex_shared"): v0 = T.axis.spatial(16, ax2_0_0_ax3_0_0_fused // 2 * 2 + ax2_0_1_ax3_0_1_fused) v1 = T.axis.spatial(4, ax2_0_0_ax3_0_0_fused % 2 * 2 + ax0_ax1_ax3_ax4_ax5_fused // 256) v2 = T.axis.spatial(1, ax2) v3 = T.axis.spatial(1, 0) v4 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 256 // 16) v5 = T.axis.spatial(16, ax0_ax1_ax3_ax4_ax5_fused % 16) T.reads(conv2d_nhwc_reindex_shared[v0, v1, v2, v3, v4, v5]) T.writes(conv2d_nhwc[0, (v4 + v0 * 16) // 16, (v4 + v0 * 16) % 16, v5 + v1 * 16]) T.block_attr({"meta_schedule.cooperative_fetch": 2}) conv2d_nhwc[0, (v4 + v0 * 16) // 16, (v4 + v0 * 16) % 16, v5 + v1 * 16] = conv2d_nhwc_reindex_shared[v0, v1, v2, v3, v4, v5] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1]), ("SamplePerfectTile", [1, 1, 1]), ("SamplePerfectTile", [8, 2, 1, 1, 1]), ("SamplePerfectTile", [2, 1, 2, 1, 1]), ("SamplePerfectTile", [1, 1, 4]), ("SampleCategorical", 1), ("SampleCategorical", 0), ("SampleCategorical", 2), ] mod = te.create_prim_func( te_workload.conv2d_nhwc( 1, 16, 16, 64, 64, 1, 1, 0, in_dtype="float16", out_dtype="float32", ) ) actual = generate_design_space( kind="cuda", mod=mod, target=tvm.target.Target("cuda --arch=sm_70"), types=None, sch_rules=[multi_level_tiling_tensor_core(write_reuse_scope="shared")] + get_rules("cuda", ms.schedule_rule.AutoInline), ) check_sketches( mod, 
sketches=actual, expected_mods=[conv2d_1x1_0], expected_decisions=[decision_0], ) if __name__ == "__main__": tvm.testing.main()
77,317
72.010387
397
py
tvm
tvm-main/tests/python/unittest/test_inject_ptx_ldg32.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm.script import tir as T import numpy as np import tvm.testing @T.prim_func def vector_add(A: T.Buffer((16), "float32"), B: T.Buffer((32), "float32")) -> None: T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) bx = T.env_thread("blockIdx.x") tx = T.env_thread("threadIdx.x") T.launch_thread(bx, 1) T.launch_thread(tx, 32) A_local = T.Buffer((32), "float32", scope="local") with T.block(): T.reads(A[0:16]) T.writes(A_local[0:32]) A_local[tx] = T.if_then_else(tx % 2 == 0, A[tx // 2], T.float32(0), dtype="float32") B[tx] = A_local[tx] + 1.0 @tvm.testing.requires_cuda def test_inject_ptx_intrin(): f = vector_add arch = tvm.contrib.nvcc.get_target_compute_version() major, _ = tvm.contrib.nvcc.parse_compute_version(arch) if major < 8: # Require at least SM80 return with tvm.transform.PassContext(config={"tir.ptx_ldg32": True}): mod = tvm.build(f, target="cuda") A_np = np.random.rand(16).astype("float32") B_np = np.zeros((32)).astype("float32") dev = tvm.cuda(0) A_nd = tvm.nd.array(A_np, device=dev) B_nd = tvm.nd.array(B_np, device=dev) mod(A_nd, B_nd) C_np = np.zeros((32)).astype("float32") for i in range(32): if i % 2 == 0: C_np[i] = A_np[i // 2] C_np[i] += 1.0 
tvm.testing.assert_allclose(B_nd.numpy(), C_np) if __name__ == "__main__": test_inject_ptx_intrin()
2,279
32.529412
92
py
tvm
tvm-main/tests/python/unittest/test_auto_scheduler_layout_rewrite.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test AutoScheduler Layout Rewrite""" import tempfile import numpy as np import pytest import tvm import tvm.testing from tvm import topi from tvm import auto_scheduler, te from tvm.testing.auto_scheduler import get_tiled_matmul, matmul_auto_scheduler_test def test_apply_steps_with_layout_rewrite(): dag, s = get_tiled_matmul() _, bufs = dag.apply_steps_from_state(s) assert bufs[1].shape[0] == 512 assert bufs[1].shape[1] == 512 _, bufs = dag.apply_steps_from_state( s, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED ) assert bufs[1].shape[0] == 4 assert bufs[1].shape[1] == 8 assert bufs[1].shape[2] == 4 assert bufs[1].shape[3] == 4 assert bufs[1].shape[4] == 512 _, bufs = dag.apply_steps_from_state( s, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE ) assert bufs[1].shape[0] == 512 assert bufs[1].shape[1] == 512 def test_apply_steps_with_layout_rewrite_corner_case(): A, B, C = matmul_auto_scheduler_test(1, 1, 1) dag = auto_scheduler.ComputeDAG([A, B, C]) s = dag.get_init_state() s.compute_root(C) i_j_fused = s.fuse(C, [s[C].iters[0], s[C].iters[1]]) s.parallel(C, i_j_fused) _, bufs = dag.apply_steps_from_state( s, 
layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED ) @tvm.testing.requires_llvm def test_correctness_layout_rewrite_rewrite_for_preTransformed(): N = 16 target = tvm.target.Target("llvm") task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target) dag = task.compute_dag with tempfile.NamedTemporaryFile() as fp: log_file = fp.name search_policy = auto_scheduler.SketchPolicy(task) measure_ctx = auto_scheduler.LocalRPCMeasureContext() tuning_options = auto_scheduler.TuningOptions( num_measure_trials=100, runner=measure_ctx.runner, verbose=2, early_stopping=1, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) task.tune(tuning_options, search_policy=search_policy) inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target) s, bufs = dag.apply_steps_from_state( inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.REWRITE_FOR_PRE_TRANSFORMED ) s_ref, bufs_ref = dag.apply_steps_from_state(inp.state) np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs] np_args_ref = [np.array(x) for x in np_args] weight = np_args_ref[1] # infer shape for the rewritten layout if len(weight.shape) >= 6: # For cpu tile structure SSRSRS base = len(weight.shape) - 6 red_dim = weight.shape[2 + base] * weight.shape[4 + base] out_dim = weight.shape[3 + base] * weight.shape[5 + base] for i in range(base + 2): out_dim *= weight.shape[i] new_order = ( [ 2 + base, 4 + base, ] + list(range(base + 2)) + [ 3 + base, 5 + base, ] ) np_args_ref[1] = np_args_ref[1].transpose(new_order) np_args_ref[1] = np_args_ref[1].reshape((red_dim, out_dim)) func = tvm.build(s, bufs, target=target) func_ref = tvm.build(s_ref, bufs_ref, target=target) dev = tvm.device(str(target)) dev_ref = tvm.cpu() args = [tvm.nd.array(x, device=dev) for x in np_args] args_ref = [tvm.nd.array(x, device=dev_ref) for x in np_args_ref] dev.sync() func(*args) func_ref(*args_ref) dev.sync() 
tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3) tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3) del measure_ctx @tvm.testing.requires_llvm def test_correctness_layout_rewrite_insert_transform_stage(): N = 128 target = tvm.target.Target("llvm") task = auto_scheduler.SearchTask(func=matmul_auto_scheduler_test, args=(N, N, N), target=target) dag = task.compute_dag with tempfile.NamedTemporaryFile() as fp: log_file = fp.name search_policy = auto_scheduler.SketchPolicy(task) measure_ctx = auto_scheduler.LocalRPCMeasureContext() tuning_options = auto_scheduler.TuningOptions( num_measure_trials=2, runner=measure_ctx.runner, verbose=1, measure_callbacks=[auto_scheduler.RecordToFile(log_file)], ) task.tune(tuning_options, search_policy=search_policy) inp, _ = auto_scheduler.load_best_record(log_file, task.workload_key, target) s, bufs = dag.apply_steps_from_state( inp.state, layout_rewrite=auto_scheduler.LayoutRewriteOption.INSERT_TRANSFORM_STAGE ) s_ref, bufs_ref = dag.apply_steps_from_state(inp.state) np_args = [np.random.randn(*topi.get_const_tuple(x.shape)).astype(x.dtype) for x in bufs] func = tvm.build(s, bufs, target=target) func_ref = tvm.build(s_ref, bufs_ref, target=target) dev = tvm.device(str(target)) dev_ref = tvm.cpu() args = [tvm.nd.array(x, device=dev) for x in np_args] args_ref = [tvm.nd.array(x, device=dev_ref) for x in np_args] dev.sync() func(*args) func_ref(*args_ref) dev.sync() tvm.testing.assert_allclose(args[0].numpy(), args_ref[0].numpy(), atol=1e-3, rtol=1e-3) tvm.testing.assert_allclose(args[1].numpy(), args_ref[1].numpy(), atol=1e-3, rtol=1e-3) tvm.testing.assert_allclose(args[2].numpy(), args_ref[2].numpy(), atol=1e-3, rtol=1e-3) del measure_ctx if __name__ == "__main__": test_apply_steps_with_layout_rewrite() test_apply_steps_with_layout_rewrite_corner_case() test_correctness_layout_rewrite_rewrite_for_preTransformed() 
test_correctness_layout_rewrite_insert_transform_stage()
7,010
35.706806
100
py
tvm
tvm-main/tests/python/unittest/test_target_target.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import json import pytest import tvm import tvm.testing from tvm.target import Target, arm_cpu, bifrost, cuda, intel_graphics, mali, rocm, vta @tvm.target.generic_func def mygeneric(data): # default generic function return data + 1 @mygeneric.register(["cuda", "gpu"]) def cuda_func(data): return data + 2 @mygeneric.register("rocm") def rocm_func(data): return data + 3 @mygeneric.register("cpu") def rocm_func(data): return data + 10 def test_all_targets_device_type_verify(): """Consistency verification for all targets' device type""" all_targets = [tvm.target.Target(t) for t in tvm.target.Target.list_kinds()] for tgt in all_targets: # skip targets with hooks or otherwise intended to be used with external codegen relay_to_tir = tgt.get_kind_attr("RelayToTIR") tir_to_runtime = tgt.get_kind_attr("TIRToRuntime") is_external_codegen = tgt.get_kind_attr("is_external_codegen") if relay_to_tir is not None or tir_to_runtime is not None or is_external_codegen: continue if tgt.kind.name not in tvm._ffi.runtime_ctypes.Device.STR2MASK: raise KeyError("Cannot find target kind: %s in Device.STR2MASK" % tgt.kind.name) assert ( tgt.get_target_device_type() == tvm._ffi.runtime_ctypes.Device.STR2MASK[tgt.kind.name] ) def 
test_target_dispatch(): with tvm.target.cuda(): assert mygeneric(1) == 3 assert mygeneric.get_packed_func()(1) == 3 with tvm.target.rocm(): assert mygeneric(1) == 4 assert mygeneric.get_packed_func()(1) == 4 with tvm.target.Target("cuda"): assert mygeneric(1) == 3 assert mygeneric.get_packed_func()(1) == 3 with tvm.target.arm_cpu(): assert mygeneric(1) == 11 assert mygeneric.get_packed_func()(1) == 11 with tvm.target.Target("metal"): assert mygeneric(1) == 3 assert mygeneric.get_packed_func()(1) == 3 assert tvm.target.Target.current() is None @tvm.target.override_native_generic_func("test_target_temp_strategy") def target_generic(data): # default generic function return data + 1 @target_generic.register(["cuda", "gpu"]) def target_cuda_func(data): return data + 2 def temp_target_cuda_func(data): return data + 3 def test_target_temp_strategy(): class TempStrategy(object): def __init__(self, name, target, fstrategy): generic_fstrategy = tvm.target.get_native_generic_func(name) self.target = target self.name = name self.origin_func = {} with tvm.target.Target(target) as target_obj: for tgt_key in target_obj.keys: self.origin_func[tgt_key] = generic_fstrategy.get_packed_func() generic_fstrategy.register(fstrategy, tgt_key, allow_override=True) def __enter__(self): return self def __exit__(self, typ, value, traceback): generic_fstrategy = tvm.target.get_native_generic_func(self.name) with tvm.target.Target(self.target) as target_obj: for tgt_key in target_obj.keys: generic_fstrategy.register( self.origin_func[tgt_key], tgt_key, allow_override=True ) with tvm.target.Target("cuda"): assert target_generic(1) == 3 # The strategy func change to temp_target_cuda_func. 
with TempStrategy("test_target_temp_strategy", "cuda", temp_target_cuda_func): with tvm.target.Target("cuda"): assert target_generic(1) == 4 with tvm.target.Target("cuda"): assert target_generic(1) == 3 def test_target_string_parse(): target = tvm.target.Target("cuda -model=unknown -libs=cublas,cudnn") assert target.kind.name == "cuda" assert target.model == "unknown" assert set(target.keys) == set(["cuda", "gpu"]) assert set(target.libs) == set(["cublas", "cudnn"]) assert str(target) == str(tvm.target.cuda(options="-libs=cublas,cudnn")) assert tvm.target.intel_graphics().device_name == "intel_graphics" assert tvm.target.mali().device_name == "mali" assert tvm.target.arm_cpu().device_name == "arm_cpu" def test_target_string_with_spaces(): target = tvm.target.Target( "vulkan -device_name='Name of GPU with spaces' -device_type=discrete" ) assert target.attrs["device_name"] == "Name of GPU with spaces" assert target.attrs["device_type"] == "discrete" target = tvm.target.Target(str(target)) assert target.attrs["device_name"] == "Name of GPU with spaces" assert target.attrs["device_type"] == "discrete" def test_target_llvm_options(): target = tvm.target.Target("llvm -cl-opt='-unroll-threshold:uint=100,-unroll-count:uint=3'") assert sorted(target.attrs["cl-opt"]) == sorted( ["-unroll-threshold:uint=100", "-unroll-count:uint=3"] ) def test_target_create(): targets = [cuda(), rocm(), mali(), intel_graphics(), arm_cpu("rk3399"), vta(), bifrost()] for tgt in targets: assert tgt is not None def test_target_config(): """ Test that constructing a target from a dictionary works. """ target_config = { "kind": "llvm", "keys": ["arm_cpu", "cpu"], "device": "arm_cpu", "libs": ["cblas"], "mfloat-abi": "hard", "mattr": ["+neon", "-avx512f"], } # Convert config dictionary to json string. target_config_str = json.dumps(target_config) # Test both dictionary input and json string. 
for config in [target_config, target_config_str]: target = tvm.target.Target(config) assert target.kind.name == "llvm" assert all([key in target.keys for key in ["arm_cpu", "cpu"]]) assert target.device_name == "arm_cpu" assert target.libs == ["cblas"] assert target.attrs["mfloat-abi"] == "hard" assert all([attr in target.attrs["mattr"] for attr in ["+neon", "-avx512f"]]) def test_config_map(): """ Confirm that constructing a target with invalid attributes fails as expected. """ target_config = {"kind": "llvm", "libs": {"a": "b", "c": "d"}} with pytest.raises(ValueError): tvm.target.Target(target_config) def test_composite_target(): tgt = tvm.target.Target("composite --host=llvm --devices=cuda,opencl") assert tgt.kind.name == "composite" assert tgt.host.kind.name == "llvm" assert len(tgt.attrs["devices"]) == 2 cuda_device, opencl_device = tgt.attrs["devices"] assert cuda_device.kind.name == "cuda" assert opencl_device.kind.name == "opencl" def test_target_tag_0(): tgt = tvm.target.Target("nvidia/geforce-rtx-2080-ti") assert tgt.kind.name == "cuda" assert tgt.attrs["arch"] == "sm_75" assert tgt.attrs["max_shared_memory_per_block"] == 49152 assert tgt.attrs["max_threads_per_block"] == 1024 assert tgt.attrs["thread_warp_size"] == 32 assert tgt.attrs["registers_per_block"] == 65536 def test_target_tag_1(): tgt = tvm.target.Target("nvidia/jetson-nano") assert tgt.kind.name == "cuda" assert tgt.attrs["arch"] == "sm_53" assert tgt.attrs["max_shared_memory_per_block"] == 49152 assert tgt.attrs["max_threads_per_block"] == 1024 assert tgt.attrs["thread_warp_size"] == 32 assert tgt.attrs["registers_per_block"] == 32768 def test_list_kinds(): targets = tvm.target.Target.list_kinds() assert len(targets) != 0 assert "llvm" in targets assert all(isinstance(target_name, str) for target_name in targets) def test_target_host_tags(): tgt = tvm.target.Target("nvidia/jetson-nano", "nvidia/geforce-rtx-2080-ti") assert tgt.kind.name == "cuda" assert tgt.attrs["arch"] == "sm_53" assert 
tgt.attrs["max_shared_memory_per_block"] == 49152 assert tgt.attrs["max_threads_per_block"] == 1024 assert tgt.attrs["thread_warp_size"] == 32 assert tgt.attrs["registers_per_block"] == 32768 assert tgt.host.kind.name == "cuda" assert tgt.host.attrs["arch"] == "sm_75" assert tgt.host.attrs["max_shared_memory_per_block"] == 49152 assert tgt.host.attrs["max_threads_per_block"] == 1024 assert tgt.host.attrs["thread_warp_size"] == 32 assert tgt.host.attrs["registers_per_block"] == 65536 def test_target_host_tag_dict(): tgt = tvm.target.Target("nvidia/jetson-nano", {"kind": "llvm"}) assert tgt.kind.name == "cuda" assert tgt.attrs["arch"] == "sm_53" assert tgt.attrs["max_shared_memory_per_block"] == 49152 assert tgt.attrs["max_threads_per_block"] == 1024 assert tgt.attrs["thread_warp_size"] == 32 assert tgt.attrs["registers_per_block"] == 32768 assert tgt.host.kind.name == "llvm" def test_target_host_single_dict(): tgt = tvm.target.Target({"kind": "llvm", "host": "nvidia/jetson-nano"}) assert tgt.kind.name == "llvm" assert tgt.host.kind.name == "cuda" assert tgt.host.attrs["arch"] == "sm_53" assert tgt.host.attrs["max_shared_memory_per_block"] == 49152 assert tgt.host.attrs["max_threads_per_block"] == 1024 assert tgt.host.attrs["thread_warp_size"] == 32 assert tgt.host.attrs["registers_per_block"] == 32768 def test_target_host_single_string(): tgt = tvm.target.Target("cuda --host llvm") assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" def test_target_host_single_string_with_tag(): tgt = tvm.target.Target("cuda --host nvidia/jetson-nano") assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "cuda" assert tgt.host.attrs["arch"] == "sm_53" assert tgt.host.attrs["max_shared_memory_per_block"] == 49152 assert tgt.host.attrs["max_threads_per_block"] == 1024 assert tgt.host.attrs["thread_warp_size"] == 32 assert tgt.host.attrs["registers_per_block"] == 32768 def test_target_host_merge_0(): tgt = tvm.target.Target(tvm.target.Target("cuda --host 
nvidia/jetson-nano"), None) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "cuda" assert tgt.host.attrs["arch"] == "sm_53" assert tgt.host.attrs["max_shared_memory_per_block"] == 49152 assert tgt.host.attrs["max_threads_per_block"] == 1024 assert tgt.host.attrs["thread_warp_size"] == 32 assert tgt.host.attrs["registers_per_block"] == 32768 def test_target_host_merge_1(): tgt = tvm.target.Target("cuda --host llvm") tgt = tvm.target.Target(tgt, tgt.host) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" def test_target_host_merge_2(): """Test picking the same host is ok.""" tgt = tvm.target.Target(tvm.target.Target("cuda --host llvm"), tvm.target.Target("llvm")) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" def test_target_tvm_object(): """Test creating Target by using TVM Objects""" String = tvm.runtime.container.String tgt = tvm.target.Target(target=String("cuda --host llvm")) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" tgt = tvm.target.Target(target=String("cuda"), host=String("llvm")) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" @pytest.mark.skip(reason="Causing infinite loop because of pytest and handle issue") def test_target_host_merge_3(): with pytest.raises(ValueError, match=r"target host has to be a string or dictionary."): tvm.target.Target(tvm.target.Target("cuda --host llvm"), 12.34) def test_target_with_host(): tgt = tvm.target.Target("cuda") llvm = tvm.target.Target("llvm") tgt = tgt.with_host(llvm) assert tgt.kind.name == "cuda" assert tgt.host.kind.name == "llvm" cuda_host = tvm.target.Target("nvidia/jetson-nano") tgt = tgt.with_host(cuda_host) assert tgt.host.kind.name == "cuda" assert tgt.host.attrs["arch"] == "sm_53" assert tgt.host.attrs["max_shared_memory_per_block"] == 49152 assert tgt.host.attrs["max_threads_per_block"] == 1024 assert tgt.host.attrs["thread_warp_size"] == 32 assert tgt.host.attrs["registers_per_block"] == 32768 def 
test_canon_target_and_host_0(): target = None host = None target, host = Target.canon_target_and_host(target, host) assert target is None assert host is None def test_canon_target_and_host_1(): target = None host = "llvm" with pytest.raises(AssertionError, match=r"Target host is not empty when target is empty."): target, host = Target.canon_target_and_host(target, host) def test_canon_target_and_host_2(): target = Target("cuda") host = Target("llvm") target, host = Target.canon_target_and_host(target, host) assert target.kind.name == "cuda" assert target.host.kind.name == "llvm" def test_canon_target_and_host_3(): target = Target(target="cuda", host="llvm") host = None target, host = Target.canon_target_and_host(target, host) assert target.kind.name == "cuda" assert target.host.kind.name == "llvm" assert host.kind.name == "llvm" assert target.host == host def test_canon_multi_target_and_host_0(): with pytest.raises(AssertionError): Target.canon_multi_target_and_host(None) def test_canon_multi_target_and_host_1(): raw_targets = Target.canon_multi_target_and_host({"kind": "llvm"}) assert len(raw_targets) == 1 assert raw_targets[0].kind.name == "llvm" def test_canon_multi_target_and_host_2(): raw_targets = Target.canon_multi_target_and_host({1: "llvm", 2: "cuda"}) assert len(raw_targets) == 2 assert raw_targets[0].kind.name == "llvm" assert raw_targets[1].kind.name == "cuda" def test_canon_multi_target_and_host_3(): raw_targets = Target.canon_multi_target_and_host(["llvm", "cuda"]) assert len(raw_targets) == 2 assert raw_targets[0].kind.name == "llvm" assert raw_targets[1].kind.name == "cuda" def test_canon_multi_target_and_host_4(): raw_targets = Target.canon_multi_target_and_host("llvm") assert len(raw_targets) == 1 assert raw_targets[0].kind.name == "llvm" def test_canon_multi_target_and_host_5(): raw_targets = Target.canon_multi_target_and_host("cuda", "llvm") assert len(raw_targets) == 1 assert raw_targets[0].kind.name == "cuda" assert 
raw_targets[0].host.kind.name == "llvm" def test_canon_multi_target_and_host_6(): """Test `canon_target_and_host` by using TVM Objects""" cuda_device_type = tvm.device("cuda").device_type target = {cuda_device_type: Target(target="cuda", host="llvm")} host = None raw_targets_1 = Target.canon_multi_target_and_host(target, host) assert len(raw_targets_1) == 1 assert raw_targets_1[0].kind.name == "cuda" assert raw_targets_1[0].host.kind.name == "llvm" target = {cuda_device_type: Target(tvm.runtime.container.String("cuda"))} host = Target(tvm.runtime.container.String("llvm")) target = tvm.runtime.convert(target) assert isinstance(target, tvm.ir.container.Map) raw_targets_2 = Target.canon_multi_target_and_host(target, host) assert len(raw_targets_2) == 1 assert raw_targets_2[0].kind.name == "cuda" assert raw_targets_2[0].host.kind.name == "llvm" def test_canon_target_map_and_host(): target_map = {"cuda": "cuda_module", "llvm": "cpu_module"} target_map, host = Target.canon_target_map_and_host(target_map, "llvm") assert host.kind.name == "llvm" for t, v in target_map.items(): assert t.host.kind.name == "llvm" if t.kind.name == "cuda": assert v == "cuda_module" elif t.kind.name == "llvm": assert v == "cpu_module" else: assert False def test_target_attr_bool_value(): target0 = Target("vulkan --supports_float16=True") assert target0.attrs["supports_float16"] == 1 target1 = Target("vulkan --supports_float16=true") assert target1.attrs["supports_float16"] == 1 target2 = Target("vulkan --supports_float16=False") assert target2.attrs["supports_float16"] == 0 target3 = Target("vulkan --supports_float16=false") assert target3.attrs["supports_float16"] == 0 def test_target_features(): target_no_features = Target("cuda") assert target_no_features.features assert not target_no_features.features.is_test target_with_features = Target("test") assert target_with_features.features.is_test assert not target_with_features.features.is_missing if __name__ == "__main__": tvm.testing.main()
17,275
34.042596
98
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_apply_custom_rule.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring from typing import List import tempfile import pytest import tvm from tvm import meta_schedule as ms from tvm.meta_schedule.schedule_rule import ApplyCustomRule from tvm.script import tir as T @tvm.script.ir_module class Matmul: @T.prim_func def main(a: T.handle, b: T.handle, c: T.handle) -> None: T.func_attr({"global_symbol": "main"}) A = T.match_buffer(a, (1024, 1024), "float32") B = T.match_buffer(b, (1024, 1024), "float32") C = T.match_buffer(c, (1024, 1024), "float32") for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): T.block_attr({"schedule_rule": "test_apply_custom_rule"}) vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] @tvm.register_func("meta_schedule.cpu.test_apply_custom_rule") def sch_fn(sch: tvm.tir.Schedule, block: tvm.tir.Block) -> List[tvm.tir.Schedule]: raise ValueError("Intended for meta_schedule.cpu.test_apply_custom_rule") def test_custom_rule(): with pytest.raises(ValueError) as e_info: with tempfile.TemporaryDirectory() as tmpdir: sch_rules = [ApplyCustomRule()] space_gen = 
ms.space_generator.PostOrderApply(sch_rules=sch_rules) ms.tune_tir( mod=Matmul, target="llvm -num-cores=1", work_dir=tmpdir, max_trials_global=10, space=space_gen, ) assert "ValueError: Intended for meta_schedule.cpu.test_apply_custom_rule" in str(e_info.value) if __name__ == "__main__": test_custom_rule()
2,591
37.686567
99
py
tvm
tvm-main/tests/python/unittest/test_te_build_lower.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_lower_rfactor(): n = te.size_var("n") m = te.size_var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") s = te.create_schedule(B.op) ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) BF = s.rfactor(B, ki) xo, xi = s[B].split(s[B].op.axis[0], factor=32) s[B.op].bind(xo, te.thread_axis("blockIdx.x")) s[B.op].bind(xi, te.thread_axis("threadIdx.y")) s[B].bind(s[B].op.reduce_axis[0], te.thread_axis("threadIdx.x")) s[BF].compute_at(s[B], s[B].op.reduce_axis[0]) fapi = tvm.lower(s, [A, B]) def test_dependent_output_shape(): n, m, x = te.size_var("n"), te.size_var("m"), te.size_var("x") A = te.placeholder((n, m)) B = te.compute((m, n // x), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) mod = tvm.build(s, [A, B, x]) def test_split_uneven_unique_likely(): a = te.placeholder( (16, 16), ) b = te.placeholder( (16, 16), ) c = te.compute((16, 16), lambda x, y: a[x, y] + b[x, y]) x, y = c.op.axis sch = te.create_schedule(c.op) xo, xi = sch[c].split(x, 5) stmt = tvm.lower(sch, [a, b, c])["main"].body assert isinstance(stmt.body.body, tvm.tir.stmt.IfThenElse) if __name__ == 
"__main__": test_lower_rfactor() test_dependent_output_shape() test_split_uneven_unique_likely()
2,254
33.166667
69
py
tvm
tvm-main/tests/python/unittest/test_te_group.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test group effect""" import tvm from tvm import te def test_scan_group(): m = te.size_var("m") n = te.size_var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: x[0, i]) s_update1 = te.compute((m, n), lambda t, i: s_state[t - 1, i] + x[t, i]) s_update2 = te.compute((m, n), lambda t, i: s_update1[t, i] + 1) s_update3 = te.compute((m, n), lambda t, i: s_update2[t, i] + 1) res = tvm.te.scan(s_init, s_update3, s_state, inputs=x) s = te.create_schedule(res.op) assert s[s_update1].group is not None assert s[s_update2].group == s[s_update1].group # Assign within group, is valid s[s_update1].compute_at(s[s_update2], s_update2.op.axis[1]) # create a new group, for [s_update2 and s_update1] g2 = s.create_group(outputs=s_update2, inputs=[s_state, x]) assert g2.group is not None assert g2.group == s[s_update3].group assert s[s_update2].group == g2 assert s[s_update1].group == g2 g2.compute_at(s[s_update3], s_update3.op.axis[1]) assert g2.attach_stage == s[s_update3] try: # compute outside group error. 
s[s_update2].compute_at(s[s_init], s_init.op.axis[0]) assert False except tvm.error.TVMError: pass def test_compute_group(): m = te.size_var("m") n = te.size_var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") s = te.create_schedule(x2.op) g = s.create_group(outputs=x1, inputs=x, include_inputs=True) assert s[x1].group == g assert s[x].group == g g.compute_at(s[x2], x2.op.axis[1]) assert g.attach_stage == s[x2] assert g.num_child_stages == 2 def test_nest_group(): m = te.size_var("m") n = te.size_var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") s = te.create_schedule(x2.op) g1 = s.create_group(outputs=x1, inputs=x) g2 = s.create_group(outputs=x1, inputs=x, include_inputs=True) assert set(s.groups) == set([g1, g2]) assert s[x].group == g2 assert s[x1].group == g1 assert g1.group == g2 assert g2.num_child_stages == 2 assert g1.num_child_stages == 1 if __name__ == "__main__": test_nest_group() test_compute_group() test_scan_group()
3,395
36.318681
78
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_lower_init_block.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te from tvm.script import tir as T # pylint: disable=no-self-argument @tvm.script.ir_module class WithInit: @T.prim_func def main(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [64, 64, 64]) B = T.match_buffer(b, [64]) for i0, j0 in T.grid(64, 64): for k0 in T.serial(32, 64): with T.block(): i, j, k = T.axis.remap("SRR", [i0, j0, k0]) with T.init(): B[i] = T.float32(0) B[i] += A[i, j, k] @tvm.script.ir_module class WithBranch: @T.prim_func def main(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [64, 64, 64]) B = T.match_buffer(b, [64]) for i0, j0 in T.grid(64, 64): for k0 in T.serial(32, 64): with T.block(): i, j, k = T.axis.remap("SRR", [i0, j0, k0]) T.reads(A[i, j, k]) T.writes(B[i]) if (j == 0) and (k == 32): B[i] = T.float32(0) B[i] += A[i, j, k] @tvm.script.ir_module class InitWithMatchBuffer: @T.prim_func def main(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [64, 64, 64]) B = T.match_buffer(b, [64]) for i0, j0 in T.grid(64, 64): for k0 in T.serial(32, 64): with T.block(): i, j, k = T.axis.remap("SRR", [i0, j0, k0]) BB = T.match_buffer(B[i], ()) AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64)) with T.init(): BB[()] = T.float32(0) BB[()] += 
AA[j, k] @tvm.script.ir_module class BranchWithMatchBuffer: @T.prim_func def main(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [64, 64, 64]) B = T.match_buffer(b, [64]) for i0, j0 in T.grid(64, 64): for k0 in T.serial(32, 64): with T.block(): i, j, k = T.axis.remap("SRR", [i0, j0, k0]) T.reads(A[i, j, k]) T.writes(B[i]) BB = T.match_buffer(B[i], ()) AA = T.match_buffer(A[i, 0:64, 0:64], (64, 64)) if (j == 0) and (k == 32): BB[()] = T.float32(0) BB[()] += AA[j, k] def test_lower_reduction(): origin_mod = WithInit mod = tvm.tir.transform.LowerInitBlock()(origin_mod) tvm.ir.assert_structural_equal(mod, WithBranch, True) def test_lower_match_buffer(): origin_mod = InitWithMatchBuffer mod = tvm.tir.transform.LowerInitBlock()(origin_mod) tvm.ir.assert_structural_equal(mod, BranchWithMatchBuffer, True) def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.LowerInitBlock()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) # LowerInitBlock should do nothing on TE if __name__ == "__main__": test_lower_reduction() test_lower_match_buffer() test_lower_te()
4,054
32.512397
91
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_cross_llvm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test cross compilation""" import tvm import tvm.testing from tvm import te import os import struct from tvm import rpc from tvm.contrib import utils, cc import numpy as np @tvm.testing.requires_llvm def test_llvm_add_pipeline(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C") s = te.create_schedule(C.op) xo, xi = s[C].split(C.op.axis[0], factor=4) s[C].parallel(xo) s[C].vectorize(xi) def verify_elf(path, e_machine): with open(path, "rb") as fi: arr = fi.read(20) assert struct.unpack("ccc", arr[1:4]) == (b"E", b"L", b"F") endian = struct.unpack("b", arr[0x5:0x6])[0] endian = "<" if endian == 1 else ">" assert struct.unpack(endian + "h", arr[0x12:0x14])[0] == e_machine def build_i386(): temp = utils.tempdir() target = "llvm -mtriple=i386-pc-linux-gnu" f = tvm.build(s, [A, B, C], target) path = temp.relpath("myadd.o") f.save(path) verify_elf(path, 0x03) def build_arm(): target = "llvm -mtriple=armv7-none-linux-gnueabihf" if not tvm.runtime.enabled(target): print("Skip because %s is not enabled.." 
% target) return temp = utils.tempdir() f = tvm.build(s, [A, B, C], target) path = temp.relpath("myadd.o") f.save(path) verify_elf(path, 0x28) asm_path = temp.relpath("myadd.asm") f.save(asm_path) # Do a RPC verification, launch kernel on Arm Board if available. host = os.environ.get("TVM_RPC_ARM_HOST", None) remote = None if host: port = int(os.environ["TVM_RPC_ARM_PORT"]) try: remote = rpc.connect(host, port) except tvm.error.TVMError as e: pass if remote: remote.upload(path) farm = remote.load_module("myadd.o") dev = remote.cpu(0) n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) farm(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) print("Verification finish on remote..") build_i386() build_arm() if __name__ == "__main__": test_llvm_add_pipeline()
3,347
33.875
78
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_bool.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """codegen related to bool types""" import tvm import tvm.testing from tvm import te import numpy as np import tvm.testing arr_size = tvm.testing.parameter(32) @tvm.testing.fixture def compute(arr_size): A = te.placeholder((arr_size,), name="A") B = te.placeholder((arr_size,), name="B") C = te.compute(A.shape, lambda *i: A(*i) > B(*i), name="C") D = te.compute(C.shape, lambda *i: tvm.tir.all(C(*i), A(*i) > 1).astype("float32"), name="D") return [A, B, C, D] @tvm.testing.fixture def schedule(target, compute): target = tvm.target.Target(target) A, B, C, D = compute if target.kind.name == "llvm": s = te.create_schedule(D.op) xo, xi = s[C].split(C.op.axis[0], factor=4) xo1, xo2 = s[C].split(xo, factor=13) s[C].parallel(xo2) else: s = te.create_schedule(D.op) for stage in [C, D]: xo, xi = s[stage].split(stage.op.axis[0], factor=4) s[stage].bind(xo, te.thread_axis("blockIdx.x")) s[stage].bind(xi, te.thread_axis("threadIdx.x")) return s @tvm.testing.uses_gpu def test_cmp_load_store(target, dev, arr_size, compute, schedule): A, B, _, D = compute f = tvm.build(schedule, [A, B, D], target) a_np = np.random.uniform(size=arr_size).astype(A.dtype) b_np = np.random.uniform(size=arr_size).astype(B.dtype) a = 
tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) d = tvm.nd.array(np.zeros(arr_size, dtype=D.dtype), dev) f(a, b, d) np.testing.assert_equal( d.numpy(), np.logical_and(a_np > b_np, a_np > 1).astype("float32"), ) if __name__ == "__main__": tvm.testing.main()
2,456
31.328947
97
py
tvm
tvm-main/tests/python/unittest/test_tir_analysis_verify_ssa.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_verify_ssa(): x = te.var("x") y = te.var() z = tvm.tir.Evaluate(x + y) assert tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([x, y], z)) assert not tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([x, y], tvm.tir.LetStmt(x, 1, z))) def test_verify_weak_let_ssa(): x = te.var("x") z1 = tvm.tir.Let(x, 1, x + 1) z2 = tvm.tir.Let(x, 2, x + 2) assert tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([], tvm.tir.Evaluate(z1 + z1))) assert not tvm.tir.analysis.verify_ssa(tvm.tir.PrimFunc([], tvm.tir.Evaluate(z1 * z2))) if __name__ == "__main__": test_verify_ssa() test_verify_weak_let_ssa()
1,458
33.738095
94
py
tvm
tvm-main/tests/python/unittest/test_tir_imm_values.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import math import random import numpy as np import tvm import tvm.testing import pytest from tvm import tir from tvm.script import tir as T import pytest @pytest.mark.parametrize( "dtype, literals", [ ["int8", [-128, 0, 127]], ["uint8", [0, 255]], ["int32", [-2147483648, 2147483647]], ["uint32", [0, 4294967295]], ["int64", [-9223372036854775808, 9223372036854775807]], ["uint64", [0, 9223372036854775807]], ], ) def test_tir_make_intimm(dtype, literals): for l in literals: imm = tir.const(l, dtype) assert imm.value == l, imm @pytest.mark.parametrize( "dtype, literals", [ ["int8", [-129, 128]], ["uint8", [-1, 256]], ["int32", [-2147483650, 2147483648]], ["uint32", [-1, 4294967296]], ["uint64", [-1, 18446744073709551616]], ], ) def test_tir_invalid_intimm(dtype, literals): for l in literals: with pytest.raises(tvm.TVMError): tir.const(l, dtype) @pytest.mark.parametrize( "dtype, literals", [ [ "uint64", { 9223372036854775807: 9223372036854775807, 18446744073709551615: 18446744073709551615, }, ], ], ) def test_tir_large_py_int_literals(dtype, literals): """ For large uint value, use LargeUIntImm intrin, """ for l in literals: x = tir.const(l, dtype) if isinstance(x, (tir.IntImm, tir.FloatImm)): assert 
x.value == literals[l] else: # LargeUIntImm(low32, hi32) assert (int(x.args[1]) << 32) + int(x.args[0]) == literals[l] def test_tir_intimm_overflow(): assert int(tir.const(255, "uint8") + tir.const(1, "uint8")) == 0 assert int(tir.const(2**31 - 1, "int32") + tir.const(1, "int32")) == -(2**31) assert int(tir.const(2**32 - 1, "uint32") + tir.const(1, "uint32")) == 0 assert int(tir.const(2**63 - 1, "int64") + tir.const(1, "int64")) == -(2**63) assert int(tir.const(2**32, "uint64") * tir.const(2**32, "uint64")) == 0 # customized int types assert int(tir.const(7, "int4") + tir.const(1, "int4")) == -8 assert int(tir.const(2**39 - 1, "int40") + tir.const(1, "int40")) == -(2**39) def compare_float_value(value, expect, msg): if math.isfinite(value): assert np.abs(value - expect) < 1e-5, f"{value} vs {expect}, {msg}" elif math.isnan(value): assert math.isnan(expect), f"{value} vs {expect}, {msg}" elif math.isinf(value): assert math.isinf(expect), f"{value} vs {expect}, {msg}" @pytest.mark.parametrize( "dtype, literals", [ ["float16", [-65504.0, 3.14, 65504.0, np.inf, np.nan]], ["bfloat16", [-3.38953139e38, 3.38953139e38, 3.14]], ["float32", [np.finfo("float32").min, 3.14, np.finfo("float32").max, np.inf, np.nan]], ["float64", [np.finfo("float64").min, 3.14, np.finfo("float64").max, np.inf, np.nan]], ], ) def test_tir_make_floatimm(dtype, literals): for l in literals: imm = tir.const(l, dtype) compare_float_value(imm.value, l, "imm value should match feed value") @pytest.mark.parametrize( "dtype, literals", [ ["float16", [-65505.0, 65505.0]], ["float32", [-3.402e39, 3.402e39]], ], ) def test_tir_invalid_floatimm(dtype, literals): """Currently only fp16 and fp32 have range check.""" for l in literals: with pytest.raises(tvm.TVMError): tir.const(l, dtype) @pytest.mark.parametrize("dtype", ["float16", "float32", "float64"]) @pytest.mark.parametrize("literal", [3.14, np.nan, np.inf]) def test_tir_special_floatimms(dtype, literal): x = tir.const(literal, dtype) 
compare_float_value(x.value, literal, "imm value should match feed value") @tvm.testing.requires_llvm() def test_tir_too_large_literal_f64(): # Behavior check: if literal f64 value is out of dtype range, the # object is still constructed, and eval to infinity. @T.prim_func def imm_overflow_fp64() -> T.float64: T.evaluate(T.ret(T.float64(1.7976e309), dtype="float64")) f = tvm.build(imm_overflow_fp64, target="llvm") assert math.isinf(f()) @pytest.mark.parametrize( "literal, expect_dtype", [ (256, "int32"), (2147483647, "int32"), (-2147483648, "int32"), (2147483648, "int64"), (-2147483649, "int64"), (3.14159, "float32"), (np.finfo("float32").min, "float32"), (np.finfo("float32").max, "float32"), (-3.402e39, "float64"), (3.402e39, "float64"), ], ) def test_tir_const_auto_dtype(literal, expect_dtype): x = tir.const(literal, dtype=None) assert x.dtype == expect_dtype assert x.value == literal def check_tir_const_fold( dtype, foldf, calcf, x_range=None, y_range=None, expect=None, skip_overflow=False ): """Helper to check constant folding behavior Parameters ---------- dtype: str Datatype of constants foldf: (x, y) -> z Folding function to call calcf: (x, y) -> z Compiled calculation function to call x_range: Union[int, float, tuple] Single value or value range [min, max] y_range: Union[int, float, tuple] Single value or value range [min, max] expect: Union[int, float] Expected calculation result skip_overflow: bool Skip assertion if the overflow happens """ seed = random.randint(0, 2147483648) np.random.seed(seed) ninfo = np.finfo(dtype) if dtype.startswith("float") else np.iinfo(dtype) if x_range is None: x_range = (ninfo.min, ninfo.max) if isinstance(x_range, (int, float)): x = x_range elif dtype.startswith("int") or dtype.startswith("uint"): x = np.random.randint(x_range[0], x_range[1] + 1, dtype=dtype) else: x = np.random.uniform(x_range[0], x_range[1]) if y_range is None: y_range = (ninfo.min, ninfo.max) if isinstance(y_range, (int, float)): y = y_range elif 
dtype.startswith("int") or dtype.startswith("uint"): y = np.random.randint(y_range[0], y_range[1] + 1, dtype=dtype) else: y = np.random.uniform(y_range[0], y_range[1]) if skip_overflow: py_res = foldf(x, y) if isinstance(py_res, (tir.IntImm, tir.FloatImm)): py_res = py_res.value if not (ninfo.min <= py_res <= ninfo.max): # If the result overflow, certain arithmetics is non-defined # thus we intentionally do not make the test failed. return fold_res = foldf(tir.const(x, dtype), tir.const(y, dtype)) calc_res = calcf(x, y) flaky_msg = ( f"{dtype} ({x}, {y}, {expect}) const folding check failed.\n" + "This test is intentionally non-deterministic, " + f"if it fails please report it in github issue together with this seed {seed}\n" ) if dtype.startswith("float"): compare_float_value(calc_res, fold_res.value, flaky_msg) if expect: compare_float_value(expect, calc_res, flaky_msg) else: assert calc_res == fold_res.value, flaky_msg if expect: assert expect == calc_res, flaky_msg @tvm.testing.requires_llvm() def test_tir_floatimm_const_fold(): """Behavior check: folding fp32 match platform f32 arithmetic""" @T.prim_func def float_imm_multiply(x: T.float32, y: T.float32, z: T.Buffer((), "float32")): z[()] = x * y @T.prim_func def float_imm_add(x: T.float32, y: T.float32, z: T.Buffer((), "float32")): z[()] = x + y @T.prim_func def float_imm_sub(x: T.float32, y: T.float32, z: T.Buffer((), "float32")): z[()] = x - y @T.prim_func def float_imm_div(x: T.float32, y: T.float32, z: T.Buffer((), "float32")): z[()] = x / y def __wrap_build(f): lib = tvm.build(f, target="llvm") z = tvm.nd.array(np.zeros([]).astype("float32")) def _func(x, y): lib(x, y, z) return z.numpy() return _func fmul = __wrap_build(float_imm_multiply) fadd = __wrap_build(float_imm_add) fsub = __wrap_build(float_imm_sub) fdiv = __wrap_build(float_imm_div) # overflow check_tir_const_fold("float32", lambda x, y: x * y, fmul, 3.0e30, 3.0e30, np.inf) check_tir_const_fold("float32", lambda x, y: x * y, fmul, 3.0e30, 
-3.0e30, -np.inf) check_tir_const_fold("float32", lambda x, y: x / y, fdiv, 3.0e30, 3.0e-30, np.inf) # divide by zero with pytest.raises(tvm.TVMError): check_tir_const_fold("float32", lambda x, y: x / y, fdiv, 1.0, 0.0) # nan and inf check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, np.nan, np.nan) check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, np.inf, np.inf) check_tir_const_fold("float32", lambda x, y: x + y, fadd, 1.0, -np.inf, -np.inf) # randomized check check_tir_const_fold("float32", lambda x, y: x * y, fmul) check_tir_const_fold("float32", lambda x, y: x + y, fadd) check_tir_const_fold("float32", lambda x, y: x - y, fsub) check_tir_const_fold( "float32", lambda x, y: x / y, fdiv, y_range=(0.01, np.finfo("float32").max) ) @tvm.testing.requires_llvm() def test_tir_int8_const_fold(): """Behavior check: folding i8 operation match platform i8 arithmetic""" @T.prim_func def imm_multiply(x: T.int8, y: T.int8) -> T.int8: T.evaluate(T.ret(x * y, dtype="int8")) @T.prim_func def imm_add(x: T.int8, y: T.int8) -> T.int8: T.evaluate(T.ret(x + y, dtype="int8")) @T.prim_func def imm_sub(x: T.int8, y: T.int8) -> T.int8: T.evaluate(T.ret(x - y, dtype="int8")) @T.prim_func def imm_truncdiv(x: T.int8, y: T.int8) -> T.int8: T.evaluate(T.ret(T.truncdiv(x, y), dtype="int8")) @T.prim_func def imm_floordiv(x: T.int8, y: T.int8) -> T.int8: T.evaluate(T.ret(T.floordiv(x, y), dtype="int8")) fmul = tvm.build(imm_multiply, target="llvm") fadd = tvm.build(imm_add, target="llvm") fsub = tvm.build(imm_sub, target="llvm") ffloordiv = tvm.build(imm_floordiv, target="llvm") ftruncdiv = tvm.build(imm_truncdiv, target="llvm") # overflow check_tir_const_fold("int8", lambda x, y: x + y, fadd, 127, 1, -128) check_tir_const_fold("int8", lambda x, y: x * y, fmul, 127, 127, 1) # divide by zero with pytest.raises(tvm.TVMError): check_tir_const_fold("int8", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("int8", lambda 
x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0) # i8 mod folding is not implemented assert not isinstance(tir.floormod(tir.const(7, "int8"), tir.const(3, "int8")), tir.IntImm) assert not isinstance(tir.truncmod(tir.const(7, "int8"), tir.const(3, "int8")), tir.IntImm) # randomized check check_tir_const_fold("int8", lambda x, y: x * y, fmul) check_tir_const_fold("int8", lambda x, y: x + y, fadd) check_tir_const_fold("int8", lambda x, y: x - y, fsub) check_tir_const_fold( "int8", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("int8").max) ) check_tir_const_fold( "int8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("int8").max) ) @tvm.testing.requires_llvm() def test_tir_uint8_const_fold(): """Behavior check: folding u8 operation match platform u8 arithmetic""" @T.prim_func def imm_multiply(x: T.uint8, y: T.uint8) -> T.uint8: T.evaluate(T.ret(x * y, dtype="uint8")) @T.prim_func def imm_add(x: T.uint8, y: T.uint8) -> T.uint8: T.evaluate(T.ret(x + y, dtype="uint8")) @T.prim_func def imm_sub(x: T.uint8, y: T.uint8) -> T.uint8: T.evaluate(T.ret(x - y, dtype="uint8")) @T.prim_func def imm_truncdiv(x: T.uint8, y: T.uint8) -> T.uint8: T.evaluate(T.ret(T.truncdiv(x, y), dtype="uint8")) @T.prim_func def imm_floordiv(x: T.uint8, y: T.uint8) -> T.uint8: T.evaluate(T.ret(T.floordiv(x, y), dtype="uint8")) fmul = tvm.build(imm_multiply, target="llvm") fadd = tvm.build(imm_add, target="llvm") fsub = tvm.build(imm_sub, target="llvm") ffloordiv = tvm.build(imm_floordiv, target="llvm") ftruncdiv = tvm.build(imm_truncdiv, target="llvm") # overflow check_tir_const_fold("uint8", lambda x, y: x + y, fadd, 255, 1, 0) # zero sub with pytest.raises(tvm.TVMError): check_tir_const_fold("uint8", lambda x, y: x - y, fsub, 0, 10) # divide by zero with pytest.raises(tvm.TVMError): check_tir_const_fold("uint8", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("uint8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 
1, 0) # u8 mod folding is not implemented assert not isinstance(tir.floormod(tir.const(7, "uint8"), tir.const(3, "uint8")), tir.IntImm) assert not isinstance(tir.truncmod(tir.const(7, "uint8"), tir.const(3, "uint8")), tir.IntImm) # randomized check check_tir_const_fold("uint8", lambda x, y: x * y, fmul) check_tir_const_fold("uint8", lambda x, y: x + y, fadd) check_tir_const_fold("uint8", lambda x, y: x - y, fsub) check_tir_const_fold( "uint8", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("uint8").max) ) check_tir_const_fold( "uint8", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("uint8").max) ) @tvm.testing.requires_llvm() def test_tir_int32_const_fold(): """Behavior check: folding i32 operation match platform i32 arithmetic""" @T.prim_func def imm_multiply(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(x * y, dtype="int32")) @T.prim_func def imm_add(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(x + y, dtype="int32")) @T.prim_func def imm_sub(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(x - y, dtype="int32")) @T.prim_func def imm_truncdiv(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(T.truncdiv(x, y), dtype="int32")) @T.prim_func def imm_truncmod(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(T.truncmod(x, y), dtype="int32")) @T.prim_func def imm_floordiv(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(T.floordiv(x, y), dtype="int32")) @T.prim_func def imm_floormod(x: T.int32, y: T.int32) -> T.int32: T.evaluate(T.ret(T.floormod(x, y), dtype="int32")) fmul = tvm.build(imm_multiply, target="llvm") fadd = tvm.build(imm_add, target="llvm") fsub = tvm.build(imm_sub, target="llvm") ffloordiv = tvm.build(imm_floordiv, target="llvm") ffloormod = tvm.build(imm_floormod, target="llvm") ftruncdiv = tvm.build(imm_truncdiv, target="llvm") ftruncmod = tvm.build(imm_truncmod, target="llvm") # i32 overflow is not specified, only check for range assert -(2**31) <= int(tir.const(2**31 - 1, "int32") + 
tir.const(1, "int32")) < 2**31 assert -(2**31) <= int(tir.const(-(2**31), "int32") - tir.const(1, "int32")) < 2**31 # divide by zero with pytest.raises(tvm.TVMError): check_tir_const_fold("int32", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("int32", lambda x, y: tir.floormod(x, y), ffloormod, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("int32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("int32", lambda x, y: tir.truncmod(x, y), ftruncmod, 1, 0) # randomized check check_tir_const_fold("int32", lambda x, y: x * y, fmul, skip_overflow=True) check_tir_const_fold("int32", lambda x, y: x + y, fadd, skip_overflow=True) check_tir_const_fold("int32", lambda x, y: x - y, fsub, skip_overflow=True) check_tir_const_fold( "int32", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("int32").max), skip_overflow=True, ) check_tir_const_fold( "int32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("int32").max), skip_overflow=True, ) check_tir_const_fold( "int32", lambda x, y: tir.floormod(x, y), ffloormod, y_range=(1, np.iinfo("int32").max), skip_overflow=False, ) check_tir_const_fold( "int32", lambda x, y: tir.truncmod(x, y), ftruncmod, y_range=(1, np.iinfo("int32").max), skip_overflow=False, ) @tvm.testing.requires_llvm() def test_tir_uint32_const_fold(): """Behavior check: folding u32 operation match platform u32 arithmetic""" @T.prim_func def imm_multiply(x: T.uint32, y: T.uint32) -> T.uint32: T.evaluate(T.ret(x * y, dtype="uint32")) @T.prim_func def imm_add(x: T.uint32, y: T.uint32) -> T.uint32: T.evaluate(T.ret(x + y, dtype="uint32")) @T.prim_func def imm_sub(x: T.uint32, y: T.uint32) -> T.uint32: T.evaluate(T.ret(x - y, dtype="uint32")) @T.prim_func def imm_truncdiv(x: T.uint32, y: T.uint32) -> T.uint32: T.evaluate(T.ret(T.truncdiv(x, y), dtype="uint32")) @T.prim_func def imm_floordiv(x: T.uint32, y: 
T.uint32) -> T.uint32: T.evaluate(T.ret(T.floordiv(x, y), dtype="uint32")) fmul = tvm.build(imm_multiply, target="llvm") fadd = tvm.build(imm_add, target="llvm") fsub = tvm.build(imm_sub, target="llvm") ffloordiv = tvm.build(imm_floordiv, target="llvm") ftruncdiv = tvm.build(imm_truncdiv, target="llvm") # u32 overflow is not specified, only check for range assert 0 <= int(tir.const(2**32 - 1, "uint32") + tir.const(1, "uint32")) < 2**32 # divide by zero with pytest.raises(tvm.TVMError): check_tir_const_fold("uint32", lambda x, y: tir.floordiv(x, y), ffloordiv, 1, 0) with pytest.raises(tvm.TVMError): check_tir_const_fold("uint32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, 1, 0) # u32 mod folding is not implemented assert not isinstance(tir.floormod(tir.const(7, "uint32"), tir.const(3, "uint32")), tir.IntImm) assert not isinstance(tir.truncmod(tir.const(7, "uint32"), tir.const(3, "uint32")), tir.IntImm) # randomized check check_tir_const_fold("uint32", lambda x, y: x * y, fmul, skip_overflow=True) check_tir_const_fold("uint32", lambda x, y: x + y, fadd, skip_overflow=True) check_tir_const_fold("uint32", lambda x, y: x - y, fsub, skip_overflow=True) check_tir_const_fold( "uint32", lambda x, y: tir.floordiv(x, y), ffloordiv, y_range=(1, np.iinfo("uint32").max), skip_overflow=False, ) check_tir_const_fold( "uint32", lambda x, y: tir.truncdiv(x, y), ftruncdiv, y_range=(1, np.iinfo("uint32").max), skip_overflow=False, ) if __name__ == "__main__": tvm.testing.main()
19,917
33.460208
99
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_for_kind.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import sys import pytest import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable @T.prim_func def element_wise(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_parallelized(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) for i0 in T.parallel(0, 128): for i1 in T.serial(0, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i0, i1]) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_i_bound(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) for i0 in T.thread_binding(0, 128, thread="threadIdx.x"): for i1 in T.serial(0, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i0, i1]) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_compute_at_split(a: T.handle, c: T.handle) -> 
None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) B = T.alloc_buffer((128, 128)) for i in T.serial(0, 128): for j0 in T.serial(0, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j0]) B[vi, vj] = A[vi, vj] * 2.0 for j1o, j1i in T.grid(32, 4): with T.block("C"): vi = T.axis.S(128, i) vj = T.axis.S(128, j1o * 4 + j1i) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def element_wise_compute_at_split_vectorized(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) B = T.alloc_buffer((128, 128)) for i in T.serial(0, 128): for j0 in T.serial(0, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j0]) B[vi, vj] = A[vi, vj] * 2.0 for j1o in T.serial(0, 32): for j1i in T.vectorized(0, 4): with T.block("C"): vi = T.axis.S(128, i) vj = T.axis.S(128, j1o * 4 + j1i) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def element_wise_split_predicate(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) for i, j_0, j_1 in T.grid(128, 13, 10): with T.block("B"): T.where(j_0 * 10 + j_1 < 128) vi = T.axis.S(128, i) vj = T.axis.S(128, j_0 * 10 + j_1) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_split_predicate_parallelized(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) for i in T.serial(0, 128): for j_0 in T.parallel(0, 13): for j_1 in T.serial(0, 10): with T.block("B"): T.where(j_0 * 10 + j_1 < 128) vi = T.axis.S(128, i) vj = T.axis.S(128, j_0 * 10 + j_1) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_split_predicate_vectorized(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) for i in T.vectorized(0, 128): for j_0, j_1 in T.grid(13, 10): with T.block("B"): T.where(j_0 * 10 + j_1 < 128) vi = T.axis.S(128, i) vj = T.axis.S(128, j_0 * 10 + j_1) B[vi, vj] = A[vi, vj] * 2.0 @T.prim_func def element_wise_compute_at_split_j0_j1o_bound(a: 
T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) B = T.alloc_buffer((128, 128)) for i in T.serial(0, 128): for j0 in T.thread_binding(0, 128, thread="threadIdx.x"): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j0]) B[vi, vj] = A[vi, vj] * 2.0 for j1o in T.thread_binding(0, 32, thread="threadIdx.x"): for j1i in T.serial(0, 4): with T.block("C"): vi = T.axis.S(128, i) vj = T.axis.S(128, j1o * 4 + j1i) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def matmul(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j, k in T.grid(128, 128, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] @T.prim_func def rowsum(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128,)) for i, k in T.grid(128, 128): with T.block("B"): vi, vk = T.axis.remap("SR", [i, k]) with T.init(): B[vi] = 0.0 B[vi] = B[vi] + A[vi, vk] @T.prim_func def rowsum_unrolled(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128,)) for i0 in T.unroll(0, 128): for i1 in T.serial(0, 128): with T.block("B"): vi, vk = T.axis.remap("SR", [i0, i1]) with T.init(): B[vi] = 0.0 B[vi] = B[vi] + A[vi, vk] @T.prim_func def rowsum_not_quasi_affine(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128,)) for i, k in T.grid(128, 16): with T.block("B"): vi = T.axis.S(128, i) vk = T.axis.R(128, T.floordiv(k * k, 2)) with T.init(): B[vi] = 0.0 B[vi] = B[vi] + A[vi, vk] @T.prim_func def rowsum_not_compact_data_flow(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128,)) for i, k in T.grid(128, 16): with T.block("B"): vi, vk = T.axis.remap("SR", [i, k]) with T.init(): B[vk] = 0.0 B[vk] = B[vk] + A[vi, vk] @T.prim_func 
def rowsum_cross_thread_reduction(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128,)) for i0 in T.serial(0, 128): for i1 in T.thread_binding(0, 128, thread="threadIdx.x"): with T.block("B"): vi, vk = T.axis.remap("SR", [i0, i1]) with T.init(): B[vi] = 0.0 B[vi] = B[vi] + A[vi, vk] @T.prim_func def opaque_block(a: T.handle) -> None: A = T.match_buffer(a, (16,)) for i in T.serial(0, 15): with T.block("opaque"): A[i + 1] = A[i + 1] + A[i] @T.prim_func def block_inside_init(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128, 128], dtype="float32") B = T.match_buffer(b, [128, 128], dtype="float32") for i in T.serial(0, 128): with T.block("outer"): vi = T.axis.S(128, i) with T.init(): for j in T.serial(0, 128): with T.block("init"): vj = T.axis.S(128, j) B[vi, vj] = 0.0 for k in T.serial(0, 128): for j in T.serial(0, 128): with T.block("inner"): vj, vk = T.axis.remap("SR", [j, k]) B[vi, vj] = B[vi, vj] + A[vi, vj, vk] @T.prim_func def thread_bound_block_inside_init(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128, 128], dtype="float32") B = T.match_buffer(b, [128, 128], dtype="float32") for i in T.thread_binding(0, 128, thread="threadIdx.x"): with T.block("outer"): vi = T.axis.S(128, i) with T.init(): for j in T.serial(0, 128): with T.block("init"): vj = T.axis.S(128, j) B[vi, vj] = 0.0 for k in T.serial(0, 128): for j in T.serial(0, 128): with T.block("inner"): vj, vk = T.axis.remap("SR", [j, k]) B[vi, vj] = B[vi, vj] + A[vi, vj, vk] @T.prim_func def decomposed_gemm( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ): local = T.alloc_buffer((16, 16), "float32") for i, j in T.grid(4, 4): for ii, jj in T.grid(4, 4): with T.block("init"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) local[vi, vj] = 0 for k, ii, jj in T.grid(16, 4, 4): with T.block("update"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) vk = 
T.axis.R(16, k) local[vi, vj] += A[vi, vk] * B[vj, vk] for ii, jj in T.grid(4, 4): with T.block("C"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) C[vi, vj] = local[vi, vj] @T.prim_func def decomposed_gemm_after_vectorize( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ): local = T.alloc_buffer((16, 16), "float32") for i, j in T.grid(4, 4): for ii, jj in T.grid(4, 4): with T.block("init"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) local[vi, vj] = 0 for k, ii, jj in T.grid(16, 4, 4): with T.block("update"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) vk = T.axis.R(16, k) local[vi, vj] += A[vi, vk] * B[vj, vk] for ii in range(4): for jj in T.vectorized(4): with T.block("C"): vi = T.axis.S(16, i * 4 + ii) vj = T.axis.S(16, j * 4 + jj) C[vi, vj] = local[vi, vj] @T.prim_func def nested_block_bind( A: T.Buffer((16, 16, 16, 16), "float32"), B: T.Buffer((16, 16, 16), "float32") ): for i, j in T.grid(16, 16): with T.block("outer"): vi, vj = T.axis.remap("SS", [i, j]) for k, l in T.grid(16, 16): with T.block("inner"): vk, vl = T.axis.remap("SR", [k, l]) with T.init(): B[vi, vj, vk] = 0.0 B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl] @T.prim_func def thread_bound_nested_block( A: T.Buffer((16, 16, 16, 16), "float32"), B: T.Buffer((16, 16, 16), "float32") ) -> None: for i in T.serial(16): for j in T.thread_binding(16, thread="blockIdx.x"): with T.block("outer"): vi, vj = T.axis.remap("SS", [i, j]) for k in T.serial(16): for l in T.thread_binding(16, thread="threadIdx.x"): with T.block("inner"): vk, vl = T.axis.remap("SR", [k, l]) with T.init(): B[vi, vj, vk] = T.float32(0) B[vi, vj, vk] = B[vi, vj, vk] + A[vi, vj, vk, vl] @T.prim_func def nested_block_bind_after_cache_read( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16,), "float32") ) -> None: for i in T.serial(16): with T.block("outer"): vi = T.axis.spatial(16, i) A_shared = T.alloc_buffer([1, 16], dtype="float32", 
scope="shared") for ax0, ax1 in T.grid(1, 16): with T.block("A_shared"): v0 = T.axis.spatial(16, vi + ax0) v1 = T.axis.spatial(16, ax1) A_shared[v0, v1] = A[v0, v1] for j in T.serial(16): with T.block("inner"): vj = T.axis.reduce(16, j) with T.init(): B[vi] = T.float32(0) B[vi] = B[vi] + A_shared[vi, vj] @T.prim_func def thread_bound_nested_block_after_cache_read( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16,), "float32") ) -> None: for i in T.thread_binding(16, thread="blockIdx.x"): with T.block("outer"): vi = T.axis.spatial(16, i) A_shared = T.alloc_buffer([1, 16], dtype="float32", scope="shared") for ax0, ax1 in T.grid(1, 16): with T.block("A_shared"): v0 = T.axis.spatial(16, vi + ax0) v1 = T.axis.spatial(16, ax1) A_shared[v0, v1] = A[v0, v1] for j in T.thread_binding(16, thread="threadIdx.x"): with T.block("inner"): vj = T.axis.reduce(16, j) with T.init(): B[vi] = T.float32(0) B[vi] = B[vi] + A_shared[vi, vj] @T.prim_func def decomposed_gemm_parallelize_init( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: local = T.alloc_buffer([16, 16], dtype="float32") for i, j in T.grid(4, 4): for ii in T.serial(4): for jj in T.vectorized(4): with T.block("init"): vi = T.axis.spatial(16, i * 4 + ii) vj = T.axis.spatial(16, j * 4 + jj) T.reads() T.writes(local[vi, vj]) local[vi, vj] = 0 for k, ii, jj in T.grid(16, 4, 4): with T.block("update"): vi = T.axis.spatial(16, i * 4 + ii) vj = T.axis.spatial(16, j * 4 + jj) vk = T.axis.reduce(16, k) T.reads(local[vi, vj], A[vi, vk], B[vj, vk]) T.writes(local[vi, vj]) local[vi, vj] = local[vi, vj] + A[vi, vk] * B[vj, vk] for ii, jj in T.grid(4, 4): with T.block("C"): vi = T.axis.spatial(16, i * 4 + ii) vj = T.axis.spatial(16, j * 4 + jj) T.reads(local[vi, vj]) T.writes(C[vi, vj]) C[vi, vj] = local[vi, vj] @T.prim_func def scatter_compute(A: T.Buffer((16,), "float32"), B: T.Buffer((16,), "float32")): for i in T.grid(8): with T.block("first_half"): vi = 
T.axis.spatial(16, 8 + i) B[vi] = A[vi - 8] for i in T.grid(8): with T.block("last_half"): vi = T.axis.spatial(16, i) B[vi] = A[vi + 8] @T.prim_func def scatter_compute_parallelize( A: T.Buffer((16,), "float32"), B: T.Buffer((16,), "float32") ) -> None: # body # with T.block("root") for i in T.parallel(8): with T.block("first_half"): vi = T.axis.spatial(16, 8 + i) T.reads(A[vi - 8]) T.writes(B[vi]) B[vi] = A[vi - 8] for i in T.parallel(8): with T.block("last_half"): vi = T.axis.spatial(16, i) T.reads(A[vi + 8]) T.writes(B[vi]) B[vi] = A[vi + 8] # pylint: enable=no-member,invalid-name,unused-variable def test_parallel(): s = tir.Schedule(element_wise, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) s.parallel(i) tvm.ir.assert_structural_equal(s.mod["main"], element_wise_parallelized) verify_trace_roundtrip(s, mod=element_wise) def test_parallel_predicate(): s = tir.Schedule(element_wise_split_predicate, debug_mask="all") _, j, _ = s.get_loops(s.get_block("B")) s.parallel(j) tvm.ir.assert_structural_equal(s.mod["main"], element_wise_split_predicate_parallelized) verify_trace_roundtrip(s, mod=element_wise_split_predicate) def test_parallel_reduction_block_iter(): s = tir.Schedule(matmul, debug_mask="all") _, _, k = s.get_loops(s.get_block("C")) with pytest.raises(tvm.tir.ScheduleError): s.parallel(k) def test_parallel_not_quasi_affine(): s = tir.Schedule(rowsum_not_quasi_affine, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) with pytest.raises(tvm.tir.ScheduleError): s.parallel(i) def test_parallel_not_compact_data_flow(): s = tir.Schedule(rowsum_not_compact_data_flow, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) with pytest.raises(tvm.tir.ScheduleError): s.parallel(i) def test_vectorize(): s = tir.Schedule(element_wise_compute_at_split, debug_mask="all") _, _, j1i = s.get_loops(s.get_block("C")) s.vectorize(j1i) tvm.ir.assert_structural_equal(s.mod["main"], element_wise_compute_at_split_vectorized) verify_trace_roundtrip(s, 
mod=element_wise_compute_at_split) def test_vectorize_predicate(): s = tir.Schedule(element_wise_split_predicate, debug_mask="all") i, _, _ = s.get_loops(s.get_block("B")) s.vectorize(i) tvm.ir.assert_structural_equal(s.mod["main"], element_wise_split_predicate_vectorized) verify_trace_roundtrip(s, mod=element_wise_split_predicate) def test_vectorize_opaque_block(): s = tir.Schedule(opaque_block, debug_mask="all") (i,) = s.get_loops(s.get_block("opaque")) with pytest.raises(tvm.tir.ScheduleError): s.vectorize(i) def test_unroll(): s = tir.Schedule(rowsum, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) s.unroll(i) tvm.ir.assert_structural_equal(s.mod["main"], rowsum_unrolled) verify_trace_roundtrip(s, mod=rowsum) def test_unroll_after_bind(): s = tir.Schedule(rowsum, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) s.bind(i, "blockIdx.x") s.unroll(i) tvm.ir.assert_structural_equal(s.mod["main"], rowsum_unrolled) verify_trace_roundtrip(s, mod=rowsum) def test_bind1(): s = tir.Schedule(element_wise, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) s.bind(i, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], element_wise_i_bound) verify_trace_roundtrip(s, mod=element_wise) def test_bind2(): s = tir.Schedule(element_wise_compute_at_split, debug_mask="all") _, j0 = s.get_loops(s.get_block("B")) _, j1o, _ = s.get_loops(s.get_block("C")) s.bind(j0, "threadIdx.x") s.bind(j1o, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], element_wise_compute_at_split_j0_j1o_bound) verify_trace_roundtrip(s, mod=element_wise_compute_at_split) def test_bind_cross_thread_reduction(): s = tir.Schedule(rowsum, debug_mask="all") _, k = s.get_loops(s.get_block("B")) s.bind(k, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], rowsum_cross_thread_reduction) verify_trace_roundtrip(s, mod=rowsum) def test_bind_not_cross_thread_reduction(): s = tir.Schedule(rowsum, debug_mask="all") _, k = s.get_loops(s.get_block("B")) with 
pytest.raises(tvm.tir.ScheduleError): s.bind(k, "blockIdx.x") def test_bind_after_bind(): s = tir.Schedule(element_wise, debug_mask="all") i, _ = s.get_loops(s.get_block("B")) s.bind(i, "blockIdx.x") s.bind(i, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], element_wise_i_bound) verify_trace_roundtrip(s, mod=element_wise) def test_block_inside_init(): s = tir.Schedule(block_inside_init, debug_mask="all") (i,) = s.get_loops(s.get_block("outer")) s.bind(i, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_block_inside_init) verify_trace_roundtrip(s, mod=block_inside_init) def test_vectorize_after_decompose(): s = tir.Schedule(decomposed_gemm, debug_mask="all") jj = s.get_loops(s.get_block("C"))[-1] s.vectorize(jj) tvm.ir.assert_structural_equal(s.mod["main"], decomposed_gemm_after_vectorize) verify_trace_roundtrip(s, mod=decomposed_gemm) def test_nested_block_bind(): s = tir.Schedule(nested_block_bind) block_outer = s.get_block("outer") block_inner = s.get_block("inner") _, j = s.get_loops(block_outer) _, l = s.get_loops(block_inner) s.bind(l, "threadIdx.x") s.bind(j, "blockIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_nested_block) verify_trace_roundtrip(s, mod=nested_block_bind) def test_nexted_block_bind_after_cache_read(): s = tir.Schedule(nested_block_bind_after_cache_read) block_outer = s.get_block("outer") block_inner = s.get_block("inner") (i,) = s.get_loops(block_outer) (j,) = s.get_loops(block_inner) s.bind(i, "blockIdx.x") s.bind(j, "threadIdx.x") tvm.ir.assert_structural_equal(s.mod["main"], thread_bound_nested_block_after_cache_read) verify_trace_roundtrip(s, mod=nested_block_bind_after_cache_read) def test_vectorize_init(): s = tir.Schedule(decomposed_gemm, debug_mask="all") init_blk = s.get_block("init") upd_blk = s.get_block("update") _, _, ii_0, jj_0 = s.get_loops(init_blk) _, _, k_1, ii_1, jj_1 = s.get_loops(upd_blk) s.vectorize(jj_0) tvm.ir.assert_structural_equal(s.mod["main"], 
decomposed_gemm_parallelize_init) verify_trace_roundtrip(s, mod=decomposed_gemm) def test_scatter_parallelize(): s = tir.Schedule(scatter_compute, debug_mask="all") first = s.get_block("first_half") last = s.get_block("last_half") (i_0,) = s.get_loops(first) (i_1,) = s.get_loops(last) s.parallel(i_0) s.parallel(i_1) tvm.ir.assert_structural_equal(s.mod["main"], scatter_compute_parallelize) verify_trace_roundtrip(s, mod=scatter_compute) if __name__ == "__main__": tvm.testing.main()
22,842
33.610606
93
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_x86.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import platform import pytest import re import textwrap import tvm from tvm import te llvm_version = tvm.target.codegen.llvm_version_major() machine = platform.machine() if machine not in ["i386", "x86_64", "AMD64", "amd64"]: pytest.skip(f"Requires x86_64/i386, but machine is {machine}", allow_module_level=True) @tvm.testing.requires_llvm @pytest.mark.skipif(llvm_version < 6, reason=f"Requires LLVM 6+, got {llvm_version}") def test_fp16_to_fp32(): def fp16_to_fp32(target, width, match=None, not_match=None): elements = 64 n = tvm.runtime.convert(elements) A = te.placeholder((n, width), dtype="float16", name="A") B = te.compute(A.shape, lambda *i: A(*i).astype("float32"), name="B") s = te.create_schedule(B.op) s[B].vectorize(s[B].op.axis[1]) f = tvm.build(s, [A, B], target) assembly = f.get_source("asm").splitlines() if match: matches = [l for l in assembly if re.search(match, l)] assert matches if not_match: not_matches = [l for l in assembly if re.search(not_match, l)] assert not not_matches fp16_to_fp32("llvm -mcpu=skylake-avx512", 15, match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=skylake-avx512", 16, match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=skylake-avx512", 17, 
match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=skylake-avx512", 49, match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-avx512f", 49, match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=skylake-avx512 -mattr=-f16c,-avx512f", 49, not_match="vcvtph2ps") fp16_to_fp32("llvm -mcpu=core-avx2", 8, match="vcvtph2ps.*mm") fp16_to_fp32("llvm -mcpu=core-avx2", 9, match="vcvtph2ps.*mm") fp16_to_fp32("llvm", 9, not_match="vcvtph2ps") is_32bit = platform.architecture()[0] == "32bit" @tvm.testing.requires_llvm @pytest.mark.skipif(is_32bit, reason=f"Fails in CI due to architecture mismatch in JIT") @pytest.mark.parametrize("feature_string", ["-sse2", "+sse2"]) def test_fp16_fp32_conversions(feature_string): relay_model = textwrap.dedent( """ #[version = "0.0.5"] def @main(%inp : Tensor[(3), float32], %cst : Tensor[(3), float32]) { %1 = cast(%inp, dtype="float16"); %2 = cast(%cst, dtype="float16"); %3 = add(%1, %2); %4 = cast(%3, dtype="float32"); %4 } """ ) ir_mod = tvm.relay.fromtext(relay_model) arch = "i386" if machine == "i386" else "x86_64" aot_factory = tvm.relay.build( ir_mod, params={"cst": np.array([1.0, 2.0, 3.0], dtype="float32")}, target=f"llvm --mtriple={arch} --mattr={feature_string}", executor=tvm.relay.backend.Executor( "aot", {"interface-api": "packed", "unpacked-api": False} ), ) mod_name = aot_factory["list_module_names"]()[0] executor = aot_factory[mod_name] mod = executor(tvm.cpu(0)) inp = tvm.nd.array(np.array([1.1, 2.1, 3.1], dtype="float32"), device=tvm.cpu(0)) mod.get_function("set_input")(0, inp) mod.get_function("run")() out = mod.get_function("get_output")(0) expected = np.array([2.1, 4.1, 6.1], dtype="float32") np.testing.assert_allclose(out.asnumpy(), expected, rtol=1e-3) if __name__ == "__main__": test_fp16_to_fp32()
4,180
36.330357
94
py
tvm
tvm-main/tests/python/unittest/test_autotvm_ga_tuner.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test genetic algorithm tuner""" from tvm.testing.autotvm import DummyRunner, get_sample_task from tvm import autotvm def test_ga_tuner(): """Test GATuner""" # Test population size smaller than space size tuning configuration task, _ = get_sample_task() tuner = autotvm.tuner.GATuner(task, pop_size=32) valid_indexes = list( filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length)) ) assert tuner.visited.issubset(valid_indexes) assert tuner.pop_size == len(tuner.visited) == len(tuner.genes) assert len(tuner.space) == 64 measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) tuner.tune(n_trial=len(tuner.space), measure_option=measure_option) assert tuner.visited.issubset(valid_indexes) # Test population size bigger than space size tuning configuration task, _ = get_sample_task() tuner = autotvm.tuner.GATuner(task, pop_size=100) valid_indexes = list( filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length)) ) assert tuner.visited.issubset(valid_indexes) assert tuner.pop_size == len(tuner.visited) == len(tuner.genes) assert len(tuner.space) == 64 measure_option = 
autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) tuner.tune(n_trial=len(tuner.space), measure_option=measure_option) assert tuner.visited.issubset(valid_indexes) # Test population size smaller than multi-filtered space size tuning configuration task, _ = get_sample_task() task.config_space.multi_filter( filter=lambda entity: 8 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) tuner = autotvm.tuner.GATuner(task, pop_size=32) valid_indexes = list( filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length)) ) assert tuner.visited.issubset(valid_indexes) assert tuner.pop_size == len(tuner.visited) == len(tuner.genes) assert len(tuner.space) == 43 measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) tuner.tune(n_trial=len(tuner.space), measure_option=measure_option) assert tuner.visited.issubset(valid_indexes) # Test population size bigger than multi-filtered space size tuning configuration task, _ = get_sample_task() task.config_space.multi_filter( filter=lambda entity: 8 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) tuner = autotvm.tuner.GATuner(task, pop_size=100) valid_indexes = list( filter(lambda idx: tuner.space.is_index_valid(idx), range(tuner.space.range_length)) ) assert tuner.visited.issubset(valid_indexes) assert tuner.pop_size == len(tuner.visited) == len(tuner.genes) assert len(tuner.space) == 43 measure_option = autotvm.measure_option(builder=autotvm.LocalBuilder(), runner=DummyRunner()) tuner.tune(n_trial=len(tuner.space), measure_option=measure_option) assert tuner.visited.issubset(valid_indexes) if __name__ == "__main__": test_ga_tuner()
3,922
42.588889
97
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_combine_context_call.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T, ir as I def _device_context(dev_type, dev_id): ctx = tvm.tir.call_extern("handle", "device_context", dev_type, dev_id) return tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx]) class TestCombineContextsInLoop(tvm.testing.CompareBeforeAfter): """Device contexts should be hoisted and merged""" transform = tvm.tir.transform.CombineContextCall() def before(self): @T.prim_func def func(dev_type: T.int32, n: T.int32): T.func_attr({"target": T.target("llvm")}) A = T.allocate([n], "float32", "global") for i in range(n): T.call_extern( "int32", "fadd", _device_context(dev_type, 0), A, ) for j in range(10): T.call_extern( "int32", "fadd", _device_context(dev_type, 1), A, ) T.call_extern( "int32", "fadd", _device_context(dev_type, 0), A, ) return func def expected(dev_type: T.int32, n: T.int32): T.func_attr({"target": T.target("llvm")}) ctx_cache_: T.handle = T.call_extern("handle", "device_context", dev_type, 0) ctx_cache__1: T.handle = T.call_extern("handle", "device_context", dev_type, 1) A = T.allocate([n], "float32", "global") for i in range(n): T.call_extern("int32", "fadd", ctx_cache_, A) for j in range(10): T.call_extern("int32", "fadd", 
ctx_cache__1, A) T.call_extern("int32", "fadd", ctx_cache_, A) class TestCombineContextsInLoopWithoutTarget(TestCombineContextsInLoop): """CombineContextCall only updates host-side functions""" def before(self): @T.prim_func def func(dev_type: T.int32, n: T.int32): A = T.allocate([n], "float32", "global") for i in range(n): T.call_extern( "int32", "fadd", _device_context(dev_type, 0), A, ) for j in range(10): T.call_extern( "int32", "fadd", _device_context(dev_type, 1), A, ) T.call_extern( "int32", "fadd", _device_context(dev_type, 0), A, ) return func expected = before if __name__ == "__main__": tvm.testing.main()
3,690
32.862385
87
py
tvm
tvm-main/tests/python/unittest/test_auto_scheduler_evolutionary_search.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """ Test evolutionary search. """ import tvm import pytest from tvm.testing.auto_scheduler import matmul_auto_scheduler_test from tvm import auto_scheduler, te from tvm.auto_scheduler.cost_model.cost_model import PythonBasedModel def test_mutate_tile_size(): """ The test case initializes evo search with a batch of "bad" states and check whether the search algorithm can find "good" states by mutating the "bad" states. This unit test has been tested with 1,000 runs with no failures, meaning that the failure rate is less than 0.1%. 
""" class MockCostModel(PythonBasedModel): """A mock cost model that rates 1 only for the states with tile_k=2.""" @staticmethod def is_good_state(state): for line in str(state).split("\n"): if line.find("k.1") != -1 and line.find("(0,2)") != -1: return True return False def predict(self, task, states): scores = [] for state in states: scores.append(1 if self.is_good_state(state) else 0) return scores task = auto_scheduler.SearchTask( func=matmul_auto_scheduler_test, args=(10, 10, 4), target=tvm.target.Target("llvm") ) policy = auto_scheduler.SketchPolicy(task, program_cost_model=MockCostModel(), verbose=0) states = policy.sample_initial_population()[:50] bad_states = [] for state in states: if not MockCostModel.is_good_state(state): bad_states.append(state) new_states = policy.evolutionary_search(bad_states, 50) found = False for state in new_states: if MockCostModel.is_good_state(state): found = True break assert found @pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11440") def test_mutate_parallel(): """ The test case initializes evo search with a batch of "bad" states and check whether the search algorithm can find "good" states by mutating the "bad" states. 
""" class MockCostModel(PythonBasedModel): @staticmethod def is_good_state(state): for line in str(state).split("\n"): if ( line.find("parallel i.0@ (0") != -1 or line.find("parallel i.0@j.0@ (0") != -1 or line.find("parallel i.0@j.0@i.1@ (0") != -1 ): return True return False def predict(self, task, states): scores = [] for state in states: scores.append(1 if self.is_good_state(state) else 0) return scores task = auto_scheduler.SearchTask( func=matmul_auto_scheduler_test, args=(1024, 1024, 1024), target="llvm" ) policy = auto_scheduler.SketchPolicy(task, program_cost_model=MockCostModel(), verbose=0) found = False retry_ct = 0 while retry_ct < 10 and not found: states = policy.sample_initial_population()[:100] bad_states = [] for state in states: if not MockCostModel.is_good_state(state): bad_states.append(state) new_states = policy.evolutionary_search(bad_states, 50) for state in new_states: if MockCostModel.is_good_state(state): found = True break retry_ct += 1 assert found if __name__ == "__main__": test_mutate_tile_size() test_mutate_parallel()
4,274
33.756098
93
py
tvm
tvm-main/tests/python/unittest/test_autotvm_space.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test space definition primitives""" from tvm import te from tvm.autotvm.task.space import ConfigSpace, FallbackConfigEntity def gemm_func(cfg, N, filter_y=None, filter_x=None): A = te.placeholder((N, N), name="A") B = te.placeholder((N, N), name="B") k = te.reduce_axis((0, N), name="k") C = te.compute((N, N), lambda i, j: te.sum(A[i, k] * B[k, j], axis=[k]), name="C") s = te.create_schedule([C.op]) y, x = s[C].op.axis cfg.define_split("tile_y", cfg.axis(y), num_outputs=2, filter=filter_y) cfg.define_split("tile_x", cfg.axis(x), num_outputs=2, filter=filter_x) return s, [A, B, C] def test_split(): cfg = ConfigSpace() gemm_func(cfg, 128) assert cfg.range_length == 64 assert len(cfg.space_map["tile_y"]) == 8 # test policy cfg = ConfigSpace() cfg.define_split("tile_x", cfg.axis(256), policy="factors", num_outputs=3) assert len(cfg.space_map["tile_x"]) == 45 cfg.define_split("tile_y", cfg.axis(256), policy="power2", num_outputs=3) assert len(cfg.space_map["tile_y"]) == 45 cfg.define_split("tile_z", cfg.axis(256), policy="verbose", num_outputs=3) assert len(cfg.space_map["tile_z"]) == 45 cfg.define_split("tile_a", cfg.axis(224), policy="factors", num_outputs=3) assert len(cfg.space_map["tile_a"]) == 63 
cfg.define_split("tile_b", cfg.axis(224), policy="power2", num_outputs=3) assert len(cfg.space_map["tile_b"]) == 36 cfg.define_split("tile_c", cfg.axis(224), policy="verbose", num_outputs=3) assert len(cfg.space_map["tile_c"]) == 84 # Count the number of non-negative integer solutions of a + b + c + d = n def count4(n): cnt = 0 for a in range(0, n + 1): for b in range(0, n - a + 1): cnt += n - a - b + 1 return cnt # test overflow n = 25 cfg = ConfigSpace() cfg.define_split("x", cfg.axis(2**n), policy="factors", num_outputs=4) # count4(25) is 3276. assert len(cfg.space_map["x"]) == count4(n) # test fallback cfg = FallbackConfigEntity() cfg.define_split("tile_n", cfg.axis(128), num_outputs=3) cfg.fallback_split("tile_n", [-1, 8, 4]) # verify if define_split override previously manualy defined split params cfg.define_split("tile_n", cfg.axis(128), num_outputs=3) assert cfg["tile_n"].size == [4, 8, 4] cfg = FallbackConfigEntity() cfg.define_split("tile_n", cfg.axis(49), num_outputs=3) cfg.fallback_split("tile_n", [-1, 8, 4]) assert cfg["tile_n"].size == [7, 7, 1] cfg = FallbackConfigEntity() cfg.define_split("tile_n", cfg.axis(49), num_outputs=3) try: cfg.fallback_split("tile_n", [-1, 1, 0]) assert False except RuntimeError: pass def _raises_exception(f): try: f() except Exception: return True return False def test_multi_filter(): # create config without multi_filter cfg = ConfigSpace() gemm_func(cfg, 128) # create config with multi_filter cfg_mf = ConfigSpace() gemm_func(cfg_mf, 128) cfg_mf.multi_filter( filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) # test len assert len(cfg) == 64 assert len(cfg_mf) == 34 # test range_length assert cfg.range_length == 64 assert cfg_mf.range_length == 64 # test dims assert cfg.dims == [8, 8] assert cfg_mf.dims == [8, 8] # test is_index_valid assert cfg.is_index_valid(0) is True assert cfg.is_index_valid(15) is True assert cfg_mf.is_index_valid(0) is False assert cfg_mf.is_index_valid(15) is 
True # test get assert _raises_exception(lambda: cfg.get(0)) is False assert _raises_exception(lambda: cfg.get(15)) is False assert _raises_exception(lambda: cfg_mf.get(0)) is True assert _raises_exception(lambda: cfg_mf.get(15)) is False # test subrange_length assert cfg.subrange_length(0, 64) == 64 assert cfg.subrange_length(0, 32) == 32 assert cfg.subrange_length(16, 32) == 16 assert cfg.subrange_length(16, 16) == 0 assert _raises_exception(lambda: cfg.subrange_length(0, 128)) assert _raises_exception(lambda: cfg.subrange_length(-64, 64)) assert _raises_exception(lambda: cfg.subrange_length(64, 0)) assert cfg_mf.subrange_length(0, 64) == 34 assert cfg_mf.subrange_length(0, 32) == 17 assert cfg_mf.subrange_length(16, 32) == 10 assert cfg_mf.subrange_length(16, 16) == 0 assert _raises_exception(lambda: cfg_mf.subrange_length(0, 128)) assert _raises_exception(lambda: cfg_mf.subrange_length(-64, 64)) assert _raises_exception(lambda: cfg_mf.subrange_length(64, 0)) # test point2knob assert cfg.point2knob(0) == [0, 0] assert cfg.point2knob(4) == [4, 0] assert cfg.point2knob(8) == [0, 1] assert cfg.point2knob(12) == [4, 1] assert cfg_mf.point2knob(0) == [0, 0] assert cfg_mf.point2knob(4) == [4, 0] assert cfg_mf.point2knob(8) == [0, 1] assert cfg_mf.point2knob(12) == [4, 1] # test knob2point assert cfg.knob2point([0, 0]) == 0 assert cfg.knob2point([4, 0]) == 4 assert cfg.knob2point([0, 1]) == 8 assert cfg.knob2point([4, 1]) == 12 assert cfg_mf.knob2point([0, 0]) == 0 assert cfg_mf.knob2point([4, 0]) == 4 assert cfg_mf.knob2point([0, 1]) == 8 assert cfg_mf.knob2point([4, 1]) == 12 # get_rand_index cfg_valid_indexes = list(filter(lambda idx: cfg.is_index_valid(idx), range(cfg.range_length))) assert cfg.get_rand_index() in cfg_valid_indexes assert cfg.get_rand_index(start=15, end=16) == 15 assert 10 <= cfg.get_rand_index(start=10, end=20) < 20 assert cfg.get_rand_index(to_exclude=cfg_valid_indexes[:-1]) == cfg_valid_indexes[-1:][0] cfg_mf_valid_indexes = list( filter(lambda 
idx: cfg_mf.is_index_valid(idx), range(cfg_mf.range_length)) ) assert cfg_mf.get_rand_index() in cfg_mf_valid_indexes assert cfg_mf.get_rand_index(start=15, end=16) == 15 assert 10 <= cfg_mf.get_rand_index(start=10, end=20) < 20 assert ( cfg_mf.get_rand_index(to_exclude=cfg_mf_valid_indexes[:-1]) == cfg_mf_valid_indexes[-1:][0] ) # get_next_index assert cfg.get_next_index(0) == 1 assert cfg.get_next_index(0, 1) == 1 assert cfg.get_next_index(0, 2) == 2 assert cfg.get_next_index(0, -1) is None assert cfg.get_next_index(0, -2) is None assert cfg.get_next_index(63) is None assert cfg.get_next_index(63, 1) is None assert cfg.get_next_index(63, 2) is None assert cfg.get_next_index(63, -1) == 62 assert cfg.get_next_index(63, -2) == 61 assert cfg.get_next_index(60, 1, end=63) == 61 assert cfg.get_next_index(63, -1, start=60) == 62 assert cfg_mf.get_next_index(0) == 5 assert cfg_mf.get_next_index(0, 1) == 5 assert cfg_mf.get_next_index(0, 2) == 6 assert cfg_mf.get_next_index(0, -1) is None assert cfg_mf.get_next_index(0, -2) is None assert cfg_mf.get_next_index(63) is None assert cfg_mf.get_next_index(63, 1) is None assert cfg_mf.get_next_index(63, 2) is None assert cfg_mf.get_next_index(63, -1) == 58 assert cfg_mf.get_next_index(63, -2) == 57 assert cfg_mf.get_next_index(60, 1, end=63) is None assert cfg_mf.get_next_index(63, -1, start=60) is None # test sample_ints cfg_ints = cfg.sample_ints(5) assert len(cfg_ints) == 5 assert set(cfg_ints).issubset(cfg_valid_indexes) cfg_mf_ints = cfg_mf.sample_ints(5) assert len(cfg_mf_ints) == 5 assert set(cfg_mf_ints).issubset(cfg_mf_valid_indexes) # test random_walk cfg_walk = cfg.random_walk(15) assert cfg_walk != 15 assert cfg_walk in cfg_valid_indexes cfg_mf_walk = cfg_mf.random_walk(15) assert cfg_mf_walk != 15 assert cfg_mf_walk in cfg_mf_valid_indexes def test_filter_and_multi_filter(): # test the order: filter -> multi_filter cfg = ConfigSpace() gemm_func(cfg, 128, filter_y=lambda y: y.size[-1] < 64) # after adding filter 
assert len(cfg) == 48 assert cfg.range_length == 48 cfg.multi_filter( filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) # after adding multi_filter assert len(cfg) == 27 assert cfg.range_length == 48 # test the order: multi_filter -> filter cfg = ConfigSpace() s, (A, B, C) = gemm_func(cfg, 128, filter_y=None) cfg.multi_filter( filter=lambda entity: 32 <= (entity["tile_x"].size[1] * entity["tile_y"].size[1]) < 1024 ) # after adding multi_filter assert len(cfg) == 34 assert cfg.range_length == 64 y, x = s[C].op.axis cfg.define_split("tile_y", cfg.axis(y), num_outputs=2, filter=lambda y: y.size[-1] < 64) # after adding filter assert len(cfg) == 27 assert cfg.range_length == 48 if __name__ == "__main__": test_split() test_multi_filter() test_filter_and_multi_filter()
9,746
35.920455
99
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_roundtrip.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import pytest import tvm import tvm.testing from tvm import tir from tvm.script import tir as T, ir as I import numpy as np def opt_gemm_normalize(): @tvm.script.ir_module class Module: @T.prim_func def mmult(A: T.handle, B: T.handle, C: T.handle) -> None: # function attr dict T.func_attr({"global_symbol": "mmult", "tir.noalias": True}) # buffer definition C_global = T.Buffer([1024, 1024], elem_offset=0, align=64, offset_factor=1) packedB = T.Buffer([32, 1024, 32], elem_offset=0, align=64, offset_factor=1) A_1 = T.match_buffer(A, [1024, 1024], elem_offset=0, align=64, offset_factor=1) B_1 = T.match_buffer(B, [1024, 1024], elem_offset=0, align=64, offset_factor=1) C_1 = T.match_buffer(C, [1024, 1024], elem_offset=0, align=64, offset_factor=1) # body T.realize(packedB[0:32, 0:1024, 0:32], "") for x in T.parallel(0, 32): for y in T.serial(0, 1024): for z in T.vectorized(0, 32): packedB[x, y, z] = B_1[y, ((x * 32) + z)] T.realize(C_1[0:1024, 0:1024], "") for x_outer in T.parallel(0, 32): for y_outer in T.serial(0, 32): T.realize( C_global[ (x_outer * 32) : ((x_outer * 32) + 32), (y_outer * 32) : ((y_outer * 32) + 32), ], "global", ) for x_c_init in T.serial(0, 32): for y_c_init in 
T.vectorized(0, 32): C_global[ (x_c_init + (x_outer * 32)), (y_c_init + (y_outer * 32)) ] = T.float32(0) for k_outer in T.serial(0, 256): for x_c in T.serial(0, 32): for k_inner in T.unroll(0, 4): for y_c in T.vectorized(0, 32): C_global[ (x_c + (x_outer * 32)), (y_c + (y_outer * 32)) ] = C_global[(x_c + (x_outer * 32)), (y_c + (y_outer * 32))] + ( A_1[(x_c + (x_outer * 32)), (k_inner + (k_outer * 4))] * packedB[ T.floordiv((y_c + (y_outer * 32)), 32), (k_inner + (k_outer * 4)), T.floormod((y_c + (y_outer * 32)), 32), ] ) for x_inner in T.serial(0, 32): for y_inner in T.serial(0, 32): C_1[(x_inner + (x_outer * 32)), (y_inner + (y_outer * 32))] = C_global[ (x_inner + (x_outer * 32)), (y_inner + (y_outer * 32)) ] return Module def opt_gemm_lower(): @tvm.script.ir_module class Module: @T.prim_func def mmult(A: T.handle, B: T.handle, C: T.handle) -> None: # function attr dict T.func_attr({"global_symbol": "mmult", "tir.noalias": True}) A_1 = T.match_buffer(A, [16384], elem_offset=0, align=64, offset_factor=1) B_1 = T.match_buffer(B, [1024, 1024], elem_offset=0, align=64, offset_factor=1) C_1 = T.match_buffer(C, [16384], elem_offset=0, align=64, offset_factor=1) # body packedB_data = T.allocate([32768], "float32", "global") packedB = T.Buffer(shape=[32768], dtype="float32", scope="global", data=packedB_data) for x in T.parallel(0, 32): for y in T.serial(0, 1024): packedB[T.ramp(((x * 32768) + (y * 32)), 1, 32)] = B_1[y, T.ramp(x * 32, 1, 32)] for x_outer in T.parallel(0, 32): C_global_data = T.allocate([1024], "float32", "global") C_global = T.Buffer( shape=[1024], dtype="float32", scope="global", data=C_global_data ) for y_outer in T.serial(0, 32): for x_c_init in T.serial(0, 32): C_global[T.ramp((x_c_init * 32), 1, 32)] = T.broadcast(T.float32(0), 32) for k_outer in T.serial(0, 256): for x_c in T.serial(0, 32): C_global[T.ramp((x_c * 32), 1, 32)] = C_global[ T.ramp((x_c * 32), 1, 32) ] + ( T.broadcast( A_1[ (((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)), ], 32, 
) * packedB[T.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32)] ) C_global[T.ramp((x_c * 32), 1, 32)] = C_global[ T.ramp((x_c * 32), 1, 32) ] + ( T.broadcast( A_1[ ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 1), ], 32, ) * packedB[ T.ramp((((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32) ] ) C_global[T.ramp((x_c * 32), 1, 32)] = C_global[ T.ramp((x_c * 32), 1, 32) ] + ( T.broadcast( A_1[ ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 2), ], 32, ) * packedB[ T.ramp((((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32) ] ) C_global[T.ramp((x_c * 32), 1, 32)] = C_global[ T.ramp((x_c * 32), 1, 32) ] + ( T.broadcast( A_1[ ((((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4)) + 3), ], 32, ) * packedB[ T.ramp((((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32) ] ) for x_inner in T.serial(0, 32): for y_inner in T.serial(0, 32): C_1[ ( (((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32)) + y_inner ) ] = C_global[((x_inner * 32) + y_inner)] return Module def launch_env_thread(): @T.prim_func def main(inputs: T.Buffer((64, 2, 4), "float32")) -> None: bx = T.launch_thread("blockIdx.x", 64) for i, j in T.grid(2, 4): T.evaluate(inputs[bx, i, j]) return main def opt_gemm_mod_host(): @tvm.script.ir_module class Module: @T.prim_func def mmult( args: T.handle, arg_type_ids: T.handle, num_args: T.int32, out_ret_value: T.handle, out_ret_tcode: T.handle, ) -> T.int32: # function attr dict T.func_attr( { "tir.noalias": True, "global_symbol": "mmult", "tir.is_entry_func": True, "calling_conv": 1, } ) # buffer definition buf_type_ids = T.match_buffer(arg_type_ids, [3], dtype="int32") packedB = T.Buffer([32768], dtype="float32") C_global = T.Buffer([1024], dtype="float32") # body assert num_args == 3, "mmult: num_args should be 3" arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle") arg0_code: T.int32 = buf_type_ids[0] arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle") arg1_code: T.int32 = buf_type_ids[1] arg2: T.handle = 
T.tvm_struct_get(args, 2, 12, dtype="handle") arg2_code: T.int32 = buf_type_ids[2] A_data: T.handle("int32") = T.tvm_struct_get(arg0, 0, 1, dtype="handle") T.attr(A_data, "storage_alignment", 128) A = T.Buffer([1024 * 1024], dtype="int32", data=A_data) buf0_shape_data: T.handle("int32") = T.tvm_struct_get(arg0, 0, 2, dtype="handle") buf0_shape = T.Buffer([2], dtype="int32", data=buf0_shape_data) buf0_strides_data: T.handle("int32") = T.tvm_struct_get(arg0, 0, 3, dtype="handle") buf0_strides = T.Buffer([2], dtype="int32", data=buf0_strides_data) dev_id: T.int32 = T.tvm_struct_get(arg0, 0, 9, dtype="int32") B_data: T.handle("int32") = T.tvm_struct_get(arg1, 0, 1, dtype="handle") T.attr(B_data, "storage_alignment", 128) B = T.Buffer([1024 * 1024], dtype="int32", data=B_data) buf1_shape_data: T.handle("int32") = T.tvm_struct_get(arg1, 0, 2, dtype="handle") buf1_shape = T.Buffer([2], dtype="int32", data=buf1_shape_data) buf1_strides_data: T.handle("int32") = T.tvm_struct_get(arg1, 0, 3, dtype="handle") buf1_strides = T.Buffer([2], dtype="int32", data=buf1_strides_data) C_data: T.handle("int32") = T.tvm_struct_get(arg2, 0, 1, dtype="handle") T.attr(C_data, "storage_alignment", 128) C = T.Buffer([1024 * 1024], dtype="int32", data=C_data) buf2_shape_data: T.handle("int32") = T.tvm_struct_get(arg2, 0, 2, dtype="handle") buf2_shape = T.Buffer([2], dtype="int32", data=buf2_shape_data) buf2_strides_data: T.handle("int32") = T.tvm_struct_get(arg2, 0, 3, dtype="handle") buf2_strides = T.Buffer([2], dtype="int32", data=buf2_strides_data) assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or ( arg0_code == 4 ), "mmult: Expect arg[0] to be pointer" assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or ( arg1_code == 4 ), "mmult: Expect arg[1] to be pointer" assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or ( arg2_code == 4 ), "mmult: Expect arg[2] to be pointer" assert 2 == T.tvm_struct_get( arg0, 0, 4, dtype="int32" ), 
"arg0.ndim is expected to equal 2" assert 2 == T.tvm_struct_get( arg0, 0, 4, dtype="int32" ), "arg0.ndim is expected to equal 2" assert ( (T.tvm_struct_get(arg0, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg0, 0, 6, dtype="uint8") == T.uint8(32)) ) and ( T.tvm_struct_get(arg0, 0, 7, dtype="uint16") == T.uint16(1) ), "arg0.dtype is expected to be float32" assert 1024 == T.cast( buf0_shape[0], "int32" ), "Argument arg0.shape[0] has an unsatisfied constraint" assert 1024 == T.cast( buf0_shape[1], "int32" ), "Argument arg0.shape[1] has an unsatisfied constraint" if not (T.isnullptr(buf0_strides.data, dtype="bool")): assert (1 == T.cast(buf0_strides[1], "int32")) and ( 1024 == T.cast(buf0_strides[0], "int32") ), "arg0.strides: expected to be compact array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg0, 0, 8, dtype="uint64" ), "Argument arg0.byte_offset has an unsatisfied constraint" assert 1 == T.tvm_struct_get( arg0, 0, 10, dtype="int32" ), "Argument arg0.device_type has an unsatisfied constraint" assert 2 == T.tvm_struct_get( arg1, 0, 4, dtype="int32" ), "arg1.ndim is expected to equal 2" assert 2 == T.tvm_struct_get( arg1, 0, 4, dtype="int32" ), "arg1.ndim is expected to equal 2" assert ( (T.tvm_struct_get(arg1, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg1, 0, 6, dtype="uint8") == T.uint8(32)) ) and ( T.tvm_struct_get(arg1, 0, 7, dtype="uint16") == T.uint16(1) ), "arg1.dtype is expected to be float32" assert 1024 == T.cast( buf1_shape[0], "int32" ), "Argument arg1.shape[0] has an unsatisfied constraint" assert 1024 == T.cast( buf1_shape[1], "int32" ), "Argument arg1.shape[1] has an unsatisfied constraint" if not (T.isnullptr(buf1_strides.data, dtype="bool")): assert (1 == T.cast(buf1_strides[1], "int32")) and ( 1024 == T.cast(buf1_strides[0], "int32") ), "arg1.strides: expected to be compact array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg1, 0, 8, dtype="uint64" ), "Argument arg1.byte_offset has an 
unsatisfied constraint" assert 1 == T.tvm_struct_get( arg1, 0, 10, dtype="int32" ), "Argument arg1.device_type has an unsatisfied constraint" assert dev_id == T.tvm_struct_get( arg1, 0, 9, dtype="int32" ), "Argument arg1.device_id has an unsatisfied constraint" assert 2 == T.tvm_struct_get( arg2, 0, 4, dtype="int32" ), "arg2.ndim is expected to equal 2" assert 2 == T.tvm_struct_get( arg2, 0, 4, dtype="int32" ), "arg2.ndim is expected to equal 2" assert ( (T.tvm_struct_get(arg2, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg2, 0, 6, dtype="uint8") == T.uint8(32)) ) and ( T.tvm_struct_get(arg2, 0, 7, dtype="uint16") == T.uint16(1) ), "arg2.dtype is expected to be float32" assert 1024 == T.cast( buf2_shape[0], "int32" ), "Argument arg2.shape[0] has an unsatisfied constraint" assert 1024 == T.cast( buf2_shape[1], "int32" ), "Argument arg2.shape[1] has an unsatisfied constraint" if not (T.isnullptr(buf2_strides.data, dtype="bool")): assert (1 == T.cast(buf2_strides[1], "int32")) and ( 1024 == T.cast(buf2_strides[0], "int32") ), "arg2.strides: expected to be compact array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg2, 0, 8, dtype="uint64" ), "Argument arg2.byte_offset has an unsatisfied constraint" assert 1 == T.tvm_struct_get( arg2, 0, 10, dtype="int32" ), "Argument arg2.device_type has an unsatisfied constraint" assert dev_id == T.tvm_struct_get( arg2, 0, 9, dtype="int32" ), "Argument arg2.device_id has an unsatisfied constraint" T.attr(0, "compute_scope", "mmult_compute_") T.attr(packedB.data, "storage_scope", "global") T.attr(packedB.data, "storage_alignment", 128) with T.LetStmt( T.TVMBackendAllocWorkspace(1, dev_id, T.uint64(4194304), 2, 32, dtype="handle"), var=packedB.data, ): if T.isnullptr(packedB.data, dtype="bool"): T.evaluate(T.tvm_throw_last_error(dtype="int32")) for x in T.parallel(0, 32): for y in T.serial(0, 1024): packedB[T.ramp(((x * 32768) + (y * 32)), 1, 32)] = B[ T.ramp(((y * 1024) + (x * 32)), 1, 32) ] for x_outer in 
T.parallel(0, 32): T.attr(C_global.data, "storage_scope", "global") T.attr(C_global.data, "storage_alignment", 128) with T.LetStmt( T.TVMBackendAllocWorkspace( 1, dev_id, T.uint64(4096), 2, 32, dtype="handle" ), var=C_global.data, ): if T.isnullptr(C_global.data, dtype="bool"): T.evaluate(T.tvm_throw_last_error(dtype="int32")) for y_outer in T.serial(0, 32): for x_c_init in T.serial(0, 32): C_global[T.ramp((x_c_init * 32), 1, 32)] = T.broadcast( T.float32(0), 32 ) for k_outer in T.serial(0, 256): for x_c in T.serial(0, 32): C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin( T.uint32(97), T.uint32(3), T.broadcast( A[ ( ((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4) ), ], 32, ), packedB[ T.ramp(((y_outer * 32768) + (k_outer * 128)), 1, 32) ], C_global[T.ramp((x_c * 32), 1, 32)], dtype="float32x32", ) C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin( T.uint32(97), T.uint32(3), T.broadcast( A[ ( ( ((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4) ) + 1 ), ], 32, ), packedB[ T.ramp( (((y_outer * 32768) + (k_outer * 128)) + 32), 1, 32 ) ], C_global[T.ramp((x_c * 32), 1, 32)], dtype="float32x32", ) C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin( T.uint32(97), T.uint32(3), T.broadcast( A[ ( ( ((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4) ) + 2 ), ], 32, ), packedB[ T.ramp( (((y_outer * 32768) + (k_outer * 128)) + 64), 1, 32 ) ], C_global[T.ramp((x_c * 32), 1, 32)], dtype="float32x32", ) C_global[T.ramp((x_c * 32), 1, 32)] = T.call_llvm_pure_intrin( T.uint32(97), T.uint32(3), T.broadcast( A[ ( ( ((x_outer * 32768) + (x_c * 1024)) + (k_outer * 4) ) + 3 ), ], 32, ), packedB[ T.ramp( (((y_outer * 32768) + (k_outer * 128)) + 96), 1, 32 ) ], C_global[T.ramp((x_c * 32), 1, 32)], dtype="float32x32", ) for x_inner in T.serial(0, 32): for y_inner in T.serial(0, 32): C[ ( ( ((x_outer * 32768) + (x_inner * 1024)) + (y_outer * 32) ) + y_inner ) ] = C_global[((x_inner * 32) + y_inner)] if T.TVMBackendFreeWorkspace(1, dev_id, 
C_global.data, dtype="int32") != 0: T.evaluate(T.tvm_throw_last_error(dtype="int32")) if T.TVMBackendFreeWorkspace(1, dev_id, packedB.data, dtype="int32") != 0: T.evaluate(T.tvm_throw_last_error(dtype="int32")) return Module def opt_conv_tensorcore_normalize(): @T.prim_func def func(A: T.handle, W: T.handle, Conv: T.handle) -> None: # function attr dict T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) # var definition bx = T.env_thread("blockIdx.x") by = T.env_thread("blockIdx.y") bz = T.env_thread("blockIdx.z") tx = T.env_thread("threadIdx.x") ty = T.env_thread("threadIdx.y") tz = T.env_thread("threadIdx.z") # buffer definition Apad_shared = T.Buffer( [16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) Apad_shared_wmma_matrix_a = T.Buffer( [16, 16, 16, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) BA = T.Buffer([16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256) BB = T.Buffer([16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256) BC = T.Buffer([16, 16], scope="wmma.accumulator", align=32, offset_factor=256) Conv_wmma_accumulator = T.Buffer( [16, 14, 14, 32, 16, 16], elem_offset=0, align=64, offset_factor=1 ) W_shared = T.Buffer( [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) W_shared_wmma_matrix_b = T.Buffer( [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) buffer = T.Buffer([16, 16], dtype="float16", scope="shared", align=32, offset_factor=256) buffer_1 = T.Buffer( [16, 16], dtype="float16", scope="wmma.matrix_a", align=32, offset_factor=256 ) buffer_2 = T.Buffer([16, 16], dtype="float16", scope="shared", align=32, offset_factor=256) buffer_3 = T.Buffer( [16, 16], dtype="float16", scope="wmma.matrix_b", align=32, offset_factor=256 ) buffer_4 = T.Buffer([16, 16], scope="wmma.accumulator", align=32, offset_factor=256) buffer_5 = T.Buffer([16, 16], align=32, 
offset_factor=256) A_1 = T.match_buffer( A, [16, 14, 14, 16, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) W_1 = T.match_buffer( W, [3, 3, 16, 32, 16, 16], dtype="float16", elem_offset=0, align=64, offset_factor=1 ) Conv_1 = T.match_buffer( Conv, [16, 14, 14, 32, 16, 16], elem_offset=0, align=64, offset_factor=1 ) # body T.realize(Conv_1[0:16, 0:14, 0:14, 0:32, 0:16, 0:16], "") T.launch_thread(bz, 196) T.launch_thread(bx, 2) T.launch_thread(by, 4) T.launch_thread(ty, 4) T.launch_thread(tz, 2) T.realize( Conv_wmma_accumulator[ ((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2), T.floordiv(bz, 14) : (T.floordiv(bz, 14) + 1), T.floormod(bz, 14) : (T.floormod(bz, 14) + 1), ((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4), 0:16, 0:16, ], "wmma.accumulator", ) for n_c_init in T.serial(0, 2): for o_c_init in T.serial(0, 4): T.attr( [BC, Conv_wmma_accumulator], "buffer_bind_scope", T.tvm_tuple( (n_c_init + ((bx * 8) + (ty * 2))), 1, T.floordiv(bz, 14), 1, T.floormod(bz, 14), 1, (o_c_init + ((by * 8) + (tz * 4))), 1, 0, 16, 0, 16, dtype="handle", ), ) T.evaluate( T.tvm_fill_fragment( BC.data, 16, 16, 16, T.floordiv(BC.elem_offset, 256), T.float32(0), dtype="handle", ) ) for ic_outer in T.serial(0, 8): for kh in T.serial(0, 3): T.realize( Apad_shared[ (bx * 8) : ((bx * 8) + 8), (T.floordiv(bz, 14) + kh) : ((T.floordiv(bz, 14) + kh) + 1), T.floormod(bz, 14) : (T.floormod(bz, 14) + 3), (ic_outer * 2) : ((ic_outer * 2) + 2), 0:16, 0:16, ], "shared", ) for ax2 in T.serial(0, 3): for ax3 in T.serial(0, 2): for ax4_ax5_fused_outer in T.serial(0, 8): T.launch_thread(tx, 32) Apad_shared[ ((tz + (ty * 2)) + (bx * 8)), (T.floordiv(bz, 14) + kh), (ax2 + T.floormod(bz, 14)), (ax3 + (ic_outer * 2)), T.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16), T.floormod((tx + (ax4_ax5_fused_outer * 32)), 16), ] = T.if_then_else( ( ( ( ((T.floordiv(bz, 14) + kh) >= 1) and (((T.floordiv(bz, 14) + kh) - 1) < 14) ) and ((ax2 + T.floormod(bz, 14)) >= 1) ) and (((ax2 + 
T.floormod(bz, 14)) - 1) < 14) ), A_1[ ((tz + (ty * 2)) + (bx * 8)), ((T.floordiv(bz, 14) + kh) - 1), ((ax2 + T.floormod(bz, 14)) - 1), (ax3 + (ic_outer * 2)), T.floordiv((tx + (ax4_ax5_fused_outer * 32)), 16), T.floormod((tx + (ax4_ax5_fused_outer * 32)), 16), ], T.float16(0), dtype="float16", ) T.realize( W_shared[ kh : (kh + 1), 0:3, (ic_outer * 2) : ((ic_outer * 2) + 2), (by * 8) : ((by * 8) + 8), 0:16, 0:16, ], "shared", ) for ax1 in T.serial(0, 3): for ax2_1 in T.serial(0, 2): T.launch_thread(tx, 32) for ax4_ax5_fused_inner in T.vectorized(0, 8): W_shared[ kh, ax1, (ax2_1 + (ic_outer * 2)), ((tz + (ty * 2)) + (by * 8)), T.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16), T.floormod((ax4_ax5_fused_inner + (tx * 8)), 16), ] = W_1[ kh, ax1, (ax2_1 + (ic_outer * 2)), ((tz + (ty * 2)) + (by * 8)), T.floordiv((ax4_ax5_fused_inner + (tx * 8)), 16), T.floormod((ax4_ax5_fused_inner + (tx * 8)), 16), ] for ic_inner in T.serial(0, 2): for kw in T.serial(0, 3): T.realize( Apad_shared_wmma_matrix_a[ ((bx * 8) + (ty * 2)) : (((bx * 8) + (ty * 2)) + 2), (T.floordiv(bz, 14) + kh) : ((T.floordiv(bz, 14) + kh) + 1), (kw + T.floormod(bz, 14)) : ((kw + T.floormod(bz, 14)) + 1), ((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1), 0:16, 0:16, ], "wmma.matrix_a", ) for ax0 in T.serial(0, 2): T.attr( [buffer, Apad_shared], "buffer_bind_scope", T.tvm_tuple( (ax0 + ((bx * 8) + (ty * 2))), 1, (T.floordiv(bz, 14) + kh), 1, (kw + T.floormod(bz, 14)), 1, ((ic_outer * 2) + ic_inner), 1, 0, 16, 0, 16, dtype="handle", ), ) T.attr( [buffer_1, Apad_shared_wmma_matrix_a], "buffer_bind_scope", T.tvm_tuple( (ax0 + ((bx * 8) + (ty * 2))), 1, (T.floordiv(bz, 14) + kh), 1, (kw + T.floormod(bz, 14)), 1, ((ic_outer * 2) + ic_inner), 1, 0, 16, 0, 16, dtype="handle", ), ) T.evaluate( T.tvm_load_matrix_sync( buffer_1.data, 16, 16, 16, T.floordiv(buffer_1.elem_offset, 256), T.tvm_access_ptr( T.type_annotation(dtype="float16"), buffer.data, buffer.elem_offset, 256, 1, dtype="handle", ), 16, 
"row_major", dtype="handle", ) ) T.realize( W_shared_wmma_matrix_b[ kh : (kh + 1), kw : (kw + 1), ((ic_outer * 2) + ic_inner) : (((ic_outer * 2) + ic_inner) + 1), ((by * 8) + (tz * 4)) : (((by * 8) + (tz * 4)) + 4), 0:16, 0:16, ], "wmma.matrix_b", ) for ax3_1 in T.serial(0, 4): T.attr( [buffer_2, W_shared], "buffer_bind_scope", T.tvm_tuple( kh, 1, kw, 1, ((ic_outer * 2) + ic_inner), 1, (ax3_1 + ((by * 8) + (tz * 4))), 1, 0, 16, 0, 16, dtype="handle", ), ) T.attr( [buffer_3, W_shared_wmma_matrix_b], "buffer_bind_scope", T.tvm_tuple( kh, 1, kw, 1, ((ic_outer * 2) + ic_inner), 1, (ax3_1 + ((by * 8) + (tz * 4))), 1, 0, 16, 0, 16, dtype="handle", ), ) T.evaluate( T.tvm_load_matrix_sync( buffer_3.data, 16, 16, 16, T.floordiv(buffer_3.elem_offset, 256), T.tvm_access_ptr( T.type_annotation(dtype="float16"), buffer_2.data, buffer_2.elem_offset, 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) for n_c in T.serial(0, 2): for o_c in T.serial(0, 4): T.attr( [BA, Apad_shared_wmma_matrix_a], "buffer_bind_scope", T.tvm_tuple( (n_c + ((bx * 8) + (ty * 2))), 1, (T.floordiv(bz, 14) + kh), 1, (T.floormod(bz, 14) + kw), 1, ((ic_outer * 2) + ic_inner), 1, 0, 16, 0, 16, dtype="handle", ), ) T.attr( [BB, W_shared_wmma_matrix_b], "buffer_bind_scope", T.tvm_tuple( kh, 1, kw, 1, ((ic_outer * 2) + ic_inner), 1, (o_c + ((by * 8) + (tz * 4))), 1, 0, 16, 0, 16, dtype="handle", ), ) T.attr( [BC, Conv_wmma_accumulator], "buffer_bind_scope", T.tvm_tuple( (n_c + ((bx * 8) + (ty * 2))), 1, T.floordiv(bz, 14), 1, T.floormod(bz, 14), 1, (o_c + ((by * 8) + (tz * 4))), 1, 0, 16, 0, 16, dtype="handle", ), ) T.evaluate( T.tvm_mma_sync( BC.data, T.floordiv(BC.elem_offset, 256), BA.data, T.floordiv(BA.elem_offset, 256), BB.data, T.floordiv(BB.elem_offset, 256), BC.data, T.floordiv(BC.elem_offset, 256), dtype="handle", ) ) for n_inner in T.serial(0, 2): for o_inner in T.serial(0, 4): T.attr( [buffer_4, Conv_wmma_accumulator], "buffer_bind_scope", T.tvm_tuple( ((((bx * 4) + ty) * 2) + n_inner), 
1, T.floordiv(bz, 14), 1, T.floormod(bz, 14), 1, ((((by * 2) + tz) * 4) + o_inner), 1, 0, 16, 0, 16, dtype="handle", ), ) T.attr( [buffer_5, Conv_1], "buffer_bind_scope", T.tvm_tuple( ((((bx * 4) + ty) * 2) + n_inner), 1, T.floordiv(bz, 14), 1, T.floormod(bz, 14), 1, ((((by * 2) + tz) * 4) + o_inner), 1, 0, 16, 0, 16, dtype="handle", ), ) T.evaluate( T.tvm_store_matrix_sync( buffer_4.data, 16, 16, 16, T.floordiv(buffer_4.elem_offset, 256), T.tvm_access_ptr( T.type_annotation(dtype="float32"), buffer_5.data, buffer_5.elem_offset, 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) return func def opt_conv_tensorcore_lower(): @T.prim_func def func( A: T.Buffer((16, 14, 14, 16, 16, 16), "float16"), W: T.Buffer((3, 3, 16, 32, 16, 16), "float16"), Conv: T.Buffer((16, 14, 14, 32, 16, 16), "float32"), ) -> None: # function attr dict T.func_attr({"global_symbol": "default_function", "tir.noalias": True}) # body A_1 = T.Buffer([12845056], dtype="float16", data=A.data) W_1 = T.Buffer([1179648], dtype="float16", data=W.data) Conv_1 = T.Buffer([25690112], data=Conv.data) bx = T.env_thread("blockIdx.x") by = T.env_thread("blockIdx.y") bz = T.env_thread("blockIdx.z") tx = T.env_thread("threadIdx.x") ty = T.env_thread("threadIdx.y") tz = T.env_thread("threadIdx.z") T.launch_thread(bz, 196) Conv_wmma_accumulator_data = T.allocate([2048], "float32", "wmma.accumulator") Conv_wmma_accumulator = T.Buffer( shape=[2048], dtype="float32", scope="wmma.accumulator", data=Conv_wmma_accumulator_data ) Apad_shared_data = T.allocate([12288], "float16", "shared") Apad_shared = T.Buffer( shape=[12288], dtype="float16", scope="shared", data=Apad_shared_data ) W_shared_data = T.allocate([12288], "float16", "shared") W_shared = T.Buffer(shape=[12288], dtype="float16", scope="shared", data=W_shared_data) Apad_shared_wmma_matrix_a_data = T.allocate([512], "float16", "wmma.matrix_a") Apad_shared_wmma_matrix_a = T.Buffer( shape=[512], dtype="float16", scope="wmma.matrix_a", 
data=Apad_shared_wmma_matrix_a_data ) W_shared_wmma_matrix_b_data = T.allocate([1024], "float16", "wmma.matrix_b") W_shared_wmma_matrix_b = T.Buffer( shape=[1024], dtype="float16", scope="wmma.matrix_b", data=W_shared_wmma_matrix_b_data ) T.launch_thread(bx, 2) T.launch_thread(by, 4) T.launch_thread(ty, 4) T.launch_thread(tz, 2) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 0, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 1, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 2, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 3, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 4, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 5, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 6, T.float32(0), dtype="handle" ) ) T.evaluate( T.tvm_fill_fragment( Conv_wmma_accumulator.data, 16, 16, 16, 7, T.float32(0), dtype="handle" ) ) for ic_outer in T.serial(0, 8): for kh in T.serial(0, 3): for ax2 in T.serial(0, 3): with T.launch_thread(tx, 32): Apad_shared[ ((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61440 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 32) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and 
((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61408 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 64) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61376 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 96) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61344 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 128) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61312 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 160) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + 
(tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61280 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 192) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61248 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 224) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61216 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 256) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61184 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 288) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx 
) - 61152 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 320) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61120 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 352) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61088 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 384) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61056 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 416) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 61024 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): Apad_shared[ 
(((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 448) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 60992 ), ], T.float16(0), dtype="float16", ) T.launch_thread(tx, 32) Apad_shared[ (((((ty * 3072) + (tz * 1536)) + (ax2 * 512)) + tx) + 480) ] = T.if_then_else( ( ( ( (1 <= (T.floordiv(bz, 14) + kh)) and ((T.floordiv(bz, 14) + kh) < 15) ) and (1 <= (ax2 + T.floormod(bz, 14))) ) and ((ax2 + T.floormod(bz, 14)) < 15) ), A_1[ ( ( ( ( ( ( ( ((bx * 6422528) + (ty * 1605632)) + (tz * 802816) ) + (kh * 57344) ) + (bz * 4096) ) + (ax2 * 4096) ) + (ic_outer * 512) ) + tx ) - 60960 ), ], T.float16(0), dtype="float16", ) with T.launch_thread(tx, 32): W_shared[T.ramp((((ty * 512) + (tz * 256)) + (tx * 8)), 1, 8)] = W_1[ T.ramp( ( ( ( (((kh * 393216) + (ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ), 1, 8, ) ] with T.launch_thread(tx, 32): W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 2048), 1, 8)] = W_1[ T.ramp( ( ( ( ( (((kh * 393216) + (ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ) + 8192 ), 1, 8, ) ] with T.launch_thread(tx, 32): W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 4096), 1, 8)] = W_1[ T.ramp( ( ( ( ( (((kh * 393216) + (ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ) + 131072 ), 1, 8, ) ] with T.launch_thread(tx, 32): W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 6144), 1, 8)] = W_1[ T.ramp( ( ( ( ( (((kh * 393216) + (ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ) + 139264 ), 1, 8, ) ] with T.launch_thread(tx, 32): W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 8192), 1, 8)] = W_1[ T.ramp( ( ( ( ( (((kh * 393216) + 
(ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ) + 262144 ), 1, 8, ) ] with T.launch_thread(tx, 32): W_shared[T.ramp(((((ty * 512) + (tz * 256)) + (tx * 8)) + 10240), 1, 8)] = W_1[ T.ramp( ( ( ( ( (((kh * 393216) + (ic_outer * 16384)) + (by * 2048)) + (ty * 512) ) + (tz * 256) ) + (tx * 8) ) + 270336 ), 1, 8, ) ] for ic_inner in T.serial(0, 2): for kw in T.serial(0, 3): T.evaluate( T.tvm_load_matrix_sync( Apad_shared_wmma_matrix_a.data, 16, 16, 16, 0, T.tvm_access_ptr( T.type_annotation(dtype="float16"), Apad_shared.data, (((ty * 3072) + (kw * 512)) + (ic_inner * 256)), 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_load_matrix_sync( Apad_shared_wmma_matrix_a.data, 16, 16, 16, 1, T.tvm_access_ptr( T.type_annotation(dtype="float16"), Apad_shared.data, ((((ty * 3072) + (kw * 512)) + (ic_inner * 256)) + 1536), 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_load_matrix_sync( W_shared_wmma_matrix_b.data, 16, 16, 16, 0, T.tvm_access_ptr( T.type_annotation(dtype="float16"), W_shared.data, (((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)), 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_load_matrix_sync( W_shared_wmma_matrix_b.data, 16, 16, 16, 1, T.tvm_access_ptr( T.type_annotation(dtype="float16"), W_shared.data, ((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 256), 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_load_matrix_sync( W_shared_wmma_matrix_b.data, 16, 16, 16, 2, T.tvm_access_ptr( T.type_annotation(dtype="float16"), W_shared.data, ((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 512), 256, 1, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_load_matrix_sync( W_shared_wmma_matrix_b.data, 16, 16, 16, 3, T.tvm_access_ptr( T.type_annotation(dtype="float16"), W_shared.data, ((((kw * 4096) + (ic_inner * 2048)) + (tz * 1024)) + 768), 256, 1, dtype="handle", ), 16, 
"row_major", dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 0, Apad_shared_wmma_matrix_a.data, 0, W_shared_wmma_matrix_b.data, 0, Conv_wmma_accumulator.data, 0, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 1, Apad_shared_wmma_matrix_a.data, 0, W_shared_wmma_matrix_b.data, 1, Conv_wmma_accumulator.data, 1, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 2, Apad_shared_wmma_matrix_a.data, 0, W_shared_wmma_matrix_b.data, 2, Conv_wmma_accumulator.data, 2, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 3, Apad_shared_wmma_matrix_a.data, 0, W_shared_wmma_matrix_b.data, 3, Conv_wmma_accumulator.data, 3, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 4, Apad_shared_wmma_matrix_a.data, 1, W_shared_wmma_matrix_b.data, 0, Conv_wmma_accumulator.data, 4, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 5, Apad_shared_wmma_matrix_a.data, 1, W_shared_wmma_matrix_b.data, 1, Conv_wmma_accumulator.data, 5, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 6, Apad_shared_wmma_matrix_a.data, 1, W_shared_wmma_matrix_b.data, 2, Conv_wmma_accumulator.data, 6, dtype="handle", ) ) T.evaluate( T.tvm_mma_sync( Conv_wmma_accumulator.data, 7, Apad_shared_wmma_matrix_a.data, 1, W_shared_wmma_matrix_b.data, 3, Conv_wmma_accumulator.data, 7, dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 0, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 1, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 256 ), 256, 2, 
dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 2, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 512 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 3, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 768 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 4, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 1605632 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 5, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 1605888 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 6, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 1606144 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) T.evaluate( T.tvm_store_matrix_sync( Conv_wmma_accumulator.data, 16, 16, 16, 7, T.tvm_access_ptr( T.type_annotation(dtype="float32"), Conv_1.data, ( ( ((((bx * 12845056) + (ty * 3211264)) + (bz * 8192)) + (by * 2048)) + (tz * 1024) ) + 1606400 ), 256, 2, dtype="handle", ), 16, "row_major", dtype="handle", ) ) return func def opt_conv_tensorcore_mod_host(): @T.prim_func def 
opt_conv_tensorcore_mod_host( args: T.handle, arg_type_ids: T.Buffer((3,), "int32"), num_args: T.int32, out_ret_value: T.handle, out_ret_tcode: T.handle, resource_handle: T.handle, ) -> T.int32: # function attr dict T.func_attr( { "tir.noalias": True, "global_symbol": "default_function", "tir.is_entry_func": True, "calling_conv": 1, } ) # body stack_tcode_data: T.handle("int32") = T.tvm_stack_alloca("arg_tcode", 10, dtype="handle") stack_tcode = T.Buffer([9], "int32", data=stack_tcode_data) stack_value: T.handle = T.tvm_stack_alloca("arg_value", 10, dtype="handle") assert num_args == 3, "default_function: num_args should be 3" arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle") arg0_code: T.int32 = arg_type_ids[0] arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle") arg1_code: T.int32 = arg_type_ids[1] arg2: T.handle = T.tvm_struct_get(args, 2, 12, dtype="handle") arg2_code: T.int32 = arg_type_ids[2] A: T.handle = T.tvm_struct_get(arg0, 0, 1, dtype="handle") T.attr(A, "storage_alignment", 128) arg0_shape_data: T.handle("int64") = T.tvm_struct_get(arg0, 0, 2, dtype="handle") arg0_shape = T.Buffer([6], "int64", data=arg0_shape_data) arg0_strides_data: T.handle("int64") = T.tvm_struct_get(arg0, 0, 3, dtype="handle") arg0_strides = T.Buffer([6], "int64", data=arg0_strides_data) dev_id: T.int32 = T.tvm_struct_get(arg0, 0, 9, dtype="int32") W: T.handle = T.tvm_struct_get(arg1, 0, 1, dtype="handle") T.attr(W, "storage_alignment", 128) arg1_shape_data: T.handle("int64") = T.tvm_struct_get(arg1, 0, 2, dtype="handle") arg1_shape = T.Buffer([6], "int64", data=arg1_shape_data) arg1_strides_data: T.handle("int64") = T.tvm_struct_get(arg1, 0, 3, dtype="handle") arg1_strides = T.Buffer([6], "int64", data=arg1_strides_data) Conv: T.handle = T.tvm_struct_get(arg2, 0, 1, dtype="handle") T.attr(Conv, "storage_alignment", 128) arg2_shape_data: T.handle("int64") = T.tvm_struct_get(arg2, 0, 2, dtype="handle") arg2_shape = T.Buffer([6], "int64", data=arg2_shape_data) 
arg2_strides_data: T.handle("int64") = T.tvm_struct_get(arg2, 0, 3, dtype="handle") arg2_strides = T.Buffer([6], "int64", data=arg2_strides_data) assert (((arg0_code == 3) or (arg0_code == 13)) or (arg0_code == 7)) or ( arg0_code == 4 ), "default_function: Expect arg[0] to be pointer" assert (((arg1_code == 3) or (arg1_code == 13)) or (arg1_code == 7)) or ( arg1_code == 4 ), "default_function: Expect arg[1] to be pointer" assert (((arg2_code == 3) or (arg2_code == 13)) or (arg2_code == 7)) or ( arg2_code == 4 ), "default_function: Expect arg[2] to be pointer" assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6" assert 6 == T.tvm_struct_get(arg0, 0, 4, dtype="int32"), "arg0.ndim is expected to equal 6" assert ( (T.tvm_struct_get(arg0, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg0, 0, 6, dtype="uint8") == T.uint8(16)) ) and ( T.tvm_struct_get(arg0, 0, 7, dtype="uint16") == T.uint16(1) ), "arg0.dtype is expected to be float16" assert 16 == T.cast( arg0_shape[0], "int32" ), "Argument arg0.shape[0] has an unsatisfied constraint" assert 14 == T.cast( arg0_shape[1], "int32" ), "Argument arg0.shape[1] has an unsatisfied constraint" assert 14 == T.cast( arg0_shape[2], "int32" ), "Argument arg0.shape[2] has an unsatisfied constraint" assert 16 == T.cast( arg0_shape[3], "int32" ), "Argument arg0.shape[3] has an unsatisfied constraint" assert 16 == T.cast( arg0_shape[4], "int32" ), "Argument arg0.shape[4] has an unsatisfied constraint" assert 16 == T.cast( arg0_shape[5], "int32" ), "Argument arg0.shape[5] has an unsatisfied constraint" if not (T.isnullptr(arg0_strides.data, dtype="bool")): assert ( ( ( ( (1 == T.cast(arg0_strides[5], "int32")) and (16 == T.cast(arg0_strides[4], "int32")) ) and (256 == T.cast(arg0_strides[3], "int32")) ) and (4096 == T.cast(arg0_strides[2], "int32")) ) and (57344 == T.cast(arg0_strides[1], "int32")) ) and ( 802816 == T.cast(arg0_strides[0], "int32") ), "arg0.strides: expected to be compact 
array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg0, 0, 8, dtype="uint64" ), "Argument arg0.byte_offset has an unsatisfied constraint" assert 2 == T.tvm_struct_get( arg0, 0, 10, dtype="int32" ), "Argument arg0.device_type has an unsatisfied constraint" assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6" assert 6 == T.tvm_struct_get(arg1, 0, 4, dtype="int32"), "arg1.ndim is expected to equal 6" assert ( (T.tvm_struct_get(arg1, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg1, 0, 6, dtype="uint8") == T.uint8(16)) ) and ( T.tvm_struct_get(arg1, 0, 7, dtype="uint16") == T.uint16(1) ), "arg1.dtype is expected to be float16" assert 3 == T.cast( arg1_shape[0], "int32" ), "Argument arg1.shape[0] has an unsatisfied constraint" assert 3 == T.cast( arg1_shape[1], "int32" ), "Argument arg1.shape[1] has an unsatisfied constraint" assert 16 == T.cast( arg1_shape[2], "int32" ), "Argument arg1.shape[2] has an unsatisfied constraint" assert 32 == T.cast( arg1_shape[3], "int32" ), "Argument arg1.shape[3] has an unsatisfied constraint" assert 16 == T.cast( arg1_shape[4], "int32" ), "Argument arg1.shape[4] has an unsatisfied constraint" assert 16 == T.cast( arg1_shape[5], "int32" ), "Argument arg1.shape[5] has an unsatisfied constraint" if not (T.isnullptr(arg1_strides.data, dtype="bool")): assert ( ( ( ( (1 == T.cast(arg1_strides[5], "int32")) and (16 == T.cast(arg1_strides[4], "int32")) ) and (256 == T.cast(arg1_strides[3], "int32")) ) and (8192 == T.cast(arg1_strides[2], "int32")) ) and (131072 == T.cast(arg1_strides[1], "int32")) ) and ( 393216 == T.cast(arg1_strides[0], "int32") ), "arg1.strides: expected to be compact array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg1, 0, 8, dtype="uint64" ), "Argument arg1.byte_offset has an unsatisfied constraint" assert 2 == T.tvm_struct_get( arg1, 0, 10, dtype="int32" ), "Argument arg1.device_type has an unsatisfied constraint" assert dev_id == 
T.tvm_struct_get( arg1, 0, 9, dtype="int32" ), "Argument arg1.device_id has an unsatisfied constraint" assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6" assert 6 == T.tvm_struct_get(arg2, 0, 4, dtype="int32"), "arg2.ndim is expected to equal 6" assert ( (T.tvm_struct_get(arg2, 0, 5, dtype="uint8") == T.uint8(2)) and (T.tvm_struct_get(arg2, 0, 6, dtype="uint8") == T.uint8(32)) ) and ( T.tvm_struct_get(arg2, 0, 7, dtype="uint16") == T.uint16(1) ), "arg2.dtype is expected to be float32" assert 16 == T.cast( arg2_shape[0], "int32" ), "Argument arg2.shape[0] has an unsatisfied constraint" assert 14 == T.cast( arg2_shape[1], "int32" ), "Argument arg2.shape[1] has an unsatisfied constraint" assert 14 == T.cast( arg2_shape[2], "int32" ), "Argument arg2.shape[2] has an unsatisfied constraint" assert 32 == T.cast( arg2_shape[3], "int32" ), "Argument arg2.shape[3] has an unsatisfied constraint" assert 16 == T.cast( arg2_shape[4], "int32" ), "Argument arg2.shape[4] has an unsatisfied constraint" assert 16 == T.cast( arg2_shape[5], "int32" ), "Argument arg2.shape[5] has an unsatisfied constraint" if not (T.isnullptr(arg2_strides.data, dtype="bool")): assert ( ( ( ( (1 == T.cast(arg2_strides[5], "int32")) and (16 == T.cast(arg2_strides[4], "int32")) ) and (256 == T.cast(arg2_strides[3], "int32")) ) and (8192 == T.cast(arg2_strides[2], "int32")) ) and (114688 == T.cast(arg2_strides[1], "int32")) ) and ( 1605632 == T.cast(arg2_strides[0], "int32") ), "arg2.strides: expected to be compact array" T.evaluate(0) assert T.uint64(0) == T.tvm_struct_get( arg2, 0, 8, dtype="uint64" ), "Argument arg2.byte_offset has an unsatisfied constraint" assert 2 == T.tvm_struct_get( arg2, 0, 10, dtype="int32" ), "Argument arg2.device_type has an unsatisfied constraint" assert dev_id == T.tvm_struct_get( arg2, 0, 9, dtype="int32" ), "Argument arg2.device_id has an unsatisfied constraint" T.evaluate(T.tvm_struct_set(stack_value, 0, 12, T.cast(2, "int64"), 
dtype="int32")) stack_tcode[0] = 0 T.evaluate(T.tvm_struct_set(stack_value, 1, 12, T.cast(dev_id, "int64"), dtype="int32")) stack_tcode[1] = 0 T.evaluate( T.tvm_call_packed_lowered( "__tvm_set_device", stack_value, stack_tcode.data, 0, 2, dtype="int32" ) ) T.attr(0, "compute_scope", "default_function_compute_") T.evaluate(T.tvm_struct_set(stack_value, 0, 12, A, dtype="int32")) stack_tcode[0] = 3 T.evaluate(T.tvm_struct_set(stack_value, 1, 12, W, dtype="int32")) stack_tcode[1] = 3 T.evaluate(T.tvm_struct_set(stack_value, 2, 12, Conv, dtype="int32")) stack_tcode[2] = 3 T.evaluate(T.tvm_struct_set(stack_value, 3, 12, T.cast(196, "int64"), dtype="int32")) stack_tcode[3] = 0 T.evaluate(T.tvm_struct_set(stack_value, 4, 12, T.cast(2, "int64"), dtype="int32")) stack_tcode[4] = 0 T.evaluate(T.tvm_struct_set(stack_value, 5, 12, T.cast(4, "int64"), dtype="int32")) stack_tcode[5] = 0 T.evaluate(T.tvm_struct_set(stack_value, 6, 12, T.cast(4, "int64"), dtype="int32")) stack_tcode[6] = 0 T.evaluate(T.tvm_struct_set(stack_value, 7, 12, T.cast(2, "int64"), dtype="int32")) stack_tcode[7] = 0 T.evaluate(T.tvm_struct_set(stack_value, 8, 12, T.cast(32, "int64"), dtype="int32")) stack_tcode[8] = 0 T.evaluate( T.tvm_call_packed_lowered( "default_function_kernel0", stack_value, stack_tcode.data, 0, 9, dtype="int32" ) ) return opt_conv_tensorcore_mod_host def vthread_func(): @T.prim_func def vthread_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [256], "float32") C = T.match_buffer(c, [256], "float32") i0 = T.env_thread("blockIdx.x") i1 = T.env_thread("threadIdx.x") i2 = T.env_thread("vthread") T.launch_thread(i0, 4) T.launch_thread(i1, 2) T.launch_thread(i2, 2) B_data = T.allocate([16], "float32", "local") B = T.Buffer(shape=[16], dtype="float32", scope="local", data=B_data) for j in range(16): B[j] = A[i0 * 64 + i1 * 32 + i2 * 16 + j] + T.float32(1) for j in range(16): C[i0 * 64 + i1 * 32 + i2 * 16 + j] = B[j] * T.float32(2) return vthread_func def matmul(): @T.prim_func 
    def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])

        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                # T.init() supplies the reduction's zero-initialization statement.
                with T.init():
                    C[vi, vj] = T.float32(0)
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]

    return matmul


def matmul_original():
    """Roundtrip fixture: matmul written with an explicit "init" block instead of T.init()."""

    @T.prim_func
    def matmul_original(a: T.handle, b: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        C = T.match_buffer(c, [128, 128])

        for i, j in T.grid(128, 128):
            # Separate "init" block followed by the reduction loop over k.
            with T.block("init"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = T.float32(0)
            for k in range(128):
                with T.block("update"):
                    vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                    C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]

    return matmul_original


def element_wise():
    """Roundtrip fixture: two element-wise stages through an intermediate alloc_buffer."""

    @T.prim_func
    def element_wise(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32")
        C = T.match_buffer(c, (128, 128), "float32")
        B = T.alloc_buffer((128, 128), "float32")

        for i, j in T.grid(128, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * T.float32(2)
        for i, j in T.grid(128, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vi, vj] = B[vi, vj] + T.float32(1)

    return element_wise


def predicate():
    """Roundtrip fixture: a block guarded by a T.where predicate (jo * 4 + ji < 16)."""

    @T.prim_func
    def predicate(b: T.handle, c: T.handle) -> None:
        B = T.match_buffer(b, (16, 16), "float32")
        C = T.match_buffer(c, (16, 16), "float32")

        for i, jo, ji in T.grid(16, 4, 5):
            with T.block("update"):
                vi = T.axis.S(16, i)
                vj = T.axis.S(16, jo * 4 + ji)
                # Predicate masks off the out-of-range (jo, ji) combinations.
                T.where(jo * 4 + ji < 16)
                C[vi, vj] = B[vi, vj] + T.float32(1)

    return predicate


def test_module_define():
    """Building an IRModule from pre-extracted funcs or directly must be structurally equal."""
    func1 = tvm.ir.IRModule({"matmul": matmul()})["matmul"]
    func2 = tvm.ir.IRModule({"element_wise": element_wise()})["element_wise"]
    func3 = tvm.ir.IRModule({"predicate": predicate()})["predicate"]
    mod1 = tvm.ir.IRModule({"func1": func1, "func2": func2, "func3": func3})
    mod2 = tvm.ir.IRModule({"func1": matmul(), "func2": element_wise(), "func3": predicate()})
    tvm.ir.assert_structural_equal(mod1, mod2)


def test_matmul_original():
    """matmul_original survives script() -> from_source() and keeps its statement structure."""
    func = matmul_original()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    # init block and update loop must stay as a SeqStmt pair under the i/j loops.
    assert isinstance(rt_func.body.block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body.body, tir.stmt.SeqStmt)
    assert isinstance(rt_func.body.block.body.body.body[0].block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body.body.body[1], tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body.body[1].body.block, tir.stmt.Block)


def test_element_wise():
    """element_wise survives the roundtrip and keeps its two-stage loop-nest structure."""
    func = element_wise()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    assert isinstance(rt_func.body.block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body, tir.stmt.SeqStmt)
    assert isinstance(rt_func.body.block.body[0], tir.stmt.For)
    assert isinstance(rt_func.body.block.body[0].body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body[0].body.body.block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body[1], tir.stmt.For)
    assert isinstance(rt_func.body.block.body[1].body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body[1].body.body.block, tir.stmt.Block)


def test_predicate():
    """predicate survives the roundtrip and keeps its three-deep loop nest."""
    func = predicate()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    assert isinstance(rt_func.body.block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body.body, tir.stmt.For)
    assert isinstance(rt_func.body.block.body.body.body.body.block, tir.stmt.Block)


def for_thread_binding():
    """Roundtrip fixture: nested T.thread_binding loops, the inner one with annotations."""

    @T.prim_func
    def for_thread_binding(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")

        for i in T.thread_binding(0, 16, thread="threadIdx.x"):
            for j in T.thread_binding(
                0, 16, thread="threadIdx.y", annotations={"attr_key": "attr_value"}
            ):
                A[i, j] = B[i, j] + T.float32(1)

    return for_thread_binding


def test_for_thread_binding():
    """Thread-bound loops keep their kind, thread tags, and annotations after the roundtrip."""
    func = for_thread_binding()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    assert isinstance(rt_func.body, tir.stmt.For)
    # kind == 4 is ForKind.THREAD_BINDING.
    assert rt_func.body.kind == 4
    assert rt_func.body.thread_binding.thread_tag == "threadIdx.x"
    assert isinstance(rt_func.body.body, tir.stmt.For)
    assert rt_func.body.body.kind == 4
    assert rt_func.body.body.thread_binding.thread_tag == "threadIdx.y"
    assert rt_func.body.body.annotations["attr_key"] == "attr_value"


def match_buffer_region():
    """Roundtrip fixture: nested T.match_buffer views carved out of sliced buffer regions."""

    @T.prim_func
    def match_buffer_region(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16, 16), "float32")
        B = T.match_buffer(b, (1), "float32")

        for i, j in T.grid(16, 4):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                # C is a (16, 1, 4) view of a slice of A.
                C = T.match_buffer(A[0:16, vi, vj * 4 : vj * 4 + 4], (16, 1, 4))
                for ii in range(4):
                    with T.block():
                        vii = T.axis.S(4, ii)
                        # D is a further (4, 1, 4) view carved out of C.
                        D = T.match_buffer(C[vii * 4 : vii * 4 + 4, 0, 0:4], (4, 1, 4))
                        for i, j in T.grid(4, 4):
                            B[0] += D[i, 0, j]

    return match_buffer_region


def test_match_buffer_region():
    """Nested match_buffer views keep their shapes after the roundtrip."""
    func = match_buffer_region()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    assert isinstance(rt_func.body, tir.stmt.BlockRealize)
    root = rt_func.body.block

    assert isinstance(root.body, tir.stmt.For)
    assert isinstance(root.body.body, tir.stmt.For)
    assert isinstance(root.body.body.body, tir.stmt.BlockRealize)
    outer_block = root.body.body.body.block
    assert len(outer_block.match_buffers) == 1
    buffer_C = outer_block.match_buffers[0].buffer
    tvm.ir.assert_structural_equal(buffer_C.shape, [16, 1, 4])

    assert isinstance(outer_block.body, tir.stmt.For)
    assert isinstance(outer_block.body.body, tir.stmt.BlockRealize)
    inner_block = outer_block.body.body.block
    assert len(inner_block.match_buffers) == 1
    buffer_D = inner_block.match_buffers[0].buffer
    tvm.ir.assert_structural_equal(buffer_D.shape, [4, 1, 4])


def block_elements():
    """Roundtrip fixture exercising every block element: where/reads/writes/attrs/init."""

    @T.prim_func
    def block_elements(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (1, 1), "float32")

        with T.block("update"):
            vi = T.axis.S(1, 0)
            T.where(True)
            T.reads(A[0:16, 0:16])
            T.writes(B[0, 0])
            T.block_attr({"attr_key": "attr_value"})
            C = T.alloc_buffer((4, 4), dtype="float32")
            D = T.match_buffer(A[0:4, 0], (4, 1))
            with T.init():
                B[0, 0] = T.float32(0)
            B[0, 0] = A[0, 0] + B[0, 0] + C[1, 1] + D[2, 0]

    return block_elements


def test_block_elements():
    """All block elements (init, annotations, body) survive the roundtrip."""
    func = block_elements()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    assert isinstance(rt_func.body.block, tir.stmt.Block)
    assert isinstance(rt_func.body.block.body, tir.stmt.BlockRealize)
    assert isinstance(rt_func.body.block.body.block, tir.stmt.Block)
    block = rt_func.body.block.body.block
    assert isinstance(block.body, tir.stmt.BufferStore)
    assert isinstance(block.init, tir.stmt.BufferStore)
    assert len(block.annotations) == 1
    assert block.annotations["attr_key"] == "attr_value"


def opaque_block():
    """Roundtrip fixture: blocks with no iter_vars, only explicit reads/writes regions."""

    @T.prim_func
    def opaque_block(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        B = T.match_buffer(b, (16, 16), "float32")

        for i in range(16):
            for j in range(16):
                with T.block():
                    T.reads([])
                    T.writes(A[i, j])
                    A[i, j] = T.float32(0)
            with T.block():
                T.reads([A[i, 0:16]])
                T.writes([B[i, 0:16]])
                for j in range(16):
                    B[i, j] = A[i, j]

    return opaque_block


def test_opaque_block():
    """Opaque blocks (zero iter_vars) keep their structure after the roundtrip."""
    func = opaque_block()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func)

    root_block = rt_func.body.block
    assert isinstance(root_block, tir.stmt.Block)
    assert isinstance(root_block.body, tir.stmt.For)
    assert isinstance(root_block.body.body[0], tir.stmt.For)
    assert isinstance(root_block.body.body[0].body, tir.stmt.BlockRealize)
    assert isinstance(root_block.body.body[0].body.block, tir.stmt.Block)
    assert len(root_block.body.body[0].body.block.iter_vars) == 0
    assert isinstance(root_block.body.body[1], tir.stmt.BlockRealize)
    assert isinstance(root_block.body.body[1].block, tir.stmt.Block)
    assert len(root_block.body.body[1].block.iter_vars) == 0


def module_const():
    """Roundtrip fixture: an IRModule whose PrimFunc uses T.allocate_const constants."""

    @tvm.script.ir_module
    class Module4:
        # There is an ongoing (python)dict->(c++)Map->(python)dict issue which potentially
        # changes order of the items in dict after roundtrip due to map not support order
        # of insertion while dict does. Hence func 'def A(a: T.handle, c: T.handle) -> None'
        # is commented
        #
        # test:
        #   d = {"B": 1, "A": 2}
        #   m = tvm.runtime.convert(d)
        #   assert d.keys() == m.keys(), f"Order changed from {list(d.keys())} to {list(m.keys())}"

        """
        @T.prim_func
        def A(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (10), "int32")
            C = T.match_buffer(c, (10), "int32")
            B = T.alloc_buffer((10), "int32")

            K1 = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            for x in T.serial(0, 10):
                B[x] = A[x] + T.load("int32", K1, x)
            for x in T.serial(0, 10):
                C[x] = B[x]
        """

        @T.prim_func
        def B(a: T.handle, c: T.handle) -> None:
            A = T.match_buffer(a, (10), "int32")
            C = T.match_buffer(c, (10), "int32")
            B = T.alloc_buffer((10), "int32")

            K1_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            K1 = T.Buffer(shape=[10], dtype="int32", data=K1_data)
            for x in T.serial(0, 10):
                B[x] = A[x] + K1[x]

            K2_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
            K2 = T.Buffer(shape=[10], dtype="int32", data=K2_data)
            for x in T.serial(0, 10):
                B[x] = B[x] + K2[x]

            for x in T.serial(0, 10):
                C[x] = B[x]

    return Module4


def constant():
    """Roundtrip fixture: a single PrimFunc reading from a T.allocate_const buffer."""

    @T.prim_func
    def constant(a: T.handle, c: T.handle) -> None:
        A = T.match_buffer(a, (10), "int32")
        C = T.match_buffer(c, (10), "int32")
        B = T.alloc_buffer((10), "int32")
        K_data = T.allocate_const([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "int32", [10])
        K = T.Buffer(shape=[10], dtype="int32", data=K_data)
        for x in T.serial(0, 10):
            B[x] = A[x] + K[x]

        for x in T.serial(0, 10):
            C[x] = B[x]

    return constant


def rank0():
    """Roundtrip fixture: rank-0 (scalar) buffers indexed with the empty tuple."""

    @T.prim_func
    def rank0(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        B = T.alloc_buffer((), "float32")
        A[()] = 2
        B[()] = A[()]

    return rank0


def rank0_block():
    """Roundtrip fixture: rank-0 buffer accesses inside an opaque block with regions."""

    @T.prim_func
    def rank0_block(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        B = T.alloc_buffer((), "float32")
        B[()] = A[()]

        with T.block("update"):
            T.reads([A[()]])
            T.writes([B[()]])
            for i in range(1):
                B[()] = A[()]

    return rank0_block


def select():
    """Roundtrip fixture: the T.Select intrinsic."""

    @T.prim_func
    def select(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.Select(True, 1, 2)

    return select


def minmax():
    """Roundtrip fixture: the T.min / T.max intrinsics."""

    @T.prim_func
    def minmax(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.min(1, 2)
        A[()] = T.max(1, 2)

    return minmax


def abs():
    """Roundtrip fixture: the T.abs intrinsic applied element-wise."""

    @T.prim_func
    def abs(a: T.handle) -> None:
        A = T.match_buffer(a, (128, 128), "float32")

        for i, j in T.grid(128, 128):
            with T.block("A"):
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = T.abs(A[vi, vj])

    return abs


def constant_folding():
    """Roundtrip fixture: constant min/max expressions with mixed float literal forms."""

    @T.prim_func
    def constant_folding(a: T.handle) -> None:
        A = T.match_buffer(a, (), "float32")
        A[()] = T.min(2.2, 5.2)
        A[()] = T.max(T.float32(2.2), T.float32(T.float32(5.2)))
        A[()] = T.min(2.2, 5.0)

    return constant_folding


def simplify_bracket():
    """Roundtrip fixture: operator-precedence bracketing in a scalar expression."""

    @T.prim_func
    def simplify_bracket() -> None:
        a = T.int32()
        b = T.int32()
        c = T.int32()
        d = T.int32()
        T.evaluate(a + b * (c + d))

    return simplify_bracket


def var_with_same_name():
    """Roundtrip fixture: two loop nests reusing identical iterator names (i, j, vi, vj)."""

    @T.prim_func
    def var_with_same_name(a: T.handle) -> None:
        A = T.match_buffer(a, (16, 16), "float32")
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = 0
        for i, j in T.grid(16, 16):
            with T.block():
                vi, vj = T.axis.remap("SS", [i, j])
                A[vi, vj] = 0

    return var_with_same_name


def test_same_name_var():
    """The printer must not rename the reused loop vars (no i_0-style suffixes)."""
    func = var_with_same_name()
    out_str = func.script()
    rt_func = \
tvm.script.from_source(out_str)
    tvm.ir.assert_structural_equal(func, rt_func)

    # Both loop nests print with the same var names, and no disambiguating suffix appears.
    assert out_str.count("for i, j in T.grid(16, 16)") == 2
    assert out_str.find("i_") == -1
    assert out_str.find("i_") == -1


def while_loop():
    """Roundtrip fixture: a While statement driven by a scalar "local" buffer."""

    @T.prim_func
    def while_loop(a: T.handle, b: T.handle) -> None:
        A = T.match_buffer(a, (16,), "float32")
        B = T.match_buffer(b, (16,), "float32")
        i = T.alloc_buffer((), "int32", scope="local")
        for ii in range(16):
            with T.block():
                vi = T.axis.S(16, ii)
                B[vi] = 0
        while i[()] < 10:
            for j in range(16):
                B[j] += A[j]

    return while_loop


# fmt: off
def primfunc_with_allocate_annotations():
    """Roundtrip fixture: a lowered max_pool2d+cast func with annotations on T.allocate."""

    @T.prim_func
    def primfunc_with_allocate_annotations(placeholder_28: T.handle, T_cast_6: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "tvmgen_default_fused_nn_max_pool2d_cast", "tir.noalias": True})
        placeholder_29 = T.match_buffer(placeholder_28, [802816], dtype="uint8", elem_offset=0, align=64, offset_factor=1)
        T_cast_7 = T.match_buffer(T_cast_6, [200704], dtype="int16", elem_offset=0, align=64, offset_factor=1)
        # body
        tensor_2_data = T.allocate([200704], "uint8", "global", annotations={"attr1_key": "attr1_value"})
        tensor_2 = T.Buffer(shape=[200704], dtype="uint8", scope="global", data=tensor_2_data)
        for ax0_ax1_fused_4 in T.serial(0, 56):
            for ax2_4 in T.serial(0, 56):
                for ax3_init in T.serial(0, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_init)] = T.uint8(0)
                for rv0_rv1_fused_1, ax3_2 in T.grid(9, 64):
                    tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)] = T.max(tensor_2[(((ax0_ax1_fused_4*3584) + (ax2_4*64)) + ax3_2)], T.if_then_else(((((ax0_ax1_fused_4*2) + T.floordiv(rv0_rv1_fused_1, 3)) < 112) and (((ax2_4*2) + T.floormod(rv0_rv1_fused_1, 3)) < 112)), placeholder_29[(((((ax0_ax1_fused_4*14336) + (T.floordiv(rv0_rv1_fused_1, 3)*7168)) + (ax2_4*128)) + (T.floormod(rv0_rv1_fused_1, 3)*64)) + ax3_2)], T.uint8(0), dtype="uint8"))
        for ax0_ax1_fused_5 in T.serial(0, 56):
            for ax2_5, ax3_3 in T.grid(56, 64):
                T_cast_7[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)] = T.cast(tensor_2[(((ax0_ax1_fused_5*3584) + (ax2_5*64)) + ax3_3)], "int16")
    return primfunc_with_allocate_annotations
# fmt: on


# fmt: off
def comm_reducer_single_reduce_group():
    """Roundtrip fixture: one tvm_thread_allreduce under a sum comm_reducer attr scope."""

    @T.prim_func
    def comm_reducer_single_reduce_group(a: T.handle, b: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        threadIdx_x = T.env_thread("threadIdx.x")
        A = T.match_buffer(a, [16384], dtype="float32")
        for i in T.serial(0, 128):
            T.launch_thread(threadIdx_x, 128)
            reduce_temp0_data = T.allocate([1], "float32", "local")
            reduce_temp0 = T.Buffer(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
            with T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))
    return comm_reducer_single_reduce_group


def comm_reducer_multiple_reduce_groups():
    """Roundtrip fixture: a tuple-valued (argmax-style) comm_reducer attr scope."""

    @T.prim_func
    def comm_reducer_multiple_reduce_groups(a: T.handle, b: T.handle) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        threadIdx_x = T.env_thread("threadIdx.x")
        A = T.match_buffer(a, [16384], dtype="float32")
        for i in T.serial(0, 128):
            T.launch_thread(threadIdx_x, 128)
            reduce_temp0_data = T.allocate([1], "float32", "local")
            reduce_temp0 = T.Buffer(shape=[1], dtype="float32", scope="local", data=reduce_temp0_data)
            with T.attr(T.comm_reducer(lambda x0, x1, y0, y1: (T.Select((x1 >= y1), x0, y0), T.Select((x1 >= y1), x1, y1)), [T.int32(-1), T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle")):
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), A[i * 128 + threadIdx_x], True, reduce_temp0.data, threadIdx_x, dtype="handle"))
    return comm_reducer_multiple_reduce_groups


def multiple_commreducer():
    """Roundtrip fixture: two distinct comm_reducers (max and sum) in one PrimFunc."""

    @T.prim_func
    def multiple_commreducer() -> None:
        normal_reduce_temp0 = T.Buffer([1], dtype="float32", strides=[1], scope="local")
        normal_reduce_temp1 = T.Buffer([1], dtype="float32", strides=[1], scope="local")
        reduce_temp0 = T.Buffer([1], dtype="float32", strides=[1], scope="local")
        reduce_temp1 = T.Buffer([1], dtype="float32", strides=[1], scope="local")
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_maxelem_cross_thread_reduction"):
                T.attr(T.comm_reducer(lambda x, y: T.max(x, y), [T.min_value("float32")]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp0[0], True, reduce_temp0.data, ax0_1, dtype="handle"))
        for ax0_1 in T.thread_binding(0, 32, thread="threadIdx.x"):
            with T.block("T_softmax_expsum_cross_thread_reduction"):
                T.attr(T.comm_reducer(lambda x, y: x + y, [T.float32(0)]), "reduce_scope", T.reinterpret(T.uint64(0), dtype="handle"))
                T.evaluate(T.tvm_thread_allreduce(T.uint32(1), normal_reduce_temp1[0], True, reduce_temp1.data, ax0_1, dtype="handle"))
    return multiple_commreducer
# fmt: on


def func_div_mod():
    """Roundtrip fixture: //, %, and T.truncmod on int32 vars."""

    @T.prim_func
    def func_div_mod():
        a = T.int32()
        b = T.int32()
        T.evaluate(a // b)
        T.evaluate(a % b)
        T.evaluate(T.truncmod(a, b))

    return func_div_mod


def test_div_mod():
    """Python // and % map to FloorDiv/FloorMod; T.truncmod maps to Mod."""
    func = func_div_mod()
    rt_func = tvm.script.from_source(func.script())
    tvm.ir.assert_structural_equal(func, rt_func, True)

    assert isinstance(func.body[0].value, tvm.tir.FloorDiv)
    assert isinstance(func.body[1].value, tvm.tir.FloorMod)
    assert isinstance(func.body[2].value, tvm.tir.Mod)


def loop_extent_dependent():
    """Roundtrip fixture: an inner loop whose extent depends on the outer loop var."""

    @T.prim_func
    def loop_extent_dependent(a: T.handle) -> None:
        A = T.match_buffer(a, [], dtype="int32")
        for i in T.serial(0, 128):
            for j in T.serial(0, i):
                A[()] = A[()] + j

    return loop_extent_dependent


def nontrivial_range_axis():
    """Roundtrip fixture: a block axis with a non-zero-based range (1, 11)."""

    @T.prim_func
    def nontrivial_range_axis(a: T.handle) -> None:
        A = T.match_buffer(a, (10), "float32")
        for i in range(10):
            with T.block("block"):
                vi = T.axis.spatial((1, 11), i + 1)
                A[vi - 1] = A[vi - 1] + 1.0

    return nontrivial_range_axis


def
func_with_target_spec_by_config(): @T.prim_func def func_with_target_spec_by_config() -> None: T.func_attr( { "kTarget": T.target( { "max_num_threads": 1024, "arch": "sm_70", "thread_warp_size": 32, "kind": "cuda", "tag": "", "keys": ["cuda", "gpu"], "host": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), } ) } ) T.evaluate(0) return func_with_target_spec_by_config def func_with_target_spec_by_str(): @T.prim_func def func_with_target_spec_by_str() -> None: T.func_attr({"kTarget": T.target("nvidia/nvidia-a100")}) T.evaluate(0) return func_with_target_spec_by_str def func_with_target_and_host_spec_by_str(): @T.prim_func def func(): T.func_attr({"target": T.target("nvidia/nvidia-a100", host="llvm")}) T.evaluate(0) return func def func_root_attr(): @T.prim_func def func_root_attr(): with T.block("root"): T.block_attr({"a": "0"}) T.evaluate(0) return func_root_attr def func_trivial_root_block(): @T.prim_func def func(A: T.Buffer(1, "int32")): with T.block("root"): A[0] = 0 return func def func_nested_root_block(): @T.prim_func def func(A: T.Buffer(1, "int32")): with T.block("root"): with T.block("block"): A[0] = 0 return func def func_T_ptr_let_statement(): @T.prim_func def func_T_ptr_let_statement( args: T.handle, arg_type_ids_handle: T.handle("int32"), num_args: T.int32 ) -> None: # The T.Ptr declaration in the parameter list should parse # correctly, and should be usable as the data pointer in a buffer. arg_type_ids = T.Buffer([2], dtype="int32", data=arg_type_ids_handle) arg0: T.handle = T.tvm_struct_get(args, 0, 12, dtype="handle") arg1: T.handle = T.tvm_struct_get(args, 1, 12, dtype="handle") # Functions that return a "handle" can be assigned to a T.Ptr # variable. A variable annotated with T.Ptr still has dtype of # T.handle, but has type annotation as a pointer type. A_data: T.handle("float32") = T.tvm_struct_get(arg0, 0, 1, dtype="handle") # The buffer declaration has a data pointer defined earlier in # this function. 
It should only be defined after the data pointer # has been defined, and should not be hoisted into the header of # the function as other buffer_decl statements can be. A = T.Buffer([1024], dtype="float32", data=A_data) B_data: T.handle("float32") = T.tvm_struct_get(arg1, 0, 1, dtype="handle") B = T.Buffer([1024], dtype="float32", data=B_data) B[0] = A[0] return func_T_ptr_let_statement def func_T_ptr_allocate(): @T.prim_func def func_T_ptr_allocate() -> None: A_data = T.allocate([1024], "float32", "global") A = T.Buffer(shape=[1024], dtype="float32", scope="global", data=A_data) A[0] = 0.0 return func_T_ptr_allocate def llvm_intrin_call(): @T.prim_func def ctpop(A: T.Buffer((16,), "uint8"), B: T.Buffer((16,), "uint8")) -> None: for i in range(0, 16): with T.block("A"): vi = T.axis.remap( "S", [ i, ], ) B[vi] = T.call_llvm_pure_intrin( T.llvm_lookup_intrinsic_id("llvm.ctpop.i8"), T.uint32(1), A[vi], dtype="uint8", ) return ctpop def parse_bufferslice_as_range_bound(): @T.prim_func def segment_sum( A_ptr: T.handle, B_ptr: T.handle, indptr_ptr: T.handle, n: T.int32, m: T.int32 ) -> None: A = T.match_buffer(A_ptr, [m], dtype="float32") B = T.match_buffer(B_ptr, [n], dtype="float32") indptr = T.match_buffer(indptr_ptr, [n + 1], dtype="int32") for i in T.serial(n): with T.block("outer"): vi = T.axis.spatial(n, i) T.reads(indptr[i : i + 2], B[vi], A[indptr[i] : indptr[i + 1]]) T.writes(B[vi]) for j in T.serial(indptr[i], indptr[i + 1]): with T.block("inner"): vj = T.axis.reduce(m, j) T.reads(B[vi], A[vj]) T.writes(B[vi]) with T.init(): B[vi] = T.float32(0) B[vi] = B[vi] + A[vj] return segment_sum def int64_support(): @T.prim_func def elementwise_shape_int64(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32") B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32") C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, 
j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(T.int64(128), T.int64(128)): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 return elementwise_shape_int64 def string_annotation_escaping(): @T.prim_func def string_annotation_of_special_chars(): T.func_attr( { "key1": '"\'hello\t\r"', "key2": """ %1 = add i32 %0, %0 %2 = add i32 %0, %1 %3 = add i32 %1, %2 """, } ) T.evaluate(0) return string_annotation_of_special_chars def pointer_type(): @T.prim_func def func_with_ptr_type_annotations(x: T.handle("int32"), y: T.handle("int32", "shared")): xx_data = T.allocate([16], "int32", "global") xx = T.Buffer(shape=[16], dtype="int32", scope="global", data=xx_data) yy_data = T.allocate([16], "int32", "shared") yy = T.Buffer(shape=[16], dtype="int32", scope="shared", data=yy_data) a: T.handle("int32") = T.address_of(xx[0], dtype="handle") b: T.handle("int32", "shared") = T.address_of(yy[0], dtype="handle") T.evaluate(T.call_extern("copy", a, b, dtype="")) return func_with_ptr_type_annotations def buffer_axis_separator(): @T.prim_func def element_wise(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128), "float32", axis_separators=[1]) C = T.match_buffer(c, (128, 128), "float32") B = T.alloc_buffer((128, 128), "float32", axis_separators=[1]) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * T.float32(2) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + T.float32(1) return element_wise def buffer_ramp_access_as_slice_index(): @T.prim_func def buffer_ramp_access(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128,), "float32") B = T.match_buffer(b, (128,), "float32") C = T.match_buffer(c, (128,), "float32") for i in range(128): A[i : i + 1 : 1] = i for i in range(4): B[i * 32 : i * 32 + 32] = A[i * 32 : i * 32 + 32 : 1] + T.broadcast(1.0, 32) for i in range(4): C[i : i + 128 : 4] = B[i : i 
+ 128 : 4] + T.broadcast(1.0, 32) return buffer_ramp_access def let_expression(): @T.prim_func def func(): x = T.int32() T.evaluate(T.Let(x + 1, where={x: 1})) return func def test_void_ptr_vs_handle(): """Distinguish between void* and handle In the future, perhaps these should be de-duplicated by forbidding one of the two C++ representations. """ # Generates PointerType(PrimType(DataType::Void())) @T.prim_func def void_ptr(out_ret_value: T.handle("void")): T.evaluate(out_ret_value) # Generates PrimType(DataType::Handle()) @T.prim_func def handle(out_ret_value: T.handle): T.evaluate(out_ret_value) assert not tvm.ir.structural_equal(void_ptr, handle) def void_ptr(): @T.prim_func def func(out_ret_value: T.handle("void")): T.evaluate(out_ret_value) return func def decl_buffer(): @T.prim_func def func(A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32")) -> None: A_flattened = T.decl_buffer(data=A.data, shape=(256,), dtype="float32") B_flattened = T.decl_buffer(data=B.data, shape=(256,), dtype="float32") C_alias = T.decl_buffer(data=A_flattened.data, shape=(256,), dtype="float32") for i in range(256): B_flattened[i] = A_flattened[i] + C_alias[i] + T.float32(1.0) return func def allocate_and_decl_buffer(): @T.prim_func def func(A: T.Buffer((16,), "float32"), B: T.Buffer((16,), "float32")) -> None: D_data = T.allocate((16,), "float32", "global") D = T.decl_buffer((16,), "float32", data=D_data) for i in range(4): with T.allocate((4,), "float32", "global") as C_data: C = T.decl_buffer((4,), "float32", data=C_data) for j in range(4): C[j] = A[i * 4 + j] + T.float32(1.0) for j in range(4): D[j] = C[j] for j in range(4): B[i * 4 + j] = D[j] return func def float_infinity(): @T.prim_func def func( placeholder: T.Buffer((1, 512, 768), "float32"), T_isinf: T.Buffer((1, 512, 768), "bool") ) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") for i0, i1, i2 in T.grid(1, 512, 768): with 
T.block("T_isinf"): ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads(placeholder[ax0, ax1, ax2]) T.writes(T_isinf[ax0, ax1, ax2]) T_isinf[ax0, ax1, ax2] = T.fabs( placeholder[ax0, ax1, ax2], dtype="float32" ) == T.float32("inf") and not (T.isnan(placeholder[ax0, ax1, ax2], dtype="bool")) return func def minimal_i32_literal(): @T.prim_func def func() -> None: T.evaluate(T.int32(-2147483648)) T.evaluate(-T.int64(2147483648)) return func def boolean_argument(): @T.prim_func def func(a: T.boolean) -> None: T.evaluate(a) return func def bool_argument(): @T.prim_func def func(a: T.bool) -> None: T.evaluate(a) return func def bool_variable_annotation(): @T.prim_func def func() -> None: a: T.bool = T.call_extern("dummy", dtype="bool") T.evaluate(0) return func def return_none(): @T.prim_func def func(): T.evaluate(0) return func def bool_primitive(): @T.prim_func def func() -> None: T.evaluate(T.bool(True)) return func def bool_cast(): @T.prim_func def func() -> None: a = T.bool() T.evaluate(T.bool(T.int32(0))) T.evaluate(a == T.bool(False)) return func def implicit_evaluate(): @T.prim_func def func(A: T.Buffer(1, "int32")): T.evaluate(T.assume(A[0] == 5)) A[0] = 10 return func def if_true_else(): @T.prim_func def func() -> None: if True: T.evaluate(0) else: T.evaluate(1) return func def elif_chain_without_else(): @T.prim_func def func(i: T.int32) -> None: if i == 0: T.evaluate(0) elif i == 1: T.evaluate(1) elif i == 2: T.evaluate(2) return func def elif_chain_with_else(): @T.prim_func def func(i: T.int32) -> None: if i == 0: T.evaluate(0) elif i == 1: T.evaluate(1) elif i == 2: T.evaluate(2) else: T.evaluate(3) return func def nested_boolean_expressions(): expressions = { "and_lhs_and": lambda i, j, k: tir.all(tir.all(i, j), k), "and_rhs_and": lambda i, j, k: tir.all(i, tir.all(j, k)), "and_lhs_or": lambda i, j, k: tir.all(tir.any(i, j), k), "and_rhs_or": lambda i, j, k: tir.all(i, tir.any(j, k)), "or_lhs_and": lambda i, j, k: tir.any(tir.all(i, j), k), 
"or_rhs_and": lambda i, j, k: tir.any(i, tir.all(j, k)), "or_lhs_or": lambda i, j, k: tir.any(tir.any(i, j), k), "or_rhs_or": lambda i, j, k: tir.any(i, tir.any(j, k)), "and_of_ors": lambda i, j, k: tir.all(tir.any(i, j), tir.any(j, k), tir.any(i, k), i, j, k), "or_of_ands": lambda i, j, k: tir.any(tir.all(i, j), tir.all(j, k), tir.all(i, k), i, j, k), } def make_ir_generator(name, expression): def inner(): @T.prim_func def func(A: T.Buffer(1, "bool"), i: T.bool, j: T.bool, k: T.bool): A[0] = expression(i, j, k) return func inner.__name__ = f"nested_boolean_expr_{name}" return inner for name, expression in expressions.items(): generator = make_ir_generator(name, expression) yield generator def multi_env_threads(): @T.prim_func def func(A: T.Buffer(128, "float32"), C: T.Buffer(128, "float32")): B = T.alloc_buffer([128], dtype="float32") for i in T.thread_binding(128, thread="threadIdx.x"): B[i] = A[i] + 1.0 for i in T.thread_binding(128, thread="threadIdx.x"): C[i] = B[i] + 2.0 mod = tvm.tir.transform.LowerOpaqueBlock()(tvm.IRModule.from_expr(func)) return mod["main"] def intrinsic_pow(): @T.prim_func def func(): T.pow(T.float32(1), T.float32(1)) return func def let_stmt_var(): @T.prim_func def func(): with T.LetStmt(0) as x: with T.LetStmt(0) as y: T.evaluate(0) T.evaluate(0) return func def let_stmt_value(): @T.prim_func def func(): y = T.int32() with T.LetStmt(y) as x: with T.LetStmt(0, var=y): T.evaluate(0) T.evaluate(0) return func def string_stride(): @T.prim_func def main(a: T.handle, b: T.handle): T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True}) n = T.int32() A = T.match_buffer(a, (n,), strides=("A_s0",), buffer_type="auto") B = T.match_buffer(b, (n,), strides=("B_s0",), buffer_type="auto") blockIdx_x = T.launch_thread("blockIdx.x", (n + 63) // 64) threadIdx_x = T.launch_thread("threadIdx.x", 64) if T.likely(blockIdx_x * 64 + threadIdx_x < n): B2 = T.Buffer((B.strides[0] * n,), data=B.data) A2 = 
T.Buffer((A.strides[0] * n,), data=A.data) B2[(blockIdx_x * 64 + threadIdx_x) * B.strides[0]] = A2[ (blockIdx_x * 64 + threadIdx_x) * A.strides[0] ] * T.float32(2) return main def merge_shape_var_def(): @T.prim_func def main(A: T.handle, B: T.handle): T.func_attr({"from_legacy_te_schedule": True, "global_symbol": "main", "tir.noalias": True}) m, n = T.int32(), T.int32() A_1 = T.match_buffer(A, (m, n), strides=("A_1_s0", "A_1_s1"), buffer_type="auto") B_1 = T.match_buffer(B, (m, n), strides=("B_1_s0", "B_1_s1"), buffer_type="auto") for i_outer, j_outer, i_inner in T.grid((m + 9) // 10, (n + 4) // 5, 10): if T.likely(i_outer * 10 + i_inner < m): for j_inner in range(5): if T.likely(j_outer * 5 + j_inner < n): cse_var_2: T.int32 = j_outer * 5 + j_inner cse_var_1: T.int32 = i_outer * 10 + i_inner B_2 = T.Buffer( (B_1.strides[0] * m,), data=B_1.data, strides=("B_2_s0",), buffer_type="auto", ) A_2 = T.Buffer( (A_1.strides[0] * m,), data=A_1.data, strides=("A_2_s0",), buffer_type="auto", ) B_2[cse_var_1 * B_1.strides[0] + cse_var_2 * B_1.strides[1]] = A_2[ cse_var_1 * A_1.strides[0] + cse_var_2 * A_1.strides[1] ] return main def if_then_else_var(): @T.prim_func def main(n: T.int32): if n == 0: x = 5 T.evaluate(x) else: x = 10 T.evaluate(x) return main def tvm_shfl_builtins(): @T.prim_func def func( A: T.handle("float32"), B: T.handle("float32"), C: T.handle("float32"), ): blockIdx_x = T.launch_thread("blockIdx.x", 1) threadIdx_x = T.launch_thread("threadIdx.x", 32) A_warp = T.allocate([1], "float32", "local") B_warp = T.allocate([1], "float32", "local") red_buf0 = T.allocate([1], "float32", "local") A_warp_1 = T.Buffer((32,), data=A_warp, scope="local") A_1 = T.Buffer((32,), data=A) A_warp_1[0] = A_1[threadIdx_x] B_warp_1 = T.Buffer((32,), data=B_warp, scope="local") T.tvm_storage_sync("warp") B_warp_1[0] = T.tvm_warp_shuffle( T.tvm_warp_activemask(), A_warp_1[0], threadIdx_x % 4 * 8 + threadIdx_x // 4, 32, 32 ) + T.float32(1) red_buf0_1 = T.Buffer((1,), data=red_buf0, 
scope="local") with T.attr( T.comm_reducer(lambda x0, y0: x0 + y0, [T.float32(0)]), "reduce_scope", T.reinterpret("handle", T.uint64(0)), ): mask = T.allocate([1], "uint32", "local") t0 = T.allocate([1], "float32", "local") red_buf0_1[0] = A_warp_1[0] mask_1 = T.Buffer((1,), "uint32", data=mask, scope="local") mask_1[0] = T.tvm_warp_activemask() t0_1 = T.Buffer((1,), data=t0, scope="local") t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 16, 32, 32) red_buf0_1[0] = red_buf0_1[0] + t0_1[0] t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 8, 32, 32) red_buf0_1[0] = red_buf0_1[0] + t0_1[0] t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 4, 32, 32) red_buf0_1[0] = red_buf0_1[0] + t0_1[0] t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 2, 32, 32) red_buf0_1[0] = red_buf0_1[0] + t0_1[0] t0_1[0] = T.tvm_warp_shuffle_down(mask_1[0], red_buf0_1[0], 1, 32, 32) red_buf0_1[0] = red_buf0_1[0] + t0_1[0] red_buf0_1[0] = T.tvm_warp_shuffle(mask_1[0], red_buf0_1[0], 0, 32, 32) # NOTE(Zihao): test tvm_warp_shuffle_up red_buf0_1[0] = T.tvm_warp_shuffle_up(mask_1[0], red_buf0_1[0], 0, 32, 32) if threadIdx_x == 0: C_1 = T.Buffer((1,), data=C) C_1[0] = red_buf0_1[0] B_1 = T.Buffer((32,), data=B) B_1[threadIdx_x] = B_warp_1[0] return func def make_packed_api_result(): @T.prim_func def func(A: T.Buffer(64, "float32")): T.func_attr({"global_symbol": "main", "target": T.target("cuda")}) bx = T.launch_thread("blockIdx.x", 64) T.evaluate(A[bx]) mod = tvm.IRModule.from_expr(func) return tvm.tir.transform.MakePackedAPI()(mod) def tvm_struct_set_generated_in_cpp(): """Ensure same dtype for tvm_struct_set in Python/C++ The TVMStructSet method in C++, used internally by LowerTVMBuiltin, and the Python method `T.tvm_struct_set`, used when parsing TVMScript should use the same dtype "int32". 
""" @I.ir_module class Module: @T.prim_func def tir_packed_call(A: T.Buffer(16)): T.attr(0, "device_id", 0) T.attr(0, "device_type", 0) T.evaluate( T.tvm_call_cpacked( "tvm_test_cpacked", T.tvm_stack_make_array( A.data, T.tvm_stack_make_shape(16, dtype="handle"), T.reinterpret(T.uint64(0), dtype="handle"), T.uint32(1), T.Cast("float32", 0), 0, dtype="handle", ), dtype="int32", ) ) return tvm.tir.transform.LowerTVMBuiltin()(Module) def ir_module_with_attrs(): @I.ir_module class Module: I.module_attrs({"attr": 10}) @T.prim_func def tir_func(A: T.Buffer(16, "int32"), B: T.Buffer(16, "int32")): for i in range(16): B[i] = A[i] return Module def nested_seqstmt(): """Nested SeqStmt should be normalized to flat SeqStmt Nested SeqStmt are representable in the TIR structures, but are flattened when converted to TVMScript. Previously, this could cause failures to round-trip through TVMScript, including erroneous use of TVMScript's concise-scoping rules. This was resolved by normalizing nested SeqStmt in TIR, such that the use of `tir.SeqStmt` below results in a single flat `tir.SeqStmt` containing the three `tir.Evaluate` calls. 
""" func = tvm.tir.PrimFunc( params=[], body=tvm.tir.SeqStmt( [ tvm.tir.SeqStmt([tvm.tir.Evaluate(0), tvm.tir.Evaluate(1)]), tvm.tir.Evaluate(2), ] ), ) return func def subroutine_call(): """A GlobalVar may reference other functions in the module""" @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(16, "float32")): mod.subroutine(A.data, T.int32(16)) @T.prim_func def subroutine(A_data: T.handle("float32"), n: T.int32): T.evaluate(0) return mod def subroutine_call_returning_int(): """An internal function call may return non-void""" @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(2, "float32")): mod.subroutine(A[0]) + mod.subroutine(A[1]) @T.prim_func def subroutine(x: T.float32) -> T.float32: T.ret(x * x) return mod def undefined_data_ptr_in_decl_buffer(): """The T.decl_buffer syntax should not introduce an Allocate While T.decl_buffer can be used to represent an Allocate/DeclBuffer pair, performing a round-trip through TVMScript should not introduce an Allocate node. """ @T.prim_func def func(): data_ptr = T.handle("float32") buf = T.decl_buffer(shape=[1], dtype="float32", data=data_ptr) T.evaluate(buf[0]) return func def undefined_shape_in_decl_buffer(): @T.prim_func def func(): size = T.int32() buf = T.decl_buffer(shape=[size], dtype="float32") T.evaluate(buf[0]) return func def undefined_stride_in_decl_buffer(): @T.prim_func def func(): stride = T.int32() buf = T.decl_buffer(shape=[1], dtype="float32", strides=[stride]) T.evaluate(buf[0]) return func def undefined_elem_offset_in_decl_buffer(): @T.prim_func def func(): elem_offset = T.int32() buf = T.decl_buffer(shape=[1], dtype="float32", elem_offset=elem_offset) T.evaluate(buf[0]) return func def subroutine_call_without_arguments(): @I.ir_module class mod: @T.prim_func def main(): # Should be equivalent to the bare "mod.subroutine()", but # that relies on `GlobalVar.__call__` returning the # correct IR type. Previously, this instead returned a # `relay.Call` object. 
tir.call_tir(mod.subroutine) @T.prim_func def subroutine(): T.evaluate(0) return mod def return_zero(): @T.prim_func def func() -> T.int32: T.ret(0) return func def op_of_literal(): op_list = [ (T.exp, 0), (T.exp2, 0), (T.exp10, 0), (T.erf, 0.0), (T.tanh, 0.0), (T.sigmoid, 0.0), (T.log, 0.0), (T.log2, 0.0), (T.log1p, 0.0), (T.tan, 0.0), (T.cos, 0.0), (T.acos, 0.0), (T.acosh, 0.0), (T.sin, 0.0), (T.sinh, 0.0), (T.asin, 0.0), (T.asinh, 0.0), (T.atan, 0.0), (T.atanh, 0.0), (T.atan2, (1.0, 0.0)), (T.sqrt, 0.0), (T.rsqrt, 1.0), (T.nextafter, (0.0, 1.0)), (T.hypot, (1.0, 1.0)), (T.copysign, (1.0, 1.0)), (T.popcount, 0), (T.fmod, (1.0, 1.0)), ] def make_ir_generator(op, arg): def inner(): call_expr = op(*arg) if isinstance(arg, tuple) else op(arg) @T.prim_func def func(): T.evaluate(call_expr) return func inner.__name__ = f"{op.__name__}_of_literal" return inner for op, arg in op_list: yield make_ir_generator(op, arg) ir_generator = tvm.testing.parameter( launch_env_thread, opt_gemm_normalize, opt_gemm_lower, opt_gemm_mod_host, opt_conv_tensorcore_normalize, opt_conv_tensorcore_lower, opt_conv_tensorcore_mod_host, vthread_func, matmul, module_const, constant, rank0, rank0_block, select, minmax, abs, constant_folding, simplify_bracket, while_loop, primfunc_with_allocate_annotations, comm_reducer_single_reduce_group, comm_reducer_multiple_reduce_groups, multiple_commreducer, loop_extent_dependent, nontrivial_range_axis, func_with_target_spec_by_config, func_with_target_spec_by_str, func_with_target_and_host_spec_by_str, func_root_attr, func_trivial_root_block, func_nested_root_block, func_T_ptr_let_statement, func_T_ptr_allocate, llvm_intrin_call, parse_bufferslice_as_range_bound, int64_support, string_annotation_escaping, pointer_type, buffer_axis_separator, buffer_ramp_access_as_slice_index, let_expression, void_ptr, decl_buffer, allocate_and_decl_buffer, float_infinity, minimal_i32_literal, boolean_argument, bool_argument, bool_variable_annotation, bool_primitive, 
bool_cast, return_none, implicit_evaluate, if_true_else, elif_chain_without_else, elif_chain_with_else, *nested_boolean_expressions(), multi_env_threads, intrinsic_pow, let_stmt_var, let_stmt_value, string_stride, merge_shape_var_def, if_then_else_var, tvm_shfl_builtins, make_packed_api_result, tvm_struct_set_generated_in_cpp, ir_module_with_attrs, nested_seqstmt, subroutine_call, subroutine_call_returning_int, undefined_data_ptr_in_decl_buffer, undefined_shape_in_decl_buffer, undefined_stride_in_decl_buffer, undefined_elem_offset_in_decl_buffer, subroutine_call_without_arguments, return_zero, *op_of_literal(), ) def test_roundtrip(ir_generator): original = ir_generator() after_roundtrip = tvm.script.from_source(original.script(show_meta=True)) tvm.ir.assert_structural_equal(original, after_roundtrip, True) def test_return_none_no_trailing_type(): func = return_none() script = func.script() assert "-> None" not in script if __name__ == "__main__": tvm.testing.main()
165,507
39.835924
459
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_lower_warp_memory.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import pytest import tvm import tvm.testing from tvm import te from tvm.contrib.nvcc import have_fp16 def _run_passes(mod): cuda_target = tvm.target.Target("cuda", host="llvm") assert cuda_target.thread_warp_size == 32 mod = tvm.tir.transform.Apply(lambda f: f.with_attr("target", cuda_target))(mod) mod = tvm.tir.transform.AnnotateDeviceRegions()(mod) mod = tvm.tir.transform.SplitHostDevice()(mod) mod = tvm.tir.transform.LowerWarpMemory()(mod) return mod @tvm.testing.requires_cuda def test_lower_warp_memory_local_scope(): m = 128 A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 3, name="B") s = te.create_schedule(B.op) AA = s.cache_read(A, "warp", [B]) xo, xi = s[B].split(B.op.axis[0], 64) xi0, xi1 = s[B].split(xi, factor=32) tx = te.thread_axis("threadIdx.x") s[B].bind(xi1, tx) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[AA].compute_at(s[B], xo) xo, xi = s[AA].split(s[AA].op.axis[0], 32) s[AA].bind(xi, tx) # lowering with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): mod = tvm.lower(s, [A, B], name="f") mod = _run_passes(mod) fdevice = mod["f_kernel"] 
allocate = fdevice.body.body assert allocate.buffer_var.type_annotation.storage_scope == "local" assert fdevice.body.body.extents[0].value == 2 @tvm.testing.requires_cuda def test_lower_warp_memory_correct_indices(): n = 32 A = te.placeholder((2, n, n), name="A", dtype="float32") C = te.compute((2, n, n), lambda x, i, j: A(x, i, (j + 1) % n), name="C") s = te.create_schedule(C.op) bk_x = te.thread_axis("blockIdx.x") th_y = te.thread_axis("threadIdx.y") th_x = te.thread_axis("threadIdx.x") B = s.cache_read(A, "warp", [C]) cx, ci, cj = C.op.axis bx, bi, bj = B.op.axis s[C].bind(cj, th_x) s[C].bind(cx, bk_x) s[B].compute_at(s[C], cx) s[B].bind(bi, th_y) s[B].bind(bj, th_x) bounds = tvm.te.schedule.InferBound(s) ir = tvm.te.schedule.ScheduleOps(s, bounds) inner_func = ir.body.body.body store_A_warp = inner_func.seq[0].body.body indices = list(store_A_warp.indices) # A.warp is actually many buffers, one for each warp, although they are all called A.warp # 1. If we are accessing from different threads within a same warp (different # threadIdx.x), we need to distinguish between each elements using threadIdx.x, # so threadIdx.x is one if the indices. # 2. If we are accessing from different warps (different threadIdx.y), we are actually # assessing different buffers, so there is no need to distinguish from elements, # and therefore threadIdx.y is NOT a index. 
idx_names = map(lambda x: x.name, filter(lambda x: type(x) is tvm.tir.expr.Var, indices)) assert "threadIdx.x" in idx_names assert "threadIdx.y" not in idx_names @tvm.testing.requires_gpu @tvm.testing.requires_cuda def test_lower_warp_memory_cuda_end_to_end(): def check_cuda(dtype): if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version): print("Skip because gpu does not have fp16 support") return m = 128 A = te.placeholder((m,), name="A", dtype=dtype) B = te.compute((m,), lambda i: A[i // 32 * 32 + (i + 1) % 32], name="B") cuda_target = tvm.target.Target("cuda", host="llvm") assert cuda_target.thread_warp_size == 32 with cuda_target: s = te.create_schedule(B.op) AA = s.cache_read(A, "warp", [B]) xo, xi = s[B].split(B.op.axis[0], 64) xi0, xi1 = s[B].split(xi, factor=32) tx = te.thread_axis("threadIdx.x") s[B].bind(xi1, tx) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[AA].compute_at(s[B], xo) xo, xi = s[AA].split(s[AA].op.axis[0], 32) s[AA].bind(xi, tx) dev = tvm.cuda(0) # building with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): func = tvm.build(s, [A, B], "cuda") A_np = np.array(list(range(m)), dtype=dtype) B_np = np.array( list(range(1, 32)) + [0] + list(range(33, 64)) + [32] + list(range(65, 96)) + [64] + list(range(97, 128)) + [96], dtype=dtype, ) A_nd = tvm.nd.array(A_np, dev) B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev) func(A_nd, B_nd) tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @tvm.testing.requires_gpu @tvm.testing.requires_cuda def test_lower_warp_memory_cuda_half_a_warp(): def check_cuda(dtype): if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version): print("Skip because gpu does not have fp16 support") return n, m = 16, 16 A = te.placeholder( ( n, m, ), name="A", dtype=dtype, ) B = te.compute( ( n, m, ), lambda j, i: A[j, (i + 1) % m], 
name="B", ) cuda_target = tvm.target.Target("cuda", host="llvm") assert cuda_target.thread_warp_size == 2 * m with cuda_target: s = te.create_schedule(B.op) tx = te.thread_axis("threadIdx.x") ty = te.thread_axis("threadIdx.y") bx = te.thread_axis("blockIdx.x") AA = s.cache_read(A, "warp", [B]) y, x = B.op.axis z, y = s[B].split(y, nparts=2) s[B].bind(x, tx) s[B].bind(y, ty) s[B].bind(z, bx) s[AA].compute_at(s[B], y) _, x = AA.op.axis s[AA].bind(x, tx) dev = tvm.cuda(0) # building with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): func = tvm.build(s, [A, B], "cuda") A_np = np.array([list(range(i, m + i)) for i in range(n)], dtype=dtype) B_np = np.array([list(range(1 + i, m + i)) + [i] for i in range(n)], dtype=dtype) A_nd = tvm.nd.array(A_np, dev) B_nd = tvm.nd.array(np.zeros(B_np.shape, dtype=B_np.dtype), dev) func(A_nd, B_nd) tvm.testing.assert_allclose(B_nd.numpy(), B_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @tvm.testing.requires_gpu @tvm.testing.requires_cuda def test_lower_warp_memory_cuda_2_buffers(): def check_cuda(dtype): if dtype == "float16" and not have_fp16(tvm.cuda(0).compute_version): print("Skip because gpu does not have fp16 support") return m = 32 A = te.placeholder((m,), name="A", dtype=dtype) B = te.placeholder((m,), name="B", dtype=dtype) C = te.compute((m,), lambda i: A[(i + 1) % m] + B[(i + 1) % m], name="C") cuda_target = tvm.target.Target("cuda", host="llvm") assert m <= cuda_target.thread_warp_size with cuda_target: s = te.create_schedule(C.op) tx = te.thread_axis("threadIdx.x") bx = te.thread_axis("blockIdx.x") AA = s.cache_read(A, "warp", [C]) BB = s.cache_read(B, "warp", [C]) xo, xi = s[C].split(C.op.axis[0], nparts=1) s[C].bind(xi, tx) s[C].bind(xo, bx) s[AA].compute_at(s[C], xo) s[BB].compute_at(s[C], xo) xo, xi = s[AA].split(s[AA].op.axis[0], nparts=1) s[AA].bind(xo, bx) s[AA].bind(xi, tx) xo, xi = 
s[BB].split(s[BB].op.axis[0], nparts=1) s[BB].bind(xo, bx) s[BB].bind(xi, tx) dev = tvm.cuda(0) # building with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): func = tvm.build(s, [A, B, C], "cuda") AB_np = np.array(list(range(m)), dtype=dtype) C_np = np.array(list(range(1, m)) + [0], dtype=dtype) * 2 A_nd = tvm.nd.array(AB_np, dev) B_nd = tvm.nd.array(AB_np, dev) C_nd = tvm.nd.array(np.zeros(C_np.shape, dtype=C_np.dtype), dev) func(A_nd, B_nd, C_nd) tvm.testing.assert_allclose(C_nd.numpy(), C_np, rtol=1e-3) check_cuda("float32") check_cuda("float16") @tvm.testing.requires_gpu def test_lower_warp_memory_roundup(): def check(device, m): A = te.placeholder((m,), name="A") B = te.compute((m,), lambda i: A[i] + 1, name="B") with tvm.target.Target(device): s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) tx = te.thread_axis("threadIdx.x") s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, tx) AA = s.cache_read(A, "warp", [B]) _, yi = s[AA].split(s[AA].op.axis[0], factor=32) s[AA].bind(yi, tx) s[AA].compute_at(s[B], xo) dev = tvm.device(device, 0) # building with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): func = tvm.build(s, [A, B], device) A_np = np.random.uniform(size=(m,)).astype(A.dtype) B_np = np.zeros(shape=(m,)).astype(B.dtype) A_nd = tvm.nd.array(A_np, dev) B_nd = tvm.nd.array(B_np, dev) func(A_nd, B_nd) B_np = A_np + 1 tvm.testing.assert_allclose(B_nd.numpy(), B_np) for device in ["cuda", "rocm"]: if not tvm.testing.device_enabled(device): print("skip because", device, "is not enabled..") continue check(device, m=31) check(device, m=32) check(device, m=33) check(device, m=63) check(device, m=64) check(device, m=65) @tvm.testing.requires_cuda def test_lower_warp_memory_same_thread(): m = n = 128 A = te.placeholder((m, n), 
name="A") k = te.reduce_axis((0, n), name="k") B = te.compute((m,), lambda i: te.sum(A[i, k], axis=[k])) s = te.create_schedule(B.op) BB = s.cache_write(B, "warp") tx = te.thread_axis("threadIdx.x") xo, xi = s[B].split(B.op.axis[0], factor=32) s[B].bind(xi, tx) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[BB].compute_at(s[B], xo) xo, xi = s[BB].split(s[BB].op.axis[0], factor=32) s[BB].bind(xi, tx) # lowering with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): mod = tvm.lower(s, [A, B], name="f") mod = _run_passes(mod) fdevice = mod["f_kernel"] assert "tvm_warp_shuffle" not in fdevice.script() @tvm.testing.requires_cuda def test_lower_warp_memory_divide_by_factor(): ib = tvm.tir.ir_builder.IRBuilder() bx = te.thread_axis("blockIdx.x") tx = te.thread_axis("threadIdx.x") with ib.new_scope(): ib.scope_attr(bx, "thread_extent", 32) ib.scope_attr(tx, "thread_extent", 32) t = ib.allocate("float32", 16, name="t", scope="warp") n = ib.allocate("float32", 16, name="n", scope="local") n[0] = t[0] stmt = ib.get() func = tvm.tir.PrimFunc([], stmt) func = func.with_attr("from_legacy_te_schedule", True) # lowering with the CSE pass disabled as otherwise it would do some commoning with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]): mod = tvm.lower(func, name="f") with pytest.raises(tvm.error.TVMError, match="Divide by zero") as cm: _run_passes(mod) if __name__ == "__main__": tvm.testing.main()
13,191
36.371105
100
py
tvm
tvm-main/tests/python/unittest/test_tir_analysis_stmt_finding.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm from tvm import te, topi from tvm.meta_schedule.testing.te_workload import conv2d_winograd_nhwc, matmul from tvm.tir.analysis import find_anchor_block def test_matmul_add(): n = m = k = 128 A, B, C = matmul(n, m, k) mod = tvm.IRModule() mod["main"] = te.create_prim_func([A, B, C + A]) block = find_anchor_block(mod) assert block.name_hint == "C" def test_winograd(): mod = tvm.IRModule() mod["main"] = te.create_prim_func(conv2d_winograd_nhwc(1, 14, 14, 128, 128, 6)) block = find_anchor_block(mod) assert block.name_hint == "bgemm" def test_no_anchor_block(): inp = te.placeholder((10,), name="input") out = topi.nn.relu(inp + 1.0) mod = tvm.IRModule() mod["main"] = te.create_prim_func([inp, out]) assert find_anchor_block(mod) is None if __name__ == "__main__": tvm.testing.main()
1,672
29.418182
83
py
tvm
tvm-main/tests/python/unittest/test_te_tensor_overload.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm from tvm import te from tvm import topi import tvm.topi.testing from tvm.topi.utils import get_const_tuple import tvm.testing def test_operator_type_and_tags(): k = 1 n = te.var("n") A = te.placeholder((), name="A") B = te.placeholder((10, 5), name="B") B1 = B[0] B2 = B[0, 0] assert isinstance(k + n, tvm.tir.PrimExpr) assert isinstance(n + n, tvm.tir.PrimExpr) assert isinstance(k + A, te.tensor.Tensor) assert isinstance(A + k, te.tensor.Tensor) assert isinstance(n + A, te.tensor.Tensor) assert isinstance(A + n, te.tensor.Tensor) assert isinstance(A + A, te.tensor.Tensor) assert isinstance(k + B, te.tensor.Tensor) assert isinstance(B + k, te.tensor.Tensor) assert isinstance(n + B, te.tensor.Tensor) assert isinstance(B + n, te.tensor.Tensor) assert isinstance(A + B, te.tensor.Tensor) assert isinstance(B + A, te.tensor.Tensor) assert isinstance(B + B, te.tensor.Tensor) assert (k + B).op.tag == topi.tag.ELEMWISE assert (B + k).op.tag == topi.tag.ELEMWISE assert (n + B).op.tag == topi.tag.ELEMWISE assert (B + n).op.tag == topi.tag.ELEMWISE assert (A + B).op.tag == topi.tag.BROADCAST assert (B + A).op.tag == topi.tag.BROADCAST assert (B + B).op.tag == topi.tag.BROADCAST assert 
isinstance(k + B2, tvm.tir.PrimExpr) assert isinstance(B2 + k, tvm.tir.PrimExpr) assert isinstance(n + B2, tvm.tir.PrimExpr) assert isinstance(B2 + n, tvm.tir.PrimExpr) assert isinstance(B2 + B2, tvm.tir.PrimExpr) assert isinstance(B2 + A, te.tensor.Tensor) assert isinstance(A + B2, te.tensor.Tensor) assert isinstance(B2 + B, te.tensor.Tensor) assert isinstance(B + B2, te.tensor.Tensor) def test_combination(): k = 3 n = 5 m = 10 x = te.var("x") A = te.placeholder((n, m), name="A") B = te.placeholder((n, m), name="B") C = te.placeholder((n, m), name="C") D = k + A - B * C + x s = te.create_schedule(D.op) foo = tvm.build(s, [x, A, B, C, D], "llvm") dev = tvm.cpu(0) x = 2 a = tvm.nd.array(np.random.uniform(size=(n, m)).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=(n, m)).astype(B.dtype), dev) c = tvm.nd.array(np.random.uniform(size=(n, m)).astype(C.dtype), dev) d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), dev) foo(x, a, b, c, d) tvm.testing.assert_allclose(d.numpy(), k + a.numpy() - b.numpy() * c.numpy() + x) def verify_tensor_scalar_bop(shape, typ="add"): """Verify non-constant Tensor and scalar binary operations.""" sh = [te.size_var("n%d" % i) for i in range(0, len(shape))] k = te.var("k") A = te.placeholder(sh, name="A") if typ == "add": B = A + k elif typ == "sub": B = A - k elif typ == "mul": B = A * k elif typ == "div": B = A / k else: raise NotImplementedError() def check_device(device): if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) return dev = tvm.device(device, 0) print("Running on target: %s" % device) with tvm.target.Target(device): s = tvm.topi.testing.get_elemwise_schedule(device)(B) k_ = 2 foo = tvm.build(s, [A, B, k] + sh, device, name="tensor_scalar_" + typ) a_npy = np.random.uniform(size=shape).astype(A.dtype) if typ == "add": b_npy = a_npy + k_ elif typ == "sub": b_npy = a_npy - k_ elif typ == "mul": b_npy = a_npy * k_ elif typ == "div": b_npy = a_npy / k_ else: raise 
NotImplementedError() a_nd = tvm.nd.array(a_npy, dev) b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev) foo(a_nd, b_nd, k_, *shape) tvm.testing.assert_allclose(b_nd.numpy(), b_npy, rtol=1e-5) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) def verify_broadcast_bop(lhs_shape, rhs_shape, typ="add"): A = te.placeholder(shape=lhs_shape, name="A") B = te.placeholder(shape=rhs_shape, name="B") if typ == "add": C = A + B elif typ == "sub": C = A - B elif typ == "mul": C = A * B elif typ == "div": C = A / B else: raise NotImplementedError() def check_device(device): dev = tvm.device(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) with tvm.target.Target(device): s = tvm.topi.testing.get_broadcast_schedule(device)(C) foo = tvm.build(s, [A, B, C], device, name="broadcast_binary" + "_" + typ) lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype) rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype) if typ == "add": out_npy = lhs_npy + rhs_npy elif typ == "sub": out_npy = lhs_npy - rhs_npy elif typ == "mul": out_npy = lhs_npy * rhs_npy elif typ == "div": rhs_npy = np.abs(rhs_npy) + 0.001 out_npy = lhs_npy / rhs_npy else: raise NotImplementedError() lhs_nd = tvm.nd.array(lhs_npy, dev) rhs_nd = tvm.nd.array(rhs_npy, dev) out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), dev) for _ in range(1): foo(lhs_nd, rhs_nd, out_nd) tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) @tvm.testing.uses_gpu def verify_conv2d_scalar_bop( batch, in_size, in_channel, num_filter, kernel, stride, padding, typ="add" ): def check_device(device): dev = tvm.device(device, 0) if not tvm.testing.device_enabled(device): print("Skip because %s is not enabled" % device) return print("Running on target: %s" % device) 
conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(device) k = 10.0 dilation = (1, 1) with tvm.target.Target(device): A = te.placeholder((batch, in_channel, in_size, in_size), name="A") W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W") B = conv2d_nchw(A, W, stride, padding, dilation, A.dtype) if typ == "add": C = B + k elif typ == "sub": C = B - k elif typ == "mul": C = B * k elif typ == "div": C = B / k else: raise NotImplementedError() s = schedule_conv2d_nchw([C]) foo = tvm.build(s, [A, W, B, C], device, name="conv2d_scalar_" + typ) a_npy = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype) w_npy = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype) b_npy = tvm.topi.testing.conv2d_nchw_python(a_npy, w_npy, stride, padding) c_npy = np.random.uniform(size=get_const_tuple(B.shape)).astype(B.dtype) if typ == "add": c_npy = b_npy + k elif typ == "sub": c_npy = b_npy - k elif typ == "mul": c_npy = b_npy * k elif typ == "div": c_npy = b_npy / k else: raise NotImplementedError() a_nd = tvm.nd.array(a_npy, dev) w_nd = tvm.nd.array(w_npy, dev) b_nd = tvm.nd.array(np.empty(b_npy.shape).astype(B.dtype), dev) c_nd = tvm.nd.array(np.empty(c_npy.shape).astype(C.dtype), dev) foo(a_nd, w_nd, b_nd, c_nd) tvm.testing.assert_allclose(c_nd.numpy(), c_npy, rtol=1e-4, atol=1e-4) for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan"]: check_device(device) @tvm.testing.uses_gpu def test_tensor_scalar_bop(): verify_tensor_scalar_bop((1,), typ="add") verify_tensor_scalar_bop((3, 5), typ="sub") verify_tensor_scalar_bop((1, 3, 5), typ="mul") verify_tensor_scalar_bop((2, 3, 1, 32), typ="div") @tvm.testing.uses_gpu def test_broadcast_bop(): verify_broadcast_bop((2, 3), (), typ="add") verify_broadcast_bop((5, 2, 3), (1,), typ="add") verify_broadcast_bop((1, 32), (64, 32), typ="sub") verify_broadcast_bop((5, 64, 128), (2, 5, 64, 1), typ="mul") verify_broadcast_bop((2, 3, 1, 32), (64, 32), typ="div") 
@tvm.testing.uses_gpu def test_conv2d_scalar_bop(): verify_conv2d_scalar_bop(1, 16, 4, 4, 3, 1, 1, typ="add") verify_conv2d_scalar_bop(1, 32, 2, 1, 3, 1, 1, typ="sub") verify_conv2d_scalar_bop(1, 32, 1, 1, 3, 1, 1, typ="mul") verify_conv2d_scalar_bop(1, 16, 2, 1, 3, 1, 1, typ="div") if __name__ == "__main__": test_operator_type_and_tags() test_combination() test_tensor_scalar_bop() test_broadcast_bop() test_conv2d_scalar_bop()
9,869
34.631769
94
py
tvm
tvm-main/tests/python/unittest/test_tir_lower_match_buffer.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm import tvm.testing from tvm.script import tir as T def _check(original, transformed): mod = tvm.IRModule.from_expr(original) mod = tvm.tir.transform.LowerMatchBuffer()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed) def _check_fail(original): mod = tvm.IRModule.from_expr(original) with pytest.raises(tvm.TVMError): mod = tvm.tir.transform.LowerMatchBuffer()(mod) @T.prim_func def buffer_load_store(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16, 16)) C = T.match_buffer(c, (16, 16)) for i, j, k in T.grid(4, 16, 8): with T.block(): T.reads(C[i * 4 : i * 4 + 4, k * 2 : k * 2 + 2]) T.writes(A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2]) sub_A = T.match_buffer( A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2], (4, 1, 2), offset_factor=1 ) sub_C = T.match_buffer(C[i * 4 : i * 4 + 4, k * 2 : k * 2 + 2], (4, 2), offset_factor=1) for ii, kk in T.grid(4, 2): sub_A[ii, 0, kk] += sub_C[ii, kk] @T.prim_func def transformed_buffer_load_store(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16, 16)) C = T.match_buffer(c, (16, 16)) for i, j, k in T.grid(4, 16, 8): with T.block(): T.reads(C[i * 4 : i * 4 + 4, k * 2 : k * 
2 + 2]) T.writes(A[i * 4 : i * 4 + 4, j, k * 2 : k * 2 + 2]) for ii, kk in T.grid(4, 2): A[i * 4 + ii, j, k * 2 + kk] += C[i * 4 + ii, k * 2 + kk] @tvm.ir.register_op_attr("tir.intrin_test", "") def intrin_test(data, elem_offset, stride_0, stride_1, shape_0, shape_1): return 0 @T.prim_func def opaque_access(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (32, 64, 128)) B = T.match_buffer(b, (64, 64, 64)) for i, j, k in T.grid(2, 64, 8): with T.block(): T.reads([]) T.writes(A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16]) sub_A = T.match_buffer( A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16], (16, 1, 16), strides=[8192, 128, 1], offset_factor=1, ) T.evaluate( intrin_test( sub_A.data, sub_A.elem_offset, sub_A.strides[0], sub_A.strides[1], sub_A.shape[0], sub_A.shape[1], ) ) for i, j, k in T.grid(64, 2, 8): with T.block(): Bs_0 = T.int32() Bs_1 = T.int32() T.reads([]) T.writes(B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8]) sub_B = T.match_buffer( B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8], (32, 8), strides=[Bs_0, Bs_1], offset_factor=1, ) T.evaluate( intrin_test( sub_B.data, sub_B.elem_offset, sub_B.strides[0], sub_B.strides[1], sub_B.shape[0], sub_B.shape[1], ) ) @T.prim_func def transformed_opaque_access(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (32, 64, 128)) B = T.match_buffer(b, (64, 64, 64)) for i, j, k in T.grid(2, 64, 8): with T.block(): T.reads([]) T.writes(A[i * 16 : i * 16 + 16, j, k * 16 : k * 16 + 16]) T.evaluate( intrin_test( A.data, i * 131072 + j * 128 + k * 16, 8192, 128, 16, 1, ) ) for i, j, k in T.grid(64, 2, 8): with T.block(): T.reads([]) T.writes(B[i, j * 32 : j * 32 + 32, k * 8 : k * 8 + 8]) T.evaluate( intrin_test( B.data, i * 4096 + j * 2048 + k * 8, 64, 1, 32, 8, ) ) @T.prim_func def high_dim_opaque_access(a: T.handle) -> None: A = T.match_buffer(a, (16, 32, 64)) for i, j, k in T.grid(16, 2, 4): with T.block(): As_0 = T.int32() As_1 = T.int32() T.reads([]) T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 
16]) sub_A = T.match_buffer( A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], (16, 16), strides=[As_0, As_1], offset_factor=1, ) T.evaluate( intrin_test( sub_A.data, sub_A.elem_offset, sub_A.strides[0], sub_A.strides[1], sub_A.shape[0], sub_A.shape[1], ) ) @T.prim_func def transformed_high_dim_opaque_access(a: T.handle) -> None: A = T.match_buffer(a, (16, 32, 64)) for i, j, k in T.grid(16, 2, 4): with T.block(): T.reads([]) T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16]) T.evaluate( intrin_test( A.data, i * 2048 + j * 1024 + k * 16, 64, 1, 16, 16, ) ) @T.prim_func def high_dim_opaque_access_with_source_strides(a: T.handle) -> None: A = T.match_buffer(a, (16, 32, 64), strides=[2576, 80, 1]) for i, j, k in T.grid(16, 2, 4): with T.block(): As_0 = T.int32() As_1 = T.int32() T.reads([]) T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16]) sub_A = T.match_buffer( A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], (16, 16), strides=[As_0, As_1], offset_factor=1, ) T.evaluate( intrin_test( sub_A.data, sub_A.elem_offset, sub_A.strides[0], sub_A.strides[1], sub_A.shape[0], sub_A.shape[1], ) ) @T.prim_func def transformed_high_dim_opaque_access_with_source_strides(a: T.handle) -> None: A = T.match_buffer(a, (16, 32, 64), strides=[2576, 80, 1]) for i, j, k in T.grid(16, 2, 4): with T.block(): T.reads([]) T.writes(A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16]) T.evaluate( intrin_test( A.data, i * 2576 + j * 1280 + k * 16, 80, 1, 16, 16, ) ) @T.prim_func def recursive_match(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (64, 64, 64)) B = T.match_buffer(b, (64, 64, 64)) for i, j, k in T.grid(64, 4, 4): with T.block(): T.reads([]) T.writes( [ A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], B[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], ] ) As_0 = T.int32() As_1 = T.int32() sub_A = T.match_buffer( A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], (16, 16), strides=[As_0, As_1], offset_factor=1, ) sub_B = T.match_buffer( B[i, j * 16 : j * 16 + 16, 
k * 16 : k * 16 + 16], (16, 16), offset_factor=1, ) for jj, kk in T.grid(4, 4): with T.block(): T.reads([]) T.writes( [ sub_A[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4], sub_B[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4], ] ) Ass_0 = T.int32() Ass_1 = T.int32() sub_sub_A = T.match_buffer( sub_A[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4], (4, 4), strides=[Ass_0, Ass_1], offset_factor=1, ) sub_sub_B = T.match_buffer( sub_B[jj * 4 : jj * 4 + 4, kk * 4 : kk * 4 + 4], (4, 4), offset_factor=1, ) T.evaluate( intrin_test( sub_sub_A.data, sub_sub_A.elem_offset, sub_sub_A.strides[0], sub_sub_A.strides[1], sub_sub_A.shape[0], sub_sub_A.shape[1], ) ) for jjj, kkk in T.grid(4, 4): sub_sub_B[jjj, kkk] = 1 @T.prim_func def transformed_recursive_match(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (64, 64, 64)) B = T.match_buffer(b, (64, 64, 64)) for i, j, k in T.grid(64, 4, 4): with T.block(): T.reads([]) T.writes( [ A[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], B[i, j * 16 : j * 16 + 16, k * 16 : k * 16 + 16], ] ) for jj, kk in T.grid(4, 4): with T.block(): T.reads([]) T.writes( [ A[ i, j * 16 + jj * 4 : j * 16 + jj * 4 + 4, k * 16 + kk * 4 : k * 16 + kk * 4 + 4, ], B[ i, j * 16 + jj * 4 : j * 16 + jj * 4 + 4, k * 16 + kk * 4 : k * 16 + kk * 4 + 4, ], ] ) T.evaluate( intrin_test( A.data, i * 4096 + j * 1024 + jj * 256 + k * 16 + kk * 4, 64, 1, 4, 4, ) ) for jjj, kkk in T.grid(4, 4): B[i, j * 16 + jj * 4 + jjj, k * 16 + kk * 4 + kkk] = 1 @T.prim_func def symbolic_match(a: T.handle, b: T.handle, n: T.int32, m: T.int32) -> None: A = T.match_buffer(a, (n * m, m)) B = T.match_buffer(b, (n * 2, m * 4)) for i in range(0, n): with T.block(): T.reads([]) T.writes([A[i * m : i * m + n, 0:m], B[i * n : i * n + 2, 0 : m * 4]]) Bs_0 = T.int32() Bs_1 = T.int32() sub_A = T.match_buffer(A[i * m : i * m + m, 0:m], (m, m), offset_factor=1) sub_B = T.match_buffer( B[i * n : i * n + 2, 0 : m * 4], (2, m * 4), strides=[Bs_0, Bs_1], offset_factor=1 ) for ii, jj in T.grid(m, m): sub_A[ii, jj] 
= 1 for j in range(0, 4): T.evaluate( intrin_test( sub_B.data, sub_B.elem_offset, sub_B.strides[0], sub_B.strides[1], sub_B.shape[0], sub_B.shape[1], ) ) @T.prim_func def transformed_symbolic_match(a: T.handle, b: T.handle, n: T.int32, m: T.int32) -> None: A = T.match_buffer(a, (n * m, m)) B = T.match_buffer(b, (n * 2, m * 4)) for i in range(0, n): with T.block(): T.reads([]) T.writes([A[i * m : i * m + n, 0:m], B[i * n : i * n + 2, 0 : m * 4]]) for ii, jj in T.grid(m, m): A[i * m + ii, jj] = 1 for j in range(0, 4): T.evaluate( intrin_test( B.data, i * n * (m * 4), m * 4, 1, 2, m * 4, ) ) @T.prim_func def rank0_buffer(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (8, 8)) B = T.match_buffer(b, (8, 8)) for i, j in T.grid(8, 8): with T.block(): T.reads([]) T.writes([A[i, j], B[i, j]]) sub_A = T.match_buffer(A[i, j], (), offset_factor=1) sub_B = T.match_buffer(B[i, j], (), offset_factor=1) sub_A[()] = 1 T.evaluate( intrin_test( sub_B.data, sub_B.elem_offset, 0, 0, 0, 0, ) ) @T.prim_func def transformed_rank0_buffer(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, (8, 8)) B = T.match_buffer(b, (8, 8)) for i, j in T.grid(8, 8): with T.block(): T.reads([]) T.writes([A[i, j], B[i, j]]) A[i, j] = 1 T.evaluate( intrin_test( B.data, i * 8 + j, 0, 0, 0, 0, ) ) @T.prim_func def fail_match_load(a: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 8): with T.block(): T.reads(A[i, j]) T.writes([]) sub_A = T.match_buffer(A[i, j], (), elem_offset=0) T.evaluate(sub_A[()]) @T.prim_func def fail_match_store(a: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 8): with T.block(): T.reads([]) T.writes(A[i, j]) sub_A = T.match_buffer(A[i, j], (), elem_offset=0) sub_A[()] = 1 @T.prim_func def fail_buffer_bind(a: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 2): with T.block(): stride = T.int32() sub_A = T.match_buffer( A[i, j * 4 : j * 4 + 4], (1, 4), strides=[stride, stride], offset_factor=1 ) for jj in 
range(0, 4): sub_A[i, j * 4 + jj] = 1 @T.prim_func def fail_match_func_param(a: T.handle, m: T.handle, n: T.handle) -> None: A = T.match_buffer(a, (8, 8)) for i, j in T.grid(8, 2): with T.block(): sub_A = T.match_buffer(A[i, j * 4 : j * 4 + 4], (1, 4), strides=[m, n], offset_factor=1) for jj in range(0, 4): sub_A[i, j * 4 + jj] = 1 def test_buffer_load_store(): _check(buffer_load_store, transformed_buffer_load_store) def test_opaque_access(): _check(opaque_access, transformed_opaque_access) def test_high_dim_opaque_access(): _check(high_dim_opaque_access, transformed_high_dim_opaque_access) _check( high_dim_opaque_access_with_source_strides, transformed_high_dim_opaque_access_with_source_strides, ) def test_recursive_match(): _check(recursive_match, transformed_recursive_match) def test_symbolic_match(): _check(symbolic_match, transformed_symbolic_match) def test_rank0_buffer(): _check(rank0_buffer, transformed_rank0_buffer) def test_fail_load_store(): _check_fail(fail_match_load) _check_fail(fail_match_store) def test_fail_buffer_bind(): _check_fail(fail_buffer_bind) def test_fail_match_func_param(): _check_fail(fail_match_func_param) if __name__ == "__main__": tvm.testing.main()
17,120
31.001869
100
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_unify_thread_binding.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import sys import tvm import tvm.testing from tvm import te from tvm.script import tir as T def _check(original, transformed): mod = tvm.IRModule.from_expr(original) mod = tvm.tir.transform.UnifyThreadBinding()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed, True) def _check_fail(original): mod = tvm.IRModule.from_expr(original) with pytest.raises(ValueError): tvm.tir.transform.UnifyThreadBinding()(mod) @T.prim_func def element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i in T.thread_binding(0, 128, "blockIdx.x"): for j0_0 in T.thread_binding(0, 4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0 for j1_0 in T.thread_binding(0, 4, "threadIdx.x"): for j1_1 in T.serial(0, 32): with T.block(""): C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0 @T.prim_func def unified_element_wise_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for 
blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"): for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[blockIdx_x, threadIdx_x * 32 + j0_1] = ( A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0 ) for j1_1 in T.serial(0, 32): with T.block(""): C[blockIdx_x, threadIdx_x * 32 + j1_1] = ( B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0 ) @T.prim_func def element_wise_thread_x_different_dtype( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: for i in T.thread_binding(128, "blockIdx.x"): for j0_0 in T.thread_binding(4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0 for j1_0 in T.thread_binding(T.int64(4), "threadIdx.x"): for j1_1 in T.serial(T.int64(32)): with T.block(""): C[i, j1_0 * T.int64(32) + j1_1] = B[i, j1_0 * T.int64(32) + j1_1] + 1.0 @T.prim_func def unified_element_wise_thread_x_different_dtype( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ) -> None: for blockIdx_x in T.thread_binding(128, "blockIdx.x"): for threadIdx_x in T.thread_binding(4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[blockIdx_x, threadIdx_x * 32 + j0_1] = ( A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0 ) for j1_1 in T.serial(T.int64(32)): with T.block(""): C[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] = ( B[blockIdx_x, T.cast(threadIdx_x, "int64") * T.int64(32) + j1_1] + 1.0 ) @T.prim_func def element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None: j1_0 = T.env_thread("threadIdx.x") j0_0 = T.env_thread("threadIdx.x") i = T.env_thread("blockIdx.x") A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) T.launch_thread(i, 128) T.launch_thread(j0_0, 4) T.launch_thread(j1_0, 4) for j0_1 in T.serial(0, 32): with T.block(""): B[i, j0_0 * 32 + 
j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0 for j1_1 in T.serial(0, 32): with T.block(""): C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0 @T.prim_func def unified_element_wise_env_thread_x(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"): for threadIdx_x in T.thread_binding(0, 4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[blockIdx_x, threadIdx_x * 32 + j0_1] = ( A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0 ) for j1_1 in T.serial(0, 32): with T.block(""): C[blockIdx_x, threadIdx_x * 32 + j1_1] = ( B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0 ) @T.prim_func def element_wise_vthread_x(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) for i_0 in T.thread_binding(0, 2, "vthread.x"): for i_1 in T.thread_binding(0, 64, "threadIdx.x"): for j_0 in T.thread_binding(0, 2, "vthread.x"): for j_1 in T.serial(0, 64): with T.block(""): B[i_0 * 64 + i_1, j_0 * 64 + j_1] = A[i_0 * 64 + i_1, j_0 * 64 + j_1] * 2.0 @T.prim_func def unified_element_wise_vthread_x(a: T.handle, b: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) for vthread_x in T.thread_binding(0, 2, "vthread.x"): for threadIdx_x in T.thread_binding(0, 64, "threadIdx.x"): for j_1 in T.serial(0, 64): with T.block(""): B[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] = ( A[vthread_x * 64 + threadIdx_x, vthread_x * 64 + j_1] * 2.0 ) @T.prim_func def element_wise_two_thread_x_in_same_kernel_not_equal( a: T.handle, b: T.handle, c: T.handle ) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 64]) for i in T.thread_binding(0, 128, "blockIdx.x"): for j0 in T.thread_binding(0, 128, "threadIdx.x"): B[i, j0] = A[i, j0] * 2.0 for j1 in T.thread_binding(0, 64, "threadIdx.x"): C[i, j1] = A[i, j1] + 1.0 
@T.prim_func def element_wise_kernels_with_different_size( a: T.handle, b: T.handle, c: T.handle, d: T.handle ) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [256, 256]) D = T.match_buffer(d, [256, 256]) for i0 in T.thread_binding(0, 128, "blockIdx.x"): for j0 in T.thread_binding(0, 128, "threadIdx.x"): B[i0, j0] = A[i0, j0] * 2.0 for i1 in T.thread_binding(0, 256, "blockIdx.x"): for j1 in T.thread_binding(0, 256, "threadIdx.x"): D[i1, j1] = C[i1, j1] + 1.0 @T.prim_func def unified_element_wise_kernels_with_different_size( a: T.handle, b: T.handle, c: T.handle, d: T.handle ) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [256, 256]) D = T.match_buffer(d, [256, 256]) for blockIdx_x in T.thread_binding(0, 128, "blockIdx.x"): for threadIdx_x in T.thread_binding(0, 128, "threadIdx.x"): B[blockIdx_x, threadIdx_x] = A[blockIdx_x, threadIdx_x] * 2.0 for blockIdx_x in T.thread_binding(0, 256, "blockIdx.x"): for threadIdx_x in T.thread_binding(0, 256, "threadIdx.x"): D[blockIdx_x, threadIdx_x] = C[blockIdx_x, threadIdx_x] + 1.0 @T.prim_func def element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i in T.thread_binding(0, 128, "threadIdx.y"): for j0_0 in T.thread_binding(0, 4, "threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[i, j0_0 * 32 + j0_1] = A[i, j0_0 * 32 + j0_1] * 2.0 for j1_0 in T.thread_binding(0, 4, "threadIdx.x"): for j1_1 in T.serial(0, 32): with T.block(""): C[i, j1_0 * 32 + j1_1] = B[i, j1_0 * 32 + j1_1] + 1.0 @T.prim_func def unified_element_wise_implicit_block(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for blockIdx_x in T.thread_binding(0, 128, "threadIdx.y"): for threadIdx_x in T.thread_binding(0, 4, 
"threadIdx.x"): for j0_1 in T.serial(0, 32): with T.block(""): B[blockIdx_x, threadIdx_x * 32 + j0_1] = ( A[blockIdx_x, threadIdx_x * 32 + j0_1] * 2.0 ) for j1_1 in T.serial(0, 32): with T.block(""): C[blockIdx_x, threadIdx_x * 32 + j1_1] = ( B[blockIdx_x, threadIdx_x * 32 + j1_1] + 1.0 ) @T.prim_func def match_buffer_with_elem_offset( A: T.Buffer((8, 10, 8), "float32"), I: T.Buffer((4,), "int32"), offset: T.int32 ) -> None: for i in T.thread_binding(0, 4, "blockIdx.x"): for j in range(2): with T.block(): T.writes(A[I[i], offset, j * 4 : j * 4 + 4]) sub_A = T.match_buffer( A[I[i], offset, j * 4 : j * 4 + 4], (4), elem_offset=I[i] * 80 + offset * 8 + j * 4, ) for ji in range(0, 4): sub_A[j * 4 + ji] = 1 @T.prim_func def unified_match_buffer_with_elem_offset( A: T.Buffer((8, 10, 8), "float32"), I: T.Buffer((4,), "int32"), offset: T.int32 ) -> None: for blockIdx_x in T.thread_binding(4, thread="blockIdx.x"): for j in range(2): with T.block(""): T.reads(I[blockIdx_x]) T.writes(A[I[blockIdx_x], offset, j * 4 : j * 4 + 4]) sub_A = T.match_buffer( A[I[blockIdx_x], offset, j * 4 : j * 4 + 4], (4,), elem_offset=I[blockIdx_x] * 80 + offset * 8 + j * 4, ) for ji in range(4): i = T.int32() sub_A_1 = T.Buffer( (4,), data=sub_A.data, elem_offset=I[i] * 80 + offset * 8 + j * 4 ) sub_A_1[j * 4 + ji] = T.float32(1) def test_thread_x(): _check(element_wise_thread_x, unified_element_wise_thread_x) def test_thread_x_different_dtype(): _check(element_wise_thread_x_different_dtype, unified_element_wise_thread_x_different_dtype) def test_env_thread_x(): _check(element_wise_env_thread_x, unified_element_wise_env_thread_x) def test_vthread_x(): _check(element_wise_vthread_x, unified_element_wise_vthread_x) def test_two_thread_x_in_same_kernel_not_equal(): _check_fail(element_wise_two_thread_x_in_same_kernel_not_equal) def test_kernels_with_different_size(): _check( element_wise_kernels_with_different_size, unified_element_wise_kernels_with_different_size ) def test_implicit_block(): 
_check(element_wise_implicit_block, unified_element_wise_implicit_block) def test_match_buffer_with_elem_offset(): _check(match_buffer_with_elem_offset, unified_match_buffer_with_elem_offset) def test_inner_binding_with_annotation(): @T.prim_func def inner_binding_with_annotation(A: T.Buffer((64,), "float32"), B: T.Buffer((64,), "float32")): for bx in T.thread_binding(32, "blockIdx.x"): for tx in T.thread_binding(2, "threadIdx.x", annotations={"my_annotation": 1}): with T.block("block"): v = T.axis.spatial(64, bx * 2 + tx) B[v] = A[v] @T.prim_func def unified_inner_binding_with_annotation( A: T.Buffer((64,), "float32"), B: T.Buffer((64,), "float32") ): for blockIdx_x in T.thread_binding(32, thread="blockIdx.x"): for threadIdx_x in T.thread_binding(2, thread="threadIdx.x"): for var in T.serial(1, annotations={"my_annotation": 1}): with T.block("block"): v = T.axis.spatial(64, blockIdx_x * 2 + threadIdx_x) T.reads(A[v]) T.writes(B[v]) B[v] = A[v] _check(inner_binding_with_annotation, unified_inner_binding_with_annotation) def test_lower_te(): a = te.placeholder((32, 2, 2)) b = te.compute((32, 2, 2), lambda i, j, k: a[i, j, k] * 2.0) s = te.create_schedule(b.op) s[b].bind(b.op.axis[1], te.thread_axis("threadIdx.x")) s[b].bind(b.op.axis[2], te.thread_axis("threadIdx.x")) orig_mod = tvm.driver.build_module.schedule_to_module(s, [a, b]) mod = tvm.tir.transform.UnifyThreadBinding()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) # UnifyThreadBinding should do nothing on TE if __name__ == "__main__": tvm.testing.main()
14,101
37.113514
100
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_trace.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring # mypy: ignore-errors import sys import pytest import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule import BlockRV, Instruction, InstructionKind, LoopRV, Trace # pylint: disable=no-member,invalid-name,unused-variable @T.prim_func def elementwise(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def elementwise_inlined(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = A[vi, vj] * 2.0 + 1.0 # pylint: enable=no-member,invalid-name,unused-variable def _make_get_block(name, output): return Instruction( kind=InstructionKind.get("GetBlock"), inputs=[], attrs=[name, "main"], outputs=[output], ) def _make_get_loops(input, 
outputs): # pylint: disable=redefined-builtin return Instruction( kind=InstructionKind.get("GetLoops"), inputs=[input], attrs=[], outputs=outputs, ) def _make_compute_inline(input): # pylint: disable=redefined-builtin return Instruction( kind=InstructionKind.get("ComputeInline"), inputs=[input], attrs=[], outputs=[], ) def _make_split(inputs, outputs): # pylint: disable=redefined-builtin return Instruction( kind=InstructionKind.get("Split"), inputs=inputs, attrs=[True], outputs=outputs, ) def _make_enter_postproc(): return Instruction( kind=InstructionKind.get("EnterPostproc"), inputs=[], attrs=[], outputs=[], ) def _make_annotate(block: BlockRV, annotation: str): return Instruction( kind=InstructionKind.get("Annotate"), inputs=[block, annotation], attrs=["meta_schedule.auto_tensorize"], outputs=[], ) def _make_trace_1(b0, l1, l2): # pylint: disable=invalid-name return Trace( insts=[ _make_get_block(name="block", output=b0), _make_get_loops(input=b0, outputs=[l1, l2]), ], decisions={}, ) def _make_trace_2(b0): # pylint: disable=invalid-name return Trace( insts=[ _make_get_block(name="B", output=b0), _make_compute_inline(input=b0), ], decisions={}, ) def _make_trace_3(b0, b1, add_postproc): # pylint: disable=invalid-name if add_postproc: insts = [ _make_get_block(name="B", output=b0), _make_compute_inline(input=b0), _make_get_block(name="C", output=b1), _make_enter_postproc(), _make_compute_inline(input=b1), ] else: insts = [ _make_get_block(name="B", output=b0), _make_compute_inline(input=b0), _make_get_block(name="C", output=b1), ] return Trace(insts=insts, decisions={}) def _make_trace_4(b0, l1, l2, l3): # pylint: disable=invalid-name return Trace( insts=[ _make_get_block(name="B", output=b0), _make_get_loops(input=b0, outputs=[l1]), _make_split([l1, None, 32], [l2, l3]), ], decisions={}, ) def test_trace_construct_1(): trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV()) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: 
tir.Schedule) -> None:", ' b0 = sch.get_block(name="block", func_name="main")', " l1, l2 = sch.get_loops(block=b0)", ) ) assert len(trace.insts) == 2 assert len(trace.decisions) == 0 def test_trace_construct_get_decision_1(): trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV()) assert trace.get_decision(trace.insts[0]) is None assert trace.get_decision(trace.insts[1]) is None def test_trace_construct_append_1(): trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV()) trace.append(inst=_make_get_block("block2", BlockRV())) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="block", func_name="main")', " l1, l2 = sch.get_loops(block=b0)", ' b3 = sch.get_block(name="block2", func_name="main")', ) ) def test_trace_construct_pop_1(): trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV()) last_inst = trace.insts[-1] assert trace.pop().same_as(last_inst) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="block", func_name="main")', ) ) def test_trace_construct_pop_2(): trace = Trace([], {}) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", " pass", ) ) assert trace.pop() is None assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", " pass", ) ) def test_trace_apply_to_schedule(): trace = _make_trace_2(BlockRV()) sch = tir.Schedule(elementwise, debug_mask="all") trace.apply_to_schedule(sch, remove_postproc=False, decision_provider=None) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) def test_trace_as_json_1(): trace = _make_trace_1(BlockRV(), LoopRV(), LoopRV()) obj = trace.as_json() assert obj == [ [ ["GetBlock", [], ["block", "main"], ["b0"]], ["GetLoops", ["b0"], [], ["l1", "l2"]], ], [], ] def test_trace_simplified_1(): trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True) 
assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="B", func_name="main")', " sch.compute_inline(block=b0)", ' b1 = sch.get_block(name="C", func_name="main")', " sch.enter_postproc()", " sch.compute_inline(block=b1)", ) ) trace = trace.simplified(remove_postproc=True) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="B", func_name="main")', " sch.compute_inline(block=b0)", ) ) def test_trace_simplified_2(): trace = _make_trace_3(BlockRV(), BlockRV(), add_postproc=True) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="B", func_name="main")', " sch.compute_inline(block=b0)", ' b1 = sch.get_block(name="C", func_name="main")', " sch.enter_postproc()", " sch.compute_inline(block=b1)", ) ) trace = trace.simplified(remove_postproc=False) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="B", func_name="main")', " sch.compute_inline(block=b0)", ' b1 = sch.get_block(name="C", func_name="main")', " sch.enter_postproc()", " sch.compute_inline(block=b1)", ) ) def test_trace_simplified_3(): trace = _make_trace_4(BlockRV(), LoopRV(), LoopRV(), LoopRV()).simplified(remove_postproc=False) assert str(trace) == "\n".join( ( "# from tvm import tir", "def apply_trace(sch: tir.Schedule) -> None:", ' b0 = sch.get_block(name="B", func_name="main")', " l1, = sch.get_loops(block=b0)", " l2, l3 = sch.split(loop=l1, factors=[None, 32], preserve_unit_iters=True)", ) ) def test_apply_json_to_schedule_1(): trace = _make_trace_2(BlockRV()) json_obj = trace.as_json() sch = tir.Schedule(elementwise, debug_mask="all") Trace.apply_json_to_schedule(json_obj, sch) tvm.ir.assert_structural_equal(elementwise_inlined, sch.mod["main"]) def 
test_apply_json_to_schedule_sample_categorical(): var = tir.Var("v", "int32") trace1 = Trace( insts=[ Instruction( kind=InstructionKind.get("SampleCategorical"), inputs=[], attrs=[[tvm.tir.IntImm("int32", 3)], [tvm.tir.FloatImm("float32", 1.0)]], outputs=[var], ) ], decisions={}, ) json = trace1.as_json() assert json == [[["SampleCategorical", [], [[3], [1]], ["v0"]]], []] sch = tir.Schedule(elementwise, debug_mask="all") # As long as the application does not fail, it is fine. Trace.apply_json_to_schedule(json, sch) python_str = sch.trace.as_python() assert len(python_str) == 1 assert python_str[0] == "v0 = sch.sample_categorical(candidates=[3], probs=[1], decision=0)" def _test_apply_annotation_trace_from_json(annotation: str): """Test applying an annotation works without crashing. Designed to handle some previously failing edge cases like the empty string. """ b0 = BlockRV() trace = Trace( insts=[ _make_get_block(name="B", output=b0), _make_annotate(block=b0, annotation=annotation), ], decisions={}, ) json_obj = trace.as_json() sch = tir.Schedule(elementwise, debug_mask="all") Trace.apply_json_to_schedule(json_obj, sch) @T.prim_func def elementwise_expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128)) B = T.alloc_buffer((128, 128)) C = T.match_buffer(c, (128, 128)) for i, j in T.grid(128, 128): with T.block("B"): T.block_attr({"meta_schedule.auto_tensorize": annotation}) vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(128, 128): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 tvm.ir.assert_structural_equal(elementwise_expected, sch.mod["main"]) def test_apply_annotation_from_json(): # Something reasonable _test_apply_annotation_trace_from_json("SSRSSR") # The empty string _test_apply_annotation_trace_from_json("") # A string of two quotation marks _test_apply_annotation_trace_from_json('""') # A string of one quotation mark _test_apply_annotation_trace_from_json('"') if 
__name__ == "__main__": tvm.testing.main()
12,194
29.873418
100
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_compact_buffer_region.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te from tvm import tir from tvm.script import tir as T class BaseCompactTest: """Base testcase class. The inherit testcase should include: - `before` and `expected` primfunc used to check structural equality for the transformation. - `is_lower_order_free` tag, defaults to True, denotes that we would check (LowerOpaqueBlock . CompactBufferAllocation)(before) == (CompactBufferAllocation . LowerOpaqueBlock)(before) - `is_strict` tag, defaults to True, controls the `is_strict` option of the compaction pass. 
""" def test_compact(self): is_lower_order_free = getattr(self, "is_lower_order_free", True) is_strict = getattr(self, "is_strict_mode", True) before = tvm.IRModule.from_expr(self.before) expected = tvm.IRModule.from_expr(self.expected) simplify = tvm.transform.Sequential([tir.transform.Simplify(), tir.transform.RemoveNoOp()]) after = simplify(tir.transform.CompactBufferAllocation(is_strict=is_strict)(before)) expected = simplify(expected) try: tvm.ir.assert_structural_equal(after, expected) except ValueError as err: script = tvm.IRModule( {"expected": expected["main"], "after": after["main"], "before": before["main"]} ).script() raise ValueError( f"Function after simplification did not match expected:\n{script}" ) from err if not is_lower_order_free: return lower_before_compact = tir.transform.LowerOpaqueBlock()(before) lower_before_compact = tir.transform.CompactBufferAllocation(is_strict=is_strict)( lower_before_compact ) lower_before_compact = simplify(lower_before_compact) lower_after_compact = tir.transform.LowerOpaqueBlock()(after) lower_after_compact = simplify(lower_after_compact) try: tvm.ir.assert_structural_equal(lower_before_compact, lower_after_compact) except ValueError as err: script = tvm.IRModule( { "lower_before_compact": lower_before_compact["main"], "lower_after_compact": lower_after_compact["main"], "before": before["main"], } ).script() raise ValueError( f"Function after simplification did not match expected:\n{script}" ) from err class TestElemwise(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] * 2.0 @T.prim_func def 
expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((1, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[0, j]) B[0, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] * 2.0 class TestUnschedulableFunc(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): T.evaluate(T.call_extern("dummy_extern_function", B.data, dtype="int32")) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): C[i, j] = B[i, j] * 2.0 expected = before class TestParamBufferAccess(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (20, 20), "float32") B = T.match_buffer(c, (20, 20), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(B[i, 0:16]) for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) B[i, j] = A[i, j] + 1.0 expected = before class TestSharedMem(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((16, 16), "float32", scope="shared") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i0 * 8 + i1 * 4 + i2, j]) B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 
for j in range(0, 16): with T.block(): T.reads(B[i0 * 8 + i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0 @T.prim_func def expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((8, 16), "float32", scope="shared") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i1 * 4 + i2, j]) B[i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i1 * 4 + i2, j] * 2.0 class TestWrapMem(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((16, 16), "float32", scope="warp") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i0 * 8 + i1 * 4 + i2, j]) B[i0 * 8 + i1 * 4 + i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i0 * 8 + i1 * 4 + i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i0 * 8 + i1 * 4 + i2, j] * 2.0 @T.prim_func def expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i0 in T.thread_binding(0, 2, thread="blockIdx.x"): for i1 in 
T.thread_binding(0, 2, thread="vthread"): for i2 in T.thread_binding(0, 4, thread="threadIdx.x"): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, 0:16]) T.writes(C[i0 * 8 + i1 * 4 + i2, 0:16]) B = T.alloc_buffer((4, 16), "float32", scope="warp") for j in range(0, 16): with T.block(): T.reads(A[i0 * 8 + i1 * 4 + i2, j]) T.writes(B[i2, j]) B[i2, j] = A[i0 * 8 + i1 * 4 + i2, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i2, j]) T.writes(C[i0 * 8 + i1 * 4 + i2, j]) C[i0 * 8 + i1 * 4 + i2, j] = B[i2, j] * 2.0 class TestSymbolic(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (n * 8,), "float32") C = T.match_buffer(c, (n * 8,), "float32") for i in range(0, n): with T.block(): T.reads(A[i * 8 : i * 8 + 8]) T.writes(C[i * 8 : i * 8 + 8]) B = T.alloc_buffer((n * 8,), "float32") for j in range(0, 8): with T.block(): T.reads(A[i * 8 + j]) T.writes(B[i * 8 + j]) B[i * 8 + j] = A[i * 8 + j] + 1.0 for j in range(0, 8): with T.block(): T.reads(B[i * 8 + j]) T.writes(C[i * 8 + j]) C[i * 8 + j] = B[i * 8 + j] * 2.0 @T.prim_func def expected(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (n * 8,), "float32") C = T.match_buffer(c, (n * 8,), "float32") for i in range(0, n): with T.block(): T.reads(A[i * 8 : i * 8 + 8]) T.writes(C[i * 8 : i * 8 + 8]) B = T.alloc_buffer((8,), "float32") for j in range(0, 8): with T.block(): T.reads(A[i * 8 + j]) T.writes(B[j]) B[j] = A[i * 8 + j] + 1.0 for j in range(0, 8): with T.block(): T.reads(B[j]) T.writes(C[i * 8 + j]) C[i * 8 + j] = B[j] * 2.0 class TestComplexFunc(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (8, 8), "float32") C = T.match_buffer(c, (8, 8), "float32") for i in range(0, 8): with T.block(): T.reads(A[0, 8]) T.writes(C[0, 8]) B = T.alloc_buffer((8, 8), "float32") for j in range(0, 4): with T.block(): D = T.alloc_buffer((8, 8), "float32") T.reads(A[i, j]) T.writes(B[i, j]) 
for k in range(4, 8): D[k, j] = 1.0 for k in range(2, 4): B[i, j] = A[i, j] + D[k, j] for j in range(3, 5): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] for j in range(6, 8): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] @T.prim_func def expected(a: T.handle, c: T.handle, n: T.int32) -> None: A = T.match_buffer(a, (8, 8), "float32") C = T.match_buffer(c, (8, 8), "float32") for i in range(0, 8): with T.block(): T.reads(A[0, 8]) T.writes(C[0, 8]) B = T.alloc_buffer((1, 8), "float32") for j in range(0, 4): with T.block(): D = T.alloc_buffer((6, 1), "float32") T.reads(A[i, j]) T.writes(B[0, j]) for k in range(4, 8): D[k - 2, 0] = 1.0 for k in range(2, 4): B[0, j] = A[i, j] + D[k - 2, 0] for j in range(3, 5): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] for j in range(6, 8): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] class TestMatchBuffer(BaseCompactTest): is_lower_order_free = False @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16)) C = T.match_buffer(c, (16, 16)) for i in range(0, 16): with T.block(): A0 = T.match_buffer(A[i, 0:16], (16)) C0 = T.match_buffer(C[i, 0:16], (16)) B = T.alloc_buffer((16, 16)) with T.block(): B0 = T.match_buffer(B[i, 0:16], (16)) for j in range(0, 16): with T.block(): A1 = T.match_buffer(A0[j], ()) B1 = T.match_buffer(B0[j], ()) B1[()] = A1[()] + 1.0 for j in range(0, 16): with T.block(): C1 = T.match_buffer(C0[j], ()) B2 = T.match_buffer(B[i, j], ()) C1[()] = B2[()] * 2.0 @T.prim_func def expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16)) C = T.match_buffer(c, (16, 16)) for i in range(0, 16): with T.block(): A0 = T.match_buffer(A[i, 0:16], (16)) C0 = T.match_buffer(C[i, 0:16], (16)) B = T.alloc_buffer((1, 16)) with T.block(): B0 = T.match_buffer(B[0, 0:16], (16)) for j in range(0, 16): with T.block(): A1 = T.match_buffer(A0[j], ()) B1 = T.match_buffer(B0[j], ()) B1[()] = A1[()] + 
1.0 for j in range(0, 16): with T.block(): C1 = T.match_buffer(C0[j], ()) B2 = T.match_buffer(B[0, j], ()) C1[()] = B2[()] * 2.0 class TestStorageAlign(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[i, j]) T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]}) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[i, j]) T.writes(C[i, j]) C[i, j] = B[i, j] * 2.0 @T.prim_func def expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((1, 16), strides=(31, 1), dtype="float32") for j in range(0, 16): with T.block(): T.reads(A[i, j]) T.writes(B[0, j]) T.block_attr({"buffer_dim_align": [[0, 0, 16, 15]]}) B[0, j] = A[i, j] + 1.0 for j in range(0, 16): with T.block(): T.reads(B[0, j]) T.writes(C[i, j]) C[i, j] = B[0, j] * 2.0 class TestPaddingPattern(BaseCompactTest): @T.prim_func def before(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (20, 20), "float32") with T.block(): B = T.alloc_buffer((20, 20), dtype="float32") for i, j in T.grid(16, 16): with T.block(): B[i, j] = A[i, j] for i, j in T.grid(20, 20): with T.block(): C[i, j] = T.if_then_else( 2 <= i and i < 18 and 2 <= j and j < 18, B[i - 2, j - 2], 0.0, dtype="float32", ) @T.prim_func def expected(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [16, 16], dtype="float32") C = T.match_buffer(c, [20, 20], dtype="float32") with T.block(): B = T.alloc_buffer([16, 16], dtype="float32") for i, j in T.grid(16, 16): with T.block(): B[i, j] = A[i, j] for i, j in T.grid(20, 20): 
with T.block(): C[i, j] = T.if_then_else( 2 <= i and i < 18 and 2 <= j and j < 18, B[i - 2, j - 2], 0.0, dtype="float32", ) class TestPaddingPatternInlined(BaseCompactTest): @T.prim_func def before(a: T.handle, b: T.handle) -> None: X = T.match_buffer(a, [224, 224], dtype="float32") Y = T.match_buffer(b, [224, 224], dtype="float32") cache = T.alloc_buffer([224, 224], dtype="float32") for h, w in T.grid(224, 224): with T.block("cache"): cache[h, w] = X[h, w] for h, w, kh, kw in T.grid(224, 224, 3, 3): with T.block("compute"): Y[h, w] = T.max( Y[h, w], T.if_then_else( T.likely(1 <= h + kh, dtype="bool") and T.likely(h + kh < 225, dtype="bool") and T.likely(1 <= w + kw, dtype="bool") and T.likely(w + kw < 225, dtype="bool"), cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32", ), ) @T.prim_func def expected(X: T.Buffer((224, 224), "float32"), Y: T.Buffer((224, 224), "float32")) -> None: cache = T.alloc_buffer([224, 224], dtype="float32") for h, w in T.grid(224, 224): with T.block("cache"): cache[h, w] = X[h, w] for h, w, kh, kw in T.grid(224, 224, 3, 3): with T.block("compute"): Y[h, w] = T.max( Y[h, w], T.if_then_else( T.likely(1 <= h + kh, dtype="bool") and T.likely(h + kh < 225, dtype="bool") and T.likely(1 <= w + kw, dtype="bool") and T.likely(w + kw < 225, dtype="bool"), cache[h + kh - 1, w + kw - 1], 0.0, dtype="float32", ), ) class TestMemAccessInBranch(BaseCompactTest): @T.prim_func def before(a: T.handle) -> None: A = T.match_buffer(a, (224, 224), "float32") with T.block(): B1 = T.alloc_buffer((224, 224), dtype="float32") B2 = T.alloc_buffer((224, 224), dtype="float32") B3 = T.alloc_buffer((224, 224), dtype="float32") B4 = T.alloc_buffer((224, 224), dtype="float32") for i in range(0, 224): for j in range(0, 224): with T.block(): if i < 112 and j < 112: B1[i, j] = A[i, j] * 2.0 else: B2[i, j] = A[i, j] + 3.0 for i in range(0, 224): for j in range(0, 224): with T.block(): if i < 112 or j < 112: B3[i, j] = A[i, j] * 2.0 else: B4[i, j] = A[i, j] + 3.0 
@T.prim_func def expected(a: T.handle) -> None: A = T.match_buffer(a, [224, 224], dtype="float32") with T.block(): B1 = T.alloc_buffer([112, 112], dtype="float32") B2 = T.alloc_buffer([224, 224], dtype="float32") B3 = T.alloc_buffer([224, 224], dtype="float32") B4 = T.alloc_buffer([112, 112], dtype="float32") for i, j in T.grid(224, 224): with T.block(): if i < 112 and j < 112: B1[i, j] = A[i, j] * 2.0 else: B2[i, j] = A[i, j] + 3.0 for i, j in T.grid(224, 224): with T.block(): if i < 112 or j < 112: B3[i, j] = A[i, j] * 2.0 else: B4[i - 112, j - 112] = A[i, j] + 3.0 class TestAnnotatedOpaqueAccess(BaseCompactTest): is_lower_order_free = False @T.prim_func def before(a: T.handle) -> None: A = T.match_buffer(a, (1024,), "float32") with T.block(): B = T.alloc_buffer((1024,), dtype="float32") C = T.alloc_buffer((1024,), dtype="float32") for i in range(0, 512): with T.block(): # no annotation, opaque access will cover full region T.reads([]) T.writes([]) T.evaluate( T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32") ) B[i] = A[i] with T.block(): # treat opaque access only access annotated regions, even if # they are not compatible with actual buffer accesses. T.reads([B[i]]) T.writes([C[i : i + 9]]) T.evaluate( T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32") ) C[i] = B[i] @T.prim_func def expected(a: T.handle) -> None: A = T.match_buffer(a, (1024,), "float32") with T.block(): B = T.alloc_buffer((1024,), dtype="float32") C = T.alloc_buffer((520,), dtype="float32") for i in range(0, 512): with T.block(): # no annotation, opaque access will cover full region T.reads([]) T.writes([]) T.evaluate( T.call_extern("opaque_extern_function", A.data, B.data, dtype="int32") ) B[i] = A[i] with T.block(): # treat opaque access only access annotated regions, even if # they are not compatible with actual buffer accesses. 
T.reads([B[i]]) T.writes([C[i : i + 9]]) T.evaluate( T.call_extern("opaque_extern_function", B.data, C.data, dtype="int32") ) C[i] = B[i] class TestSparseReadCache(BaseCompactTest): @T.prim_func def before( A_data: T.Buffer((819,), "float32"), B: T.Buffer((128,), "float32"), A_indptr: T.Buffer((129,), "int32"), ) -> None: for i in T.serial(128): with T.block("rowsum_outer"): T.reads( A_indptr[i : i + 1], A_data[A_indptr[i] + 0 : A_indptr[i] + (A_indptr[i + 1] - A_indptr[i])], ) T.writes(B[i]) with T.block("rowsum_init"): T.reads() T.writes(B[i]) B[i] = T.float32(0) for k in T.serial(A_indptr[i + 1] - A_indptr[i]): with T.block(): T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i]) T.writes(B[i]) A_data_local = T.alloc_buffer([819], dtype="float32", scope="local") with T.block("A_data_cache_read"): T.reads(A_indptr[i], A_data[A_indptr[i] + k]) T.writes(A_data_local[A_indptr[i] + k]) A_data_local[A_indptr[i] + k] = A_data[A_indptr[i] + k] with T.block("rowsum_inner"): T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k]) T.writes(B[i]) B[i] = B[i] + A_data_local[A_indptr[i] + k] @T.prim_func def expected( A_data: T.Buffer((819,), "float32"), B: T.Buffer((128,), "float32"), A_indptr: T.Buffer((129,), "int32"), ) -> None: for i in T.serial(128): with T.block("rowsum_outer"): T.reads( A_indptr[i : i + 1], A_data[A_indptr[i] + 0 : A_indptr[i] + 0 + (A_indptr[i + 1] - A_indptr[i])], ) T.writes(B[i]) with T.block("rowsum_init"): T.reads() T.writes(B[i]) B[i] = T.float32(0) for k in T.serial(A_indptr[i + 1] - A_indptr[i]): with T.block(): T.reads(A_indptr[i], A_data[A_indptr[i] + k], B[i]) T.writes(B[i]) A_data_local = T.alloc_buffer([1], dtype="float32", scope="local") with T.block("A_data_cache_read"): T.reads(A_indptr[i], A_data[A_indptr[i] + k]) T.writes(A_data_local[T.min(A_indptr[i] + k, 0)]) A_data_local[T.min(A_indptr[i] + k, 0)] = A_data[A_indptr[i] + k] with T.block("rowsum_inner"): T.reads(B[i], A_indptr[i], A_data[A_indptr[i] + k]) T.writes(B[i]) B[i] = B[i] + 
A_data_local[T.min(A_indptr[i] + k, 0)] class TestDataDependentRegion(BaseCompactTest): """Partial code of NMS, the `argsort_nms_cpu`'s region depends on inner allocated buffer `nkeep`'s value, thus the buffer should not be compacted with data dependent region extent.""" @T.prim_func def before( p0: T.Buffer((30,), "float32"), p1: T.Buffer((1,), "int32"), hybrid_nms: T.Buffer((30,), "float32"), ): argsort_nms_cpu = T.decl_buffer([5], "int32", scope="global") for i in range(1): nkeep = T.decl_buffer([1], "int32", scope="global") if 0 < p1[i]: nkeep[0] = p1[i] if 2 < nkeep[0]: nkeep[0] = 2 for j in T.parallel(nkeep[0]): for k in range(6): hybrid_nms[i * 30 + j * 6 + k] = p0[ i * 30 + argsort_nms_cpu[i * 5 + j] * 6 + k ] hybrid_nms[i * 5 + j] = argsort_nms_cpu[i * 5 + j] if 2 < p1[i]: for j in T.parallel(p1[i] - nkeep[0]): for k in range(6): hybrid_nms[i * 30 + j * 6 + nkeep[0] * 6 + k] = T.float32(-1) hybrid_nms[i * 5 + j + nkeep[0]] = -1 expected = before class TestNarrowShape(BaseCompactTest): @T.prim_func def before(A: T.Buffer((10,), "float32"), B: T.Buffer((10,), "float32")) -> None: B_cache = T.alloc_buffer(10, "float32") for j in T.serial(3): for k in T.serial(4): with T.block("B_cache"): T.where(j * 4 + k < 10) B_cache[j * 4 + k] = B[j] for i in T.serial(10): A[i] = B_cache[i] + T.float32(1) @T.prim_func def expected(A: T.Buffer((10,), "float32"), B: T.Buffer((10,), "float32")) -> None: B_cache = T.alloc_buffer([10], dtype="float32") for j, k in T.grid(3, 4): with T.block("B_cache"): T.where(j * 4 + k < 10) T.reads(B[j]) T.writes(B_cache[j * 4 + k]) B_cache[j * 4 + k] = B[j] for i in T.serial(10): A[i] = B_cache[i] + T.float32(1) class TestLetBinding(BaseCompactTest): @T.prim_func def before(): A = T.alloc_buffer((64, 8), "float32") B = T.alloc_buffer((64, 8), "float32") C = T.alloc_buffer((8, 8), "float32") for rk in range(64): for rii, rjj in T.grid(8, 8): C[rii, rjj] = T.float32(0) for riijj in T.serial(8 * 8): rii: T.int32 = riijj // 8 rjj: T.int32 = 
riijj % 8 C[rii, rjj] += A[rk, rii] * B[rk, rjj] expected = before class TestNonIndexLetBinding(BaseCompactTest): @T.prim_func def before(): A = T.alloc_buffer((64), "float32") x1 = T.call_extern("get", dtype="float16") x2 = T.call_extern("get", dtype="float32") x3 = T.call_extern("get", dtype="float64") x4 = T.call_extern("get", dtype="uint8") x5 = T.call_extern("get", dtype="int32x16") x6 = T.call_extern("get", dtype="handle") x7 = T.call_extern("get", dtype="") for rk in range(64): A[rk] = T.call_extern("load_ptr", x1, x2, x3, x4, x5, x6, x7, dtype="float32") expected = before class TestSpatialTiledPadPooling(BaseCompactTest): @T.prim_func def before(X: T.Buffer((64, 112, 112), "int32"), Y: T.Buffer((64, 56, 56), "int32")) -> None: for h_o, w_o in T.grid(14, 14): with T.block(): X_cache = T.alloc_buffer([112, 112, 64], dtype="int32") for ax0, ax1, ax2 in T.grid(64, 9, 9): with T.block("cache"): T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2) T.reads(X[ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2]) T.writes(X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0]) X_cache[h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2, ax0] = X[ ax0, h_o * 8 - 1 + ax1, w_o * 8 - 1 + ax2 ] for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64): with T.block("compute"): T.reads( X_cache[(h_o * 4 + h_i) * 2 + kh - 1, (w_o * 4 + w_i) * 2 + kw - 1, c] ) T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c]) if kh == 0 and kw == 0: Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0 Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max( Y[h_o * 4 + h_i, w_o * 4 + w_i, c], T.if_then_else( T.likely(1 <= (h_o * 4 + h_i) * 2 + kh, dtype="bool") and T.likely((h_o * 4 + h_i) * 2 + kh < 113, dtype="bool") and T.likely(1 <= (w_o * 4 + w_i) * 2 + kw, dtype="bool") and T.likely((w_o * 4 + w_i) * 2 + kw < 113, dtype="bool"), X_cache[ (h_o * 4 + h_i) * 2 + kh - 1, (w_o * 4 + w_i) * 2 + kw - 1, c, ], 0, dtype="int32", ), ) @T.prim_func def expected(X: T.Buffer((64, 112, 112), "int32"), Y: T.Buffer((64, 56, 56), "int32")) -> None: for h_o, w_o in 
T.grid(14, 14): with T.block(): T.reads(X[0:64, h_o * 8 - 1 : h_o * 8 + 8, w_o * 8 - 1 : w_o * 8 + 8]) T.writes(Y[h_o * 4 : h_o * 4 + 4, w_o * 4 : w_o * 4 + 4, 0:64]) X_cache = T.alloc_buffer([9, 9, 64], dtype="int32") for ax0, ax1, ax2 in T.grid(64, 9, 9): with T.block("cache"): T.where(1 <= h_o * 8 + ax1 and 1 <= w_o * 8 + ax2) T.reads(X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1]) T.writes( X_cache[ h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1, ax0, ] ) X_cache[ h_o * 8 + ax1 - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + ax2 - T.max(0, w_o * 8 - 1) - 1, ax0, ] = X[ax0, h_o * 8 + ax1 - 1, w_o * 8 + ax2 - 1] for h_i, w_i, kh, kw, c in T.grid(4, 4, 3, 3, 64): with T.block("compute"): T.reads( X_cache[ h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1, c, ] ) T.writes(Y[h_o * 4 + h_i, w_o * 4 + w_i, c]) if kh == 0 and kw == 0: Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = 0 Y[h_o * 4 + h_i, w_o * 4 + w_i, c] = T.max( Y[h_o * 4 + h_i, w_o * 4 + w_i, c], T.if_then_else( T.likely(1 <= h_o * 8 + h_i * 2 + kh, dtype="bool") and T.likely(1 <= w_o * 8 + w_i * 2 + kw, dtype="bool"), X_cache[ h_o * 8 + h_i * 2 + kh - T.max(0, h_o * 8 - 1) - 1, w_o * 8 + w_i * 2 + kw - T.max(0, w_o * 8 - 1) - 1, c, ], 0, dtype="int32", ), ) class TestComplexCase1(BaseCompactTest): """Meta-schedule matmul case for compact shared A, B matrix""" # fmt: off @T.prim_func def before(A: T.Buffer((960, 770), "float32"), B: T.Buffer((770, 2304), "float32"), C: T.Buffer((960, 2304), "float32")) -> None: for bx in T.thread_binding(144, thread="blockIdx.x"): for vx in T.thread_binding(2, thread="vthread.x"): for tx_p in T.thread_binding(256, thread="threadIdx.x"): with T.block(): for k_0 in T.serial(193): with T.block(): A_shared = T.alloc_buffer([960, 770], dtype="float32", scope="shared") B_shared = T.alloc_buffer([770, 2304], dtype="float32", scope="shared") for _u in T.serial(1): for tx in T.thread_binding(256, 
thread="threadIdx.x"): for vec in T.vectorized(3): with T.block("A_shared"): T.where(bx // 18 * 128 + ((_u * 256 + tx) * 3 + vec) // 4 < 960 and k_0 * 4 + ((_u * 256 + tx) * 3 + vec) % 4 < 770 and (_u * 256 + tx) * 3 + vec < 512) A_shared[bx // 18 * 128 + (_u * 768 + tx * 3 + vec) // 4, k_0 * 4 + (_u * 768 + tx * 3 + vec) % 4] = A[bx // 18 * 128 + (_u * 768 + tx * 3 + vec) // 4, k_0 * 4 + (_u * 768 + tx * 3 + vec) % 4] for _u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(4): with T.block("B_shared"): T.where(k_0 * 4 + ((_u * 256 + tx) * 4 + vec) // 128 < 770 and (_u * 256 + tx) * 4 + vec < 512) B_shared[k_0 * 4 + (_u * 1024 + tx * 4 + vec) // 128, bx % 18 * 128 + (_u * 1024 + tx * 4 + vec) % 128] = B[k_0 * 4 + (_u * 1024 + tx * 4 + vec) // 128, bx % 18 * 128 + (_u * 1024 + tx * 4 + vec) % 128] for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2): with T.block("update_update"): C[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4] = C[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4] + A_shared[(((bx // 18 + 0) * 8 + tx_p // 32) * 8 + i_3) * 2 + i_4, (k_0 + k_1) * 4 + k_2] * B_shared[(k_0 + k_1) * 4 + k_2, ((bx % 18 * 2 + vx % 2) * 32 + tx_p % 32 + j_3) * 2 + j_4] @T.prim_func def expected(A: T.Buffer((960, 770), "float32"), B: T.Buffer((770, 2304), "float32"), C: T.Buffer((960, 2304), "float32")) -> None: for bx in T.thread_binding(144, thread="blockIdx.x"): for vx in T.thread_binding(2, thread="vthread.x"): for tx_p in T.thread_binding(256, thread="threadIdx.x"): with T.block(): for k_0 in T.serial(193): with T.block(): A_shared = T.alloc_buffer([128, 4], dtype="float32", scope="shared") B_shared = T.alloc_buffer([4, 128], dtype="float32", scope="shared") for v_u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(3): with 
T.block("A_shared"): T.where(bx // 18 * 128 + (tx * 3 + vec) // 4 < 960 and k_0 * 4 + (tx * 3 + vec) % 4 < 770 and tx * 3 + vec < 512) A_shared[(tx * 3 + vec) // 4, (tx * 3 + vec) % 4] = A[bx // 18 * 128 + (tx * 3 + vec) // 4, k_0 * 4 + (tx * 3 + vec) % 4] for v_u in T.serial(1): for tx in T.thread_binding(256, thread="threadIdx.x"): for vec in T.vectorized(4): with T.block("B_shared"): T.where(k_0 * 4 + tx // 32 < 770 and tx * 4 + vec < 512) B_shared[tx // 32, tx % 32 * 4 + vec] = B[k_0 * 4 + tx // 32, bx % 18 * 128 + tx % 32 * 4 + vec] for k_1, i_3, j_3, k_2, i_4, j_4 in T.grid(1, 8, 1, 4, 2, 2): with T.block("update_update"): C[bx // 18 * 128 + tx_p // 32 * 16 + i_3 * 2 + i_4, bx % 18 * 128 + vx * 64 + tx_p % 32 * 2 + j_4] = C[bx // 18 * 128 + tx_p // 32 * 16 + i_3 * 2 + i_4, bx % 18 * 128 + vx * 64 + tx_p % 32 * 2 + j_4] + A_shared[tx_p // 32 * 16 + i_3 * 2 + i_4, k_2] * B_shared[k_2, vx * 64 + tx_p % 32 * 2 + j_4] # fmt: on class TestDependentBufferIndices(BaseCompactTest): """Check the upper bound on different indices could be independently estimated.""" @T.prim_func def before(): """This is a diagnal buffer access pattern""" for i in range(8): with T.block(): A = T.alloc_buffer((256, 256), "float32") for j, k in T.grid(8, 8): with T.block(): T.where(j * 8 + k < 60) A[i * 64 + j * 8 + k, i * 64 + j * 8 + k] = 1.0 @T.prim_func def expected() -> None: for i in T.serial(8): with T.block(): A = T.alloc_buffer([60, 60], dtype="float32") for j, k in T.grid(8, 8): with T.block(): T.where(j * 8 + k < 60) A[j * 8 + k, j * 8 + k] = 1.0 class TestDependentBufferIndicesOfPackedMatmul(BaseCompactTest): """Check the outer dimension of the packed M-dim should be compacted to 1 wrt split condition.""" @T.prim_func def before( A: T.Buffer((1020, 64), "float32"), B: T.Buffer((1000, 64), "float32"), C: T.Buffer((1020, 1000), "float32"), ): for i0, i1 in T.grid(4, 1): with T.block(): C_local2 = T.alloc_buffer([4, 1, 16, 1000, 16], dtype="float32", scope="local") C_local1 = 
T.alloc_buffer([1020, 1000], dtype="float32", scope="local") for ax0, ax1, ax2 in T.grid(255, 1000, 64): with T.block("matmul"): if ax2 == 0: C_local1[i0 * 255 + ax0, ax1] = 0 C_local1[i0 * 255 + ax0, ax1] = ( C_local1[i0 * 255 + ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2] ) for ax0, ax1 in T.grid(255, 1000): with T.block("st1"): C_local2[ (i0 * 255 + ax0) // 255, 0, (i0 * 255 + ax0) % 255 // 16, ax1, (i0 * 255 + ax0) % 255 % 16, ] = C_local1[i0 * 255 + ax0, ax1] for ax0, ax1, ax2 in T.grid(16, 16, 1000): with T.block("st2"): T.where(ax0 * 16 + ax1 < 255) C[i0 * 255 + (ax0 * 16 + ax1), i1 * 1000 + ax2] = C_local2[ (i0 * 255 + ax0 * 16 + ax1) // 255, 0, (i0 * 255 + ax0 * 16 + ax1) % 255 // 16, i1 * 1000 + ax2, (i0 * 255 + ax0 * 16 + ax1) % 255 % 16, ] @T.prim_func def expected( A: T.Buffer((1020, 64), "float32"), B: T.Buffer((1000, 64), "float32"), C: T.Buffer((1020, 1000), "float32"), ) -> None: for i0, i1 in T.grid(4, 1): with T.block(): C_local2 = T.alloc_buffer([1, 1, 15, 1000, 16], dtype="float32", scope="local") C_local1 = T.alloc_buffer([255, 1000], dtype="float32", scope="local") for ax0, ax1, ax2 in T.grid(255, 1000, 64): with T.block("matmul"): if ax2 == 0: C_local1[ax0, ax1] = 0 C_local1[ax0, ax1] = ( C_local1[ax0, ax1] + A[i0 * 255 + ax0, ax2] * B[ax1, ax2] ) for ax0, ax1 in T.grid(255, 1000): with T.block("st1"): C_local2[0, 0, ax0 // 16, ax1, ax0 % 16] = C_local1[ax0, ax1] for ax0, ax1, ax2 in T.grid(16, 16, 1000): with T.block("st2"): T.where(ax0 * 16 + ax1 < 255) C[i0 * 255 + ax0 * 16 + ax1, ax2] = C_local2[ (ax0 * 16 + ax1) // 255, 0, (ax0 * 16 + ax1) % 255 // 16, ax2, (ax0 * 16 + ax1) % 255 % 16, ] class TestTileAwareCompaction(BaseCompactTest): """Each partitioned tile could be independently compacted.""" # it is not an opaque block case intentionally is_lower_order_free = False @T.prim_func def before( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ): for i_0 in range(5, 
annotations={"pragma_loop_partition_hint": 1}): for j_0 in range(5, annotations={"pragma_loop_partition_hint": 1}): A_local = T.decl_buffer((26, 128), scope="local") B_local = T.decl_buffer((128, 26), scope="local") C_local = T.decl_buffer((26, 26), scope="local") for ax0, ax1 in T.grid(26, 128): if i_0 * 26 + ax0 < 128: A_local[ax0, ax1] = A[i_0 * 26 + ax0, ax1] for ax0, ax1 in T.grid(128, 26): if j_0 * 26 + ax1 < 128: B_local[ax0, ax1] = B[ax0, j_0 * 26 + ax1] for i_1, j_1, k in T.grid(26, 26, 128): if i_0 * 26 + i_1 < 128 and j_0 * 26 + j_1 < 128: if k == 0: C_local[i_1, j_1] = T.float32(0) C_local[i_1, j_1] = C_local[i_1, j_1] + A_local[i_1, k] * B_local[k, j_1] for ax0, ax1 in T.grid(26, 26): if i_0 * 26 + ax0 < 128 and j_0 * 26 + ax1 < 128: C[i_0 * 26 + ax0, j_0 * 26 + ax1] = C_local[ax0, ax1] # Get partitioned workload to compact before_mod = tvm.IRModule.from_expr(before) with tvm.transform.PassContext(config={"tir.LoopPartition": {"partition_const_loop": True}}): before_mod = tvm.tir.transform.LowerOpaqueBlock()(before_mod) before_mod = tvm.tir.transform.LoopPartition()(before_mod) before = before_mod["main"] @T.prim_func def expected( A: T.Buffer((128, 128), "float32"), B: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32"), ): for i_0 in range(4): for j_0 in range(4): A_local_tile0 = T.decl_buffer((26, 128), scope="local") B_local_tile0 = T.decl_buffer((128, 26), scope="local") C_local_tile0 = T.decl_buffer((26, 26), scope="local") for ax0, ax1 in T.grid(26, 128): A_local_tile0[ax0, ax1] = A[i_0 * 26 + ax0, ax1] for ax0, ax1 in T.grid(128, 26): B_local_tile0[ax0, ax1] = B[ax0, j_0 * 26 + ax1] for i_1, j_1, k in T.grid(26, 26, 128): if k == 0: C_local_tile0[i_1, j_1] = T.float32(0) C_local_tile0[i_1, j_1] = ( C_local_tile0[i_1, j_1] + A_local_tile0[i_1, k] * B_local_tile0[k, j_1] ) for ax0, ax1 in T.grid(26, 26): C[i_0 * 26 + ax0, j_0 * 26 + ax1] = C_local_tile0[ax0, ax1] A_local_tile1 = T.decl_buffer((26, 128), scope="local") 
B_local_tile1 = T.decl_buffer((128, 24), scope="local") C_local_tile1 = T.decl_buffer((26, 24), scope="local") for ax0, ax1 in T.grid(26, 128): A_local_tile1[ax0, ax1] = A[i_0 * 26 + ax0, ax1] for ax0, ax1 in T.grid(128, 26): if ax1 < 24: B_local_tile1[ax0, ax1] = B[ax0, ax1 + 104] for i_1, j_1, k in T.grid(26, 26, 128): if j_1 < 24: if k == 0: C_local_tile1[i_1, j_1] = T.float32(0) C_local_tile1[i_1, j_1] = ( C_local_tile1[i_1, j_1] + A_local_tile1[i_1, k] * B_local_tile1[k, j_1] ) for ax0, ax1 in T.grid(26, 26): if ax1 < 24: C[i_0 * 26 + ax0, ax1 + 104] = C_local_tile1[ax0, ax1] for j_0 in range(4): A_local_tile2 = T.decl_buffer((24, 128), scope="local") B_local_tile2 = T.decl_buffer((128, 26), scope="local") C_local_tile2 = T.decl_buffer((24, 26), scope="local") for ax0, ax1 in T.grid(26, 128): if ax0 < 24: A_local_tile2[ax0, ax1] = A[ax0 + 104, ax1] for ax0, ax1 in T.grid(128, 26): B_local_tile2[ax0, ax1] = B[ax0, j_0 * 26 + ax1] for i_1, j_1, k in T.grid(26, 26, 128): if i_1 < 24: if k == 0: C_local_tile2[i_1, j_1] = T.float32(0) C_local_tile2[i_1, j_1] = ( C_local_tile2[i_1, j_1] + A_local_tile2[i_1, k] * B_local_tile2[k, j_1] ) for ax0, ax1 in T.grid(26, 26): if ax0 < 24: C[ax0 + 104, j_0 * 26 + ax1] = C_local_tile2[ax0, ax1] A_local_tile3 = T.decl_buffer((24, 128), scope="local") B_local_tile3 = T.decl_buffer((128, 24), scope="local") C_local_tile3 = T.decl_buffer((24, 24), scope="local") for ax0, ax1 in T.grid(26, 128): if ax0 < 24: A_local_tile3[ax0, ax1] = A[ax0 + 104, ax1] for ax0, ax1 in T.grid(128, 26): if ax1 < 24: B_local_tile3[ax0, ax1] = B[ax0, ax1 + 104] for i_1, j_1, k in T.grid(26, 26, 128): if i_1 < 24 and j_1 < 24: if k == 0: C_local_tile3[i_1, j_1] = T.float32(0) C_local_tile3[i_1, j_1] = ( C_local_tile3[i_1, j_1] + A_local_tile3[i_1, k] * B_local_tile3[k, j_1] ) for ax0, ax1 in T.grid(26, 26): if ax0 < 24 and ax1 < 24: C[ax0 + 104, ax1 + 104] = C_local_tile3[ax0, ax1] class TestNonStrictCompactionForPaddedMatmul(BaseCompactTest): 
is_strict_mode = False @T.prim_func def before( A: T.Buffer((127, 127), "float32"), B: T.Buffer((127, 127), "float32"), C: T.Buffer((127, 127), "float32"), ): """A mock workload where the intermediate buffer allocation is not enought originally""" for i_0, j_0 in T.grid(4, 4): with T.block(""): T.reads(A[i_0 * 32 : i_0 * 32 + 32, 0:128], B[0:128, j_0 * 32 : j_0 * 32 + 32]) T.writes(C[i_0 * 32 : i_0 * 32 + 32, j_0 * 32 : j_0 * 32 + 32]) A_local = T.alloc_buffer((127, 127), scope="local") B_local = T.alloc_buffer((127, 127), scope="local") C_local = T.alloc_buffer((127, 127), scope="local") for ax0, ax1 in T.grid(32, 128): with T.block("A_local"): A_local[i_0 * 32 + ax0, ax1] = T.if_then_else( i_0 * 32 + ax0 < 127, A[i_0 * 32 + ax0, ax1], 0.0 ) for ax0, ax1 in T.grid(128, 32): with T.block("B_local"): B_local[ax0, j_0 * 32 + ax1] = T.if_then_else( j_0 * 32 + ax1 < 127, B[ax0, j_0 * 32 + ax1], 0.0 ) for i_1, j_1, k in T.grid(32, 32, 128): with T.block("compute"): T.where(i_0 * 32 + i_1 < 127 and j_0 * 32 + j_1 < 127) if k == 0: C_local[i_0 * 32 + i_1, j_0 * 32 + j_1] = T.float32(0) C_local[i_0 * 32 + i_1, j_0 * 32 + j_1] = ( C_local[i_0 * 32 + i_1, j_0 * 32 + j_1] + A_local[i_0 * 32 + i_1, k] * B_local[k, j_0 * 32 + j_1] ) for ax0, ax1 in T.grid(32, 32): with T.block("C_local"): T.where(i_0 * 32 + ax0 < 127 and j_0 * 32 + ax1 < 127) C[i_0 * 32 + ax0, j_0 * 32 + ax1] = C_local[i_0 * 32 + ax0, j_0 * 32 + ax1] @T.prim_func def expected( A: T.Buffer((127, 127), "float32"), B: T.Buffer((127, 127), "float32"), C: T.Buffer((127, 127), "float32"), ): for i_0, j_0 in T.grid(4, 4): with T.block(""): T.reads(A[i_0 * 32 : i_0 * 32 + 32, 0:128], B[0:128, j_0 * 32 : j_0 * 32 + 32]) T.writes(C[i_0 * 32 : i_0 * 32 + 32, j_0 * 32 : j_0 * 32 + 32]) A_local = T.alloc_buffer((32, 128), scope="local") B_local = T.alloc_buffer((128, 32), scope="local") C_local = T.alloc_buffer((32, 32), scope="local") for ax0, ax1 in T.grid(32, 128): with T.block("A_local"): A_local[ax0, ax1] = 
T.if_then_else( i_0 * 32 + ax0 < 127, A[i_0 * 32 + ax0, ax1], T.float32(0) ) for ax0, ax1 in T.grid(128, 32): with T.block("B_local"): B_local[ax0, ax1] = T.if_then_else( j_0 * 32 + ax1 < 127, B[ax0, j_0 * 32 + ax1], T.float32(0) ) for i_1, j_1, k in T.grid(32, 32, 128): with T.block("compute"): T.where(i_0 * 32 + i_1 < 127 and j_0 * 32 + j_1 < 127) if k == 0: C_local[i_1, j_1] = T.float32(0) C_local[i_1, j_1] = C_local[i_1, j_1] + A_local[i_1, k] * B_local[k, j_1] for ax0, ax1 in T.grid(32, 32): with T.block("C_local"): T.where(i_0 * 32 + ax0 < 127 and j_0 * 32 + ax1 < 127) C[i_0 * 32 + ax0, j_0 * 32 + ax1] = C_local[ax0, ax1] class TestNotCompactAliasBuffer(BaseCompactTest): # it is not testcase on block form is_lower_order_free = False @T.prim_func def before(): """Partially accessed buffer, but should not compact because existence of aliasing buffer B.""" data = T.allocate([1024], "int8") A = T.decl_buffer([1024], "int8", data) B = T.decl_buffer([512], "float16", data) for i in range(10): A[i] = A[i] + T.int8(1) for i in range(10): B[i] = B[i] + T.float16(1) expected = before class TestNotCompactBufferWithDifferentDtype(BaseCompactTest): # it is not testcase on block form is_lower_order_free = False @T.prim_func def before(): """Partially accessed buffer, but should not compact because existence of aliasing buffer B.""" data = T.allocate([1024], "int8") A = T.decl_buffer([256], "int32", data) for i in range(10): A[i] = A[i] + 1 expected = before class TestNonBoolCondition(BaseCompactTest): # it is not testcase on block form is_lower_order_free = False @T.prim_func def before(): data = T.allocate([12], "int32") A = T.Buffer([12], "int32", data) for i in range(10): if i: A[i] = A[i] + 1 @T.prim_func def expected(): data = T.allocate([9], "int32") A = T.Buffer([9], "int32", data) for i in range(10): if i: A[i - 1] = A[i - 1] + 1 def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = 
tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.CompactBufferAllocation()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) # CompactBufferAllocation should do nothing on TE class TestCompactSymbolicBound0: """Test symbolic bound that get compacted to constant""" @T.prim_func def before(x: T.handle, y: T.handle, n: T.int64): X = T.match_buffer(x, (T.int64(8), n * T.int64(32))) Y = T.match_buffer(y, (T.int64(8), n * T.int64(32))) for i, k_0 in T.grid(T.int64(8), n): with T.block(""): X_global = T.alloc_buffer((T.int64(8), n * T.int64(32))) for ax0 in range(T.int64(32)): with T.block("X_global"): X_global[i, k_0 * T.int64(32) + ax0] = X[i, k_0 * T.int64(32) + ax0] for k_1 in range(T.int64(32)): with T.block("Y"): Y[i, k_0 * T.int64(32) + k_1] = X_global[i, k_0 * T.int64(32) + k_1] @T.prim_func def expected(x: T.handle, y: T.handle, n: T.int64): X = T.match_buffer(x, (T.int64(8), n * T.int64(32))) Y = T.match_buffer(y, (T.int64(8), n * T.int64(32))) for i, k_0 in T.grid(T.int64(8), n): with T.block(""): X_global = T.alloc_buffer((T.int64(1), T.int64(32))) for ax0 in range(T.int64(32)): with T.block("X_global"): X_global[T.int64(0), ax0] = X[i, k_0 * T.int64(32) + ax0] for k_1 in range(T.int64(32)): with T.block("Y"): Y[i, k_0 * T.int64(32) + k_1] = X_global[T.int64(0), k_1] class TestCompactSymbolicBound1: """Test symbolic bound that get compacted to constant""" @T.prim_func def before(x: T.handle, y: T.handle, n: T.int64): X = T.match_buffer(x, (T.int64(8), n * T.int64(32))) Y = T.match_buffer(y, (T.int64(8), n * T.int64(32))) for i, k_0 in T.grid(T.int64(8), n): with T.block(""): X_global = T.alloc_buffer((T.int64(8), n * T.int64(32))) with T.block("X_global"): for x0 in range(T.int64(32)): X_global[i, k_0 * T.int64(32) + x0] = X[i, k_0 * T.int64(32) + x0] with T.block("Y"): for x1 in range(T.int64(32)): Y[i, k_0 * T.int64(32) + x1] = X_global[i, k_0 * T.int64(32) + x1] @T.prim_func def expected(x: T.handle, y: T.handle, n: 
T.int64): X = T.match_buffer(x, (T.int64(8), n * T.int64(32))) Y = T.match_buffer(y, (T.int64(8), n * T.int64(32))) # with T.block("root"): for i, k_0 in T.grid(T.int64(8), n): with T.block(""): X_global = T.alloc_buffer((T.int64(1), T.int64(32))) with T.block("X_global"): for x0 in range(T.int64(32)): X_global[T.int64(0), x0] = X[i, k_0 * T.int64(32) + x0] with T.block("Y"): for x1 in range(T.int64(32)): Y[i, k_0 * T.int64(32) + x1] = X_global[T.int64(0), x1] class TestSymbolicDiagMaskCase: """Test symbolic allocation not too complex""" @T.prim_func def before(p_output0: T.handle, n: T.int32): A = T.match_buffer(p_output0, (1, 1, n, n)) B = T.alloc_buffer((n, n)) for i in T.thread_binding(256, thread="blockIdx.x"): for j in T.thread_binding(256, thread="threadIdx.x"): for k in range((n * n + 65535) // 65536): with T.block("make_diag_mask_te"): T.where((k * 256 + i) * 256 + j < n * n) T.reads() T.writes(B[(k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n]) B[(k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n] = T.Select( (k * 65536 + i * 256 + j) // n < (k * 65536 + i * 256 + j) % n, T.float32(-3.4028234663852886e38), T.float32(3.4028234663852886e38), ) for i in T.thread_binding(256, thread="blockIdx.x"): for j in T.thread_binding(256, thread="threadIdx.x"): for k in range((n * n + 65535) // 65536): with T.block("T_broadcast_to"): T.where((k * 256 + i) * 256 + j < n * n) T.reads(B[(k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n]) T.writes( A[0, 0, (k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n] ) A[0, 0, (k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n] = B[ (k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n ] @T.prim_func def expected(p_output0: T.handle, n: T.int32): A = T.match_buffer(p_output0, (1, 1, n, n)) B = T.alloc_buffer((n, n)) for i in T.thread_binding(256, thread="blockIdx.x"): for j in T.thread_binding(256, thread="threadIdx.x"): for k in range((n * n + 65535) // 65536): with 
T.block("make_diag_mask_te"): T.where(k * 65536 + i * 256 + j < n * n) T.reads() T.writes(B[(k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n]) B[(k * 65536 + i * 256 + j) // n, (k * 65536 + i * 256 + j) % n] = T.Select( (k * 65536 + i * 256 + j) // n < (k * 65536 + i * 256 + j) % n, T.float32(-3.4028234663852886e38), T.float32(3.4028234663852886e38), ) for i in T.thread_binding(256, thread="blockIdx.x"): for k in T.thread_binding(256, thread="threadIdx.x"): for k in range((n * n + 65535) // 65536): with T.block("T_broadcast_to"): T.where(k * 65536 + i * 256 + k < n * n) T.reads(B[(k * 65536 + i * 256 + k) // n, (k * 65536 + i * 256 + k) % n]) T.writes( A[0, 0, (k * 65536 + i * 256 + k) // n, (k * 65536 + i * 256 + k) % n] ) A[0, 0, (k * 65536 + i * 256 + k) // n, (k * 65536 + i * 256 + k) % n] = B[ (k * 65536 + i * 256 + k) // n, (k * 65536 + i * 256 + k) % n ] if __name__ == "__main__": tvm.testing.main()
65,339
44.917077
461
py
tvm
tvm-main/tests/python/unittest/test_runtime_graph.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tempfile import tvm import tvm.testing from tvm import te, runtime import numpy as np import json from tvm import rpc from tvm import relay from tvm.contrib import utils, graph_executor @tvm.testing.requires_llvm def test_graph_simple(): n = 4 A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B") s = te.create_schedule(B.op) node0 = {"op": "null", "name": "x", "inputs": []} node1 = { "op": "tvm_op", "name": "add", "inputs": [[0, 0, 0]], "attrs": {"func_name": "myadd", "flatten_data": "1", "num_inputs": "1", "num_outputs": "1"}, } nodes = [node0, node1] arg_nodes = [0] node_row_ptr = [0, 1, 2] outputs = [[1, 0, 0]] shape = (4,) attrs = { "shape": ["list_shape", [shape, shape]], "dltype": ["list_str", ["float32", "float32"]], "storage_id": ["list_int", [0, 1]], } graph = { "nodes": nodes, "arg_nodes": arg_nodes, "node_row_ptr": node_row_ptr, "heads": outputs, "attrs": attrs, } graph = json.dumps(graph) def check_verify(): mlib = tvm.build(s, [A, B], "llvm", name="myadd") mod = graph_executor.create(graph, mlib, tvm.cpu(0)) a = np.random.uniform(size=(n,)).astype(A.dtype) mod.run(x=a) out = mod.get_output(0, tvm.nd.empty((n,))) 
np.testing.assert_equal(out.numpy(), a + 1) def check_remote(server): mlib = tvm.build(s, [A, B], "llvm", name="myadd") remote = rpc.connect(server.host, server.port) temp = utils.tempdir() dev = remote.cpu(0) path_dso = temp.relpath("dev_lib.so") mlib.export_library(path_dso) remote.upload(path_dso) mlib = remote.load_module("dev_lib.so") mod = graph_executor.create(graph, mlib, remote.cpu(0)) a = np.random.uniform(size=(n,)).astype(A.dtype) mod.run(x=tvm.nd.array(a, dev)) out = tvm.nd.empty((n,), device=dev) out = mod.get_output(0, out) np.testing.assert_equal(out.numpy(), a + 1) def check_sharing(): x = relay.var("x", shape=(1, 10)) y = relay.var("y", shape=(1, 10)) z = relay.add(x, y) func = relay.Function([x, y], z) x_in = np.ones((1, 10)).astype("float32") params = {"x": x_in} graph, lib, params = relay.build(func, target="llvm", params=params) mod_shared = graph_executor.create(graph, lib, tvm.cpu(0)) mod_shared.load_params(runtime.save_param_dict(params)) num_mods = 10 mods = [graph_executor.create(graph, lib, tvm.cpu(0)) for _ in range(num_mods)] for mod in mods: mod.share_params(mod_shared, runtime.save_param_dict(params)) a = np.random.uniform(size=(1, 10)).astype("float32") for mod in mods: mod.run(y=a) out = mod.get_output(0, tvm.nd.empty((1, 10))) np.testing.assert_equal(out.numpy(), x_in + a) # Explicitly delete the shared module and verify correctness. del mod_shared for mod in mods: mod.run(y=a) out = mod.get_output(0, tvm.nd.empty((1, 10))) np.testing.assert_equal(out.numpy(), x_in + a) del mod check_verify() check_remote(rpc.Server("127.0.0.1")) check_sharing() def test_load_unexpected_params(): # Test whether graph_executor.load_params works if parameters # are provided that are not an expected input. 
mod = tvm.IRModule() params = {} x = relay.var("x", shape=(1, 10)) y = relay.var("y", shape=(1, 10)) z = relay.add(x, y) mod["main"] = relay.Function([x, y], z) graph_module = relay.build(mod, target="llvm", params=params) rt_mod = tvm.contrib.graph_executor.create( graph_module.get_graph_json(), graph_module.get_lib(), tvm.cpu(0) ) new_params = graph_module.get_params() new_params.update({"y_unknown": np.ones((1,)).astype("float32")}) rt_mod.load_params(runtime.save_param_dict(new_params)) def test_save_load_file(): p = np.random.randn(10) params = {"x": p} with tempfile.NamedTemporaryFile() as fp: tvm.runtime.save_param_dict_to_file(params, fp.name) params_loaded = tvm.runtime.load_param_dict_from_file(fp.name) assert "x" in params_loaded np.testing.assert_equal(p, params_loaded["x"].numpy()) if __name__ == "__main__": tvm.testing.main()
5,291
32.923077
100
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_syntax_sugar.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring,invalid-name,pointless-string-statement import sys import pytest import tvm.testing from tvm.ir import assert_structural_equal from tvm.script import from_source from tvm.script import tir as T @T.prim_func def transformed_matmul_no_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4): with T.block("update"): vi, vj = T.axis.remap("SS", [i0, i1]) vk = T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner) T.reads([C[vi, vj], A[vi, vk], B[vj, vk]]) T.writes([C[vi, vj], A[vi, vk]]) with T.init(): C[vi, vj] = 0.0 A[vi, vk] = A[vi, vk] + B[vj, vk] C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk]) @T.prim_func def transformed_matmul_syntax_sugar(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [128, 128]) B = T.match_buffer(b, [128, 128]) C = T.match_buffer(c, [128, 128]) for i0, i1, i2_outer, i2_inner_outer, i2_inner_inner in T.grid(128, 128, 4, 8, 4): with T.block("update"): vi, vj = T.axis.remap("SS", [i0, i1]) vk = 
T.axis.R(128, i2_outer * 32 + i2_inner_outer * 4 + i2_inner_inner) T.reads(C[vi, vj], A[vi, vk], B[vj, vk]) T.writes(C[vi, vj], A[vi, vk]) with T.init(): C[vi, vj] = 0.0 A[vi, vk] = A[vi, vk] + B[vj, vk] C[vi, vj] = C[vi, vj] + (A[vi, vk] * B[vj, vk]) def test_reads_writes_syntax_sugar(): assert_structural_equal(transformed_matmul_no_syntax_sugar, transformed_matmul_syntax_sugar) @T.prim_func def loop_no_syntax_sugar(a: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128, 128)) for i in T.serial(0, 128): for j in T.parallel(0, 128): for k in T.vectorized(0, 128): for x in T.unroll(0, 128): for y in T.thread_binding(0, 128, thread="threadIdx.x"): for z in T.thread_binding(0, 128, thread="threadIdx.x"): A[i, j, k, x] = A[i, j, k, x] * 2.0 @T.prim_func def loop_syntax_sugar(a: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128, 128)) for i in T.serial(128): for j in T.parallel(128): for k in T.vectorized(128): for x in T.unroll(128): for y in T.thread_binding(128, "threadIdx.x"): for z in T.thread_binding(128, thread="threadIdx.x"): A[i, j, k, x] = A[i, j, k, x] * 2.0 def test_loop_syntax_sugar(): assert_structural_equal(loop_no_syntax_sugar, loop_syntax_sugar) # match buffer - use kwargs @T.prim_func def elementwise_handle( a: T.handle, b: T.handle, ) -> None: A = T.match_buffer(a, (128, 128, 128, 128)) B = T.match_buffer(b, (128, 128, 128, 128)) for i, j, k, l in T.grid(128, 128, 128, 128): with T.block("B"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) B[vi, vj, vk, vl] = A[vi, vj, vk, vl] * 2.0 # match buffer - use buffer with kwargs @T.prim_func def elementwise_buffer_kwargs( a: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"), b: T.Buffer(shape=(128, 128, 128, 128), dtype="float32"), ) -> None: for i, j, k, l in T.grid(128, 128, 128, 128): with T.block("B"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0 # match buffer - use buffer without kwargs @T.prim_func def elementwise_buffer_no_kwargs( 
a: T.Buffer((128, 128, 128, 128), "float32"), b: T.Buffer((128, 128, 128, 128), "float32"), ) -> None: for i, j, k, l in T.grid(128, 128, 128, 128): with T.block("B"): vi, vj, vk, vl = T.axis.remap("SSSS", [i, j, k, l]) b[vi, vj, vk, vl] = a[vi, vj, vk, vl] * 2.0 def test_match_buffer_syntax_sugar(): # with kwargs assert_structural_equal(elementwise_handle, elementwise_buffer_kwargs) # without kwargs assert_structural_equal(elementwise_handle, elementwise_buffer_no_kwargs) def test_match_buffer_1d(): @T.prim_func def func_no_sugar(a: T.handle): A = T.match_buffer(a, shape=(16,)) for i in T.serial(16): A[i] = 0.0 @T.prim_func def func_with_sugar(A: T.Buffer(16, "float32")): for i in T.serial(16): A[i] = 0.0 assert_structural_equal(func_no_sugar, func_with_sugar) # dynamic shape gemm @T.prim_func def gemm_dyn_shape(a: T.handle, b: T.handle, c: T.handle): N = T.int32() M = T.int32() K = T.int32() A = T.match_buffer(a, (N, K), "float32") B = T.match_buffer(b, (K, M), "float32") C = T.match_buffer(c, (N, M), "float32") for i, j, k in T.grid(N, M, K): with T.block("gemm"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] def test_dynamic_shape_gemm(): gemm_dyn_shape_roundtrip = from_source(gemm_dyn_shape.script()) assert_structural_equal(gemm_dyn_shape, gemm_dyn_shape_roundtrip) @T.prim_func def match_buffer_int64(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (T.int64(128), T.int64(128)), dtype="float32") B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32") C = T.match_buffer(c, (T.int64(128), T.int64(128)), dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(T.int64(128), T.int64(128)): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 @T.prim_func def match_buffer_int64_after_roundtrip( A: T.Buffer((T.int64(128), T.int64(128)), "float32"), C: 
T.Buffer((T.int64(128), T.int64(128)), "float32"), ) -> None: B = T.alloc_buffer((T.int64(128), T.int64(128)), dtype="float32") for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] * 2.0 for i, j in T.grid(T.int64(128), T.int64(128)): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) C[vi, vj] = B[vi, vj] + 1.0 def test_match_buffer_int64(): original = match_buffer_int64 after_roundtrip = match_buffer_int64_after_roundtrip assert_structural_equal(original, after_roundtrip, True) def test_match_buffer_region_has_implicit_shape_dtype(): @T.prim_func def explicit_shape_dtype(A: T.Buffer((16, 64), "int32")): with T.block(): B = T.match_buffer(A[8:16, 32:64], shape=(8, 32), dtype="int32") T.evaluate(0) @T.prim_func def implicit_shape_dtype(A: T.Buffer((16, 64), "int32")): with T.block(): B = T.match_buffer(A[8:16, 32:64]) T.evaluate(0) assert_structural_equal(explicit_shape_dtype, implicit_shape_dtype) def test_match_buffer_input_requires_shape_arg(): with pytest.raises(tvm.error.DiagnosticError): @T.prim_func def func(a: T.handle): A = T.match_buffer(a, dtype="int32") T.evaluate(0) def test_letstmt_bufferload_without_type_annotation(): # Variable assignment of PrimExpr types uses the dtype of the # PrimExpr to determine the variable's dtype. Parsing of # buf[indices] is done by generating a BufferSlice object, which # handles both store and load cases. BufferSlice is not a # PrimExpr, and implements BufferSlice.dtype explicitly. # Failure occurred during parsing of the tvmscript. 
@T.prim_func def func_without_type_annotation(A: T.Buffer((1,), "int32")): x = A[0] T.evaluate(x) def test_letstmt_bind_with_constant(): @T.prim_func def constant_binds(): x = T.meta_var(1) y = T.meta_var(42.0) T.evaluate(T.cast(x, "float32") + y) @T.prim_func def constant_binds_wrapped(): x = T.meta_var(T.int32(1)) y = T.meta_var(T.float32(42.0)) T.evaluate(T.cast(x, "float32") + y) assert_structural_equal(constant_binds, constant_binds_wrapped) def test_func_call(): def shared_16x16_to_ldmatrix_32x8_layout(i, j): thread_id = (i % 8) * 4 + (j % 8) // 2 return T.meta_var((thread_id, (j // 8) * 4 + (i // 8) * 2 + (j % 2))) @T.prim_func def mma_sync_m16n16k16_desc(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp") B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp") C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp") with T.block("root"): T.reads(C[0:32, 0:8], A[0:32, 0:8], B[0:32, 0:8]) T.writes(C[0:32, 0:8]) for i, j, k in T.grid(16, 16, 16): with T.block("C"): i, j, k = T.axis.remap("SSR", [i, j, k]) thread_id_C, local_id_C = shared_16x16_to_ldmatrix_32x8_layout(i, j) thread_id_A, local_id_A = shared_16x16_to_ldmatrix_32x8_layout(i, k) thread_id_B, local_id_B = shared_16x16_to_ldmatrix_32x8_layout(k, j) T.reads( C[thread_id_C, local_id_C], A[thread_id_A, local_id_A], B[thread_id_B, local_id_B], ) T.writes(C[thread_id_C, local_id_C]) C[thread_id_C, local_id_C] += ( A[thread_id_A, local_id_A] * B[thread_id_B, local_id_B] ) @T.prim_func def mma_sync_m16n16k16_desc_manual(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (32, 8), "float16", align=64, offset_factor=16, scope="warp") B = T.match_buffer(b, (32, 8), "float16", align=64, offset_factor=16, scope="warp") C = T.match_buffer(c, (32, 8), "float16", align=64, offset_factor=16, scope="warp") with T.block("root"): T.reads(C[0:32, 0:8], A[0:32, 0:8], 
B[0:32, 0:8]) T.writes(C[0:32, 0:8]) for i, j, k in T.grid(16, 16, 16): with T.block("C"): i, j, k = T.axis.remap("SSR", [i, j, k]) T.reads( C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2], A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 2], B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 2], ) T.writes(C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2]) C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2] = ( C[i % 8 * 4 + j % 8 // 2, j // 8 * 4 + i // 8 * 2 + j % 2] + A[i % 8 * 4 + k % 8 // 2, k // 8 * 4 + i // 8 * 2 + k % 2] * B[k % 8 * 4 + j % 8 // 2, j // 8 * 4 + k // 8 * 2 + j % 2] ) assert_structural_equal(mma_sync_m16n16k16_desc, mma_sync_m16n16k16_desc_manual) # The following is an example of an error message from calling an invalid function # error: Error occurred when invoking the function sqrt: # loop of ufunc does not support argument 0 of type Var which has no callable sqrt method # --> test_tvmscript_syntax_sugar.py:334:19 # | # 334 | ind = sqrt(i) # | ^^^^^^^ # note: run with `TVM_BACKTRACE=1` environment variable to display a backtrace. # Uncomment to see the error above. 
# def sqrt(x): # import numpy as np # return np.sqrt(x) # @T.prim_func # def loop(a: T.handle) -> None: # A = T.match_buffer(a, (128,)) # for i in T.serial(128): # ind = sqrt(i) # A[i] = A[ind] def test_int64_loop(): @T.prim_func def int64_grid( A: T.Buffer((T.int64(128), T.int64(128)), "float32"), B: T.Buffer((T.int64(128), T.int64(128)), "float32"), ) -> None: for i, j in T.grid(T.int64(128), T.int64(128)): with T.block("C"): vi, vj = T.axis.remap("SS", [i, j]) B[vi, vj] = A[vi, vj] + 1.0 @T.prim_func def int64_grid_expanded( A: T.Buffer((T.int64(128), T.int64(128)), "float32"), B: T.Buffer((T.int64(128), T.int64(128)), "float32"), ) -> None: for i in range(T.int64(0), T.int64(128)): for j in range(T.int64(0), T.int64(128)): with T.block("C"): vi = T.axis.spatial(T.int64(128), i) vj = T.axis.spatial(T.int64(128), j) B[vi, vj] = A[vi, vj] + 1.0 assert_structural_equal(int64_grid, int64_grid_expanded) def test_implicit_evaluate_assume(): @T.prim_func def explicit(A: T.Buffer(1, "int32")): T.evaluate(T.assume(A[0] == 5)) A[0] = 10 @T.prim_func def implicit(A: T.Buffer(1, "int32")): T.assume(A[0] == 5) A[0] = 10 assert_structural_equal(implicit, explicit) def test_implicit_evaluate_call_extern(): @T.prim_func def explicit(A: T.Buffer(1, "int32")): T.evaluate(T.call_extern("extern_func", A.data, dtype="int32")) @T.prim_func def implicit(A: T.Buffer(1, "int32")): T.call_extern("extern_func", A.data, dtype="int32") assert_structural_equal(implicit, explicit) def test_preserve_trivial_let_binding(): @T.prim_func def explicit(i: T.int32): j = T.int32() T.LetStmt(i, var=j) T.evaluate(j) @T.prim_func def implicit(i: T.int32): j = i T.evaluate(j) assert_structural_equal(implicit, explicit) def test_preserve_trivial_let_binding_of_value(): @T.prim_func def explicit(i: T.int32): j = T.int32() T.LetStmt(42, var=j) T.evaluate(j) @T.prim_func def implicit(i: T.int32): j = 42 T.evaluate(j) assert_structural_equal(implicit, explicit) def test_preserve_parameter_name(): @T.prim_func 
def func(i: T.int32): j = i T.evaluate(j) param_name = func.params[0].name assert param_name == "i" def test_preserve_variable_name(): """Use variable name when generating tir::LetStmt""" @T.prim_func def func(): for i in T.serial(16): j = i // 4 T.evaluate(j) var_name = func.body.body.var.name assert var_name == "j" def test_boolean_constant(): """Python booleans should become T.Bool objects""" @T.prim_func def explicit(): T.evaluate(T.bool(True)) @T.prim_func def implicit(): T.evaluate(True) assert_structural_equal(implicit, explicit) def test_foldable_boolean_in_assert(): """Foldable booleans T.Bool objects The condition of an assert statement should be a boolean expression. Previously, this test failed because the FFI does not distinguish between integer primitives and boolean primitives. """ @T.prim_func def explicit(): assert T.bool(False), "Message" T.evaluate(0) @T.prim_func def implicit(): assert 0 == 1, "Message" T.evaluate(0) assert_structural_equal(implicit, explicit) if __name__ == "__main__": tvm.testing.main()
16,595
32.731707
109
py
tvm
tvm-main/tests/python/unittest/test_te_schedule_bound_inference.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te def test_bound1(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule([A2.op]) xo, xi = s[A2].split(s[A2].op.axis[0], 8) s[A1].compute_at(s[A2], xo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[A1.op.axis[0]].extent.value == 8 def test_bound2(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) xo, yo, xi, yi = s[A2].tile(A2.op.axis[0], A2.op.axis[1], 8, 8) # test normalize not affecting schedule _ = s.normalize() s[A1].compute_at(s[A2], yo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[A1.op.axis[0]].extent.value == 8 assert bounds[A1.op.axis[1]].extent.value == 8 def test_bound3(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, 
l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) s[A1].set_scope("shared") xo, xi = s[A2].split(A2.op.axis[0], 32) xi0, xi1 = s[A2].split(xi, nparts=16) s[A2].bind(xi0, te.thread_axis("threadIdx.x")) yo, yi = s[A2].split(A2.op.axis[1], 16) # test normalize not affecting schedule _ = s.normalize() s[A2].reorder(xo, xi0, yo, xi1, yi) s[A1].compute_at(s[A2], yo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[A1.op.axis[0]].extent.value == 32 assert bounds[A1.op.axis[1]].extent.value == 16 def test_bound_split_ext_less_than_factor(): m = 8 I = te.placeholder((m,), name="I") EF = te.compute((m,), lambda i: I[i] * 2, name="EF") E = te.compute((m,), lambda i: EF[i] * 2, name="E") s = te.create_schedule([E.op]) xo, xi = s[E].split(s[E].op.axis[0], factor=32) s[EF].compute_at(s[E], xo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[xi].extent.value == m def test_bound_split_ext_less_than_naprts(): m = 8 I = te.placeholder((m,), name="I") EF = te.compute((m,), lambda i: I[i] * 2, name="EF") E = te.compute((m,), lambda i: EF[i] * 2, name="E") s = te.create_schedule([E.op]) xo, xi = s[E].split(s[E].op.axis[0], nparts=32) s[EF].compute_at(s[E], xo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[xo].extent.value == m def test_bound_split_divisible(): m = te.var("m") l = te.var("l") A = te.placeholder((8 * m, l), name="A") B = te.compute((8 * m, l), lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], 8) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[xo].extent == m assert bounds[xi].extent.value == 8 def test_bound_tile_divisible(): m = te.var("m") l = te.var("l") shape = (8 * m, 32 * l) A = te.placeholder(shape, name="A") B = te.compute(shape, lambda i, j: A[i, j], name="B") s = te.create_schedule(B.op) xo, yo, 
xi, yi = s[B].tile(B.op.axis[0], B.op.axis[1], 8, 32) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[xo].extent == m assert bounds[xi].extent.value == 8 assert bounds[yo].extent == l assert bounds[yi].extent.value == 32 def test_bound_fusesplit1(): m = te.var("m") l = te.var("l") split1 = te.var("s") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1]) xo, xi = s[A2].split(fused_axes, split1) s[A1].compute_at(s[A2], xo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) idxdiv = tvm.tir.indexdiv tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[0]].min, idxdiv(xo * split1, l)) expected_extent = idxdiv((xo + 1) * split1 - 1, l) - idxdiv(xo * split1, l) + 1 for i in range(1, 6): for j in range(1, 6): for k in range(1, 6): vars = tvm.runtime.convert( { split1: tvm.tir.const(i, "int32"), l: tvm.tir.const(j, "int32"), xo.var: tvm.tir.const(k, "int32"), } ) tvm.testing.assert_prim_expr_equal( tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars), tvm.tir.stmt_functor.substitute(expected_extent, vars), ) tvm.testing.assert_prim_expr_equal(bounds[A1.op.axis[1]].extent, l) def test_bound_fusesplit2(): m = te.var("m") l = tvm.runtime.convert(6) split = tvm.runtime.convert(3) A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) fused_axes = s[A2].fuse(A2.op.axis[0], A2.op.axis[1]) xo, xi = s[A2].split(fused_axes, split) s[A1].compute_at(s[A2], xo) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) vars = tvm.runtime.convert({xo.var: tvm.tir.const(5, "int32")}) tvm.testing.assert_prim_expr_equal( 
tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].min, vars), 2 ) tvm.testing.assert_prim_expr_equal( tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].min, vars), 3 ) tvm.testing.assert_prim_expr_equal( tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[0]].extent, vars), 1 ) tvm.testing.assert_prim_expr_equal( tvm.tir.stmt_functor.substitute(bounds[A1.op.axis[1]].extent, vars), 3 ) def test_bound_warp(): m = te.var("m") l = te.var("l") A = te.placeholder((m, l), name="A") A1 = te.compute((m, l), lambda i, j: A[i, j], name="A1") A2 = te.compute((m, l), lambda i, j: A1[i, j] + 3, name="A2") s = te.create_schedule(A2.op) s[A1].set_scope("warp") xo, xi = s[A2].split(A2.op.axis[0], 32) xi0, xi1 = s[A2].split(xi, factor=16) tx = te.thread_axis("threadIdx.x") s[A2].bind(xi1, tx) s[A2].bind(xi0, te.thread_axis("threadIdx.y")) y = s[A2].op.axis[1] s[A1].compute_at(s[A2], y) xo, xi = s[A1].split(s[A1].op.axis[0], factor=16) s[A1].bind(xi, tx) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[A1.op.axis[0]].extent.value == 16 def test_bound_scan(): m = te.var("m") n = te.var("n") X = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") s_state = te.placeholder((m, n)) s_init = te.compute((1, n), lambda _, i: X[0, i]) s_update = te.compute((m, n), lambda t, i: s_state[t - 1, i] + X[t, i]) s_scan = tvm.te.scan(s_init, s_update, s_state) assert tuple(s_scan.shape) == (m, n) s = te.create_schedule(s_scan.op) XX = s.cache_read(X, "local", s_update) xo, xi = s[s_update].split(s_update.op.axis[1], factor=4) s[XX].compute_at(s[s_update], xo) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.te.schedule.ScheduleOps(s, bounds) assert bounds[XX.op.axis[1]].extent.value == 4 def test_bound_conv1d(): n = te.var("n") A = te.compute((n + 2), lambda i: 1, name="A") def computeB(ii): i = ii + 1 return A[i - 1] + A[i] + A[i + 1] B = te.compute(n, computeB, name="B") s = te.create_schedule(B.op) 
s[A].compute_at(s[B], B.op.axis[0]) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[A.op.axis[0]].extent.value == 3 def test_bound_blur(): n = tvm.runtime.convert(12) A = te.compute((n, n), lambda i, j: 1, name="A") def computeB(ii, jj): # set the correct center i = ii + 1 j = jj + 1 return A[i][j] + A[i - 1][j] + A[i + 1][j] + A[i][j + 1] + A[i][j - 1] B = te.compute((n - 2, n - 2), computeB, name="B") s = te.create_schedule(B.op) s[A].compute_at(s[B], B.op.axis[1]) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[A.op.axis[0]].extent.value == 3 assert bounds[A.op.axis[1]].extent.value == 3 def test_bound_rfactor(): n = te.var("n") A = te.placeholder((n,), name="A") k = te.reduce_axis((0, n)) B = te.compute((1,), lambda i: te.sum(A[k], axis=k, where=(i > 1)), name="B") # schedule s = te.create_schedule(B.op) kf, ki = s[B].split(k, nparts=4) BF = s.rfactor(B, kf) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[BF.op.axis[0]].extent.value == 4 assert bounds[BF.op.axis[1]].extent.value == 1 def test_bound_group_schedule(): m = te.var("m") n = te.var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") s = te.create_schedule(x2.op) g = s.create_group(outputs=x1, inputs=x, include_inputs=True) g.compute_at(s[x2], x2.op.axis[0]) assert s[x1].group == g assert s[x].group == g s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[x.op.axis[0]].extent.value == 1 assert bounds[x.op.axis[1]].extent == n def test_bound_nest_group(): m = te.var("m") n = te.var("n") x = te.compute((m, n), lambda i, j: tvm.tir.const(1, "float32"), name="x") x1 = te.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = te.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") s = te.create_schedule(x2.op) g1 = s.create_group(outputs=x, inputs=x, include_inputs=True) g2 = 
s.create_group(outputs=x1, inputs=x, include_inputs=True) assert s[x].group == g1 assert s[x1].group == g2 g2.compute_at(s[x2], x2.op.axis[0]) g1.compute_at(s[x1], s[x1].op.axis[1]) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[x.op.axis[0]].extent.value == 1 assert bounds[x.op.axis[1]].extent.value == 1 assert bounds[x1.op.axis[0]].extent.value == 1 assert bounds[x1.op.axis[1]].extent == n def test_bound_nest_thread(): m = te.var("m") A = te.placeholder((m), name="A") A1 = te.compute((m,), lambda i: A[i], name="A1") A2 = te.compute((m,), lambda i: A1[i] + 2, name="A2") A3 = te.compute((m,), lambda i: A2[i] + 3, name="A3") s = te.create_schedule(A3.op) s[A2].set_scope("shared") s[A1].set_scope("local") block_x = te.thread_axis("blockIdx.x") thread_x = te.thread_axis("threadIdx.x") bx, tx = s[A3].split(A3.op.axis[0], factor=32) s[A3].bind(bx, block_x) s[A3].bind(tx, thread_x) s[A2].compute_at(s[A3], tx) _, xi = s[A2].split(A2.op.axis[0], nparts=1) s[A2].bind(xi, thread_x) s[A1].compute_at(s[A3], tx) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[A1.op.axis[0]].extent.value == 1 assert bounds[A2.op.axis[0]].extent.value == 32 assert bounds[A3.op.axis[0]].extent == m def test_gemm_bound(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n, n), name="A") B = te.placeholder((n, n), name="B") k = te.reduce_axis((0, n), name="k") C = te.compute((n, n), lambda ii, jj: te.sum(A[ii, k] * B[jj, k], axis=k), name="CC") # schedule s = te.create_schedule(C.op) xtile, ytile = 32, 32 scale = 8 num_thread = 8 block_factor = scale * num_thread block_x = te.thread_axis("blockIdx.x") thread_x = te.thread_axis("threadIdx.x") block_y = te.thread_axis("blockIdx.y") thread_y = te.thread_axis("threadIdx.y") CC = s.cache_write(C, "local") AA = s.cache_read(A, "shared", [CC]) BB = s.cache_read(B, "shared", [CC]) by, yi = s[C].split(C.op.axis[0], factor=block_factor) bx, xi = s[C].split(C.op.axis[1], factor=block_factor) 
s[C].reorder(by, bx, yi, xi) s[C].bind(by, block_y) s[C].bind(bx, block_x) ty, yi = s[C].split(yi, nparts=num_thread) tx, xi = s[C].split(xi, nparts=num_thread) s[C].reorder(ty, tx, yi, xi) s[C].bind(ty, thread_y) s[C].bind(tx, thread_x) yo, xo = CC.op.axis s[CC].reorder(k, yo, xo) s[CC].compute_at(s[C], tx) s[AA].compute_at(s[CC], k) s[BB].compute_at(s[CC], k) ty, xi = s[AA].split(s[AA].op.axis[0], nparts=num_thread) tx, xi = s[AA].split(xi, nparts=num_thread) s[AA].bind(ty, thread_y) s[AA].bind(tx, thread_x) ty, xi = s[BB].split(s[BB].op.axis[0], nparts=num_thread) tx, xi = s[BB].split(xi, nparts=num_thread) s[BB].bind(ty, thread_y) s[BB].bind(tx, thread_x) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) assert bounds[BB.op.axis[0]].extent.value == 64 assert bounds[AA.op.axis[0]].extent.value == 64 assert bounds[CC.op.axis[0]].extent.value == 8 assert bounds[CC.op.axis[1]].extent.value == 8 def test_bound_tensor_compute_op(): def intrin_test(): m1 = te.var("m1") n1 = te.var("n1") a = te.placeholder((m1, n1), name="a") c = te.compute((1, n1), lambda i, j: a[0, j] + a[1, j] + a[2, j], name="c") Ab = tvm.tir.decl_buffer(a.shape, name="Abuf", offset_factor=1) Cb = tvm.tir.decl_buffer(c.shape, name="Cbuf", offset_factor=1) def intrin_func(ins, outs): aa = ins[0] cc = outs[0] def _body(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_extern("int32", "test", cc.access_ptr("w"), aa.access_ptr("r")) ) return ib.get() return _body() return te.decl_tensor_intrin(c.op, intrin_func, binds={a: Ab, c: Cb}) test_func = intrin_test() A = te.placeholder((20, 20), name="A") B = te.compute(A.shape, lambda i, j: A[i, j], name="B") C = te.compute((10, 20), lambda i: test_func(B[i:10, 0:20]), name="C") s = te.create_schedule(C.op) bounds = tvm.te.schedule.InferBound(s) assert isinstance(bounds, tvm.container.Map) assert bounds[B.op.axis[0]].extent.value == 10 def test_bound_simplification_failure(): # Check that the bounds are not expanded A = te.compute((2,), lambda 
j: j, "A") def _check(B, A=A): s = te.create_schedule(B.op) s = s.normalize() bounds = tvm.te.schedule.InferBound(s) stmt = tvm.lower(s, [B, A], simple_mode=True) if not bounds[A.op.axis[0]].extent.value <= 2: print(stmt) assert bounds[A.op.axis[0]].extent.value <= 2 tdiv = tvm.tir.truncdiv # These are hard to simplify, moreover we don't simplify them _check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.min(-3 * i, -2 * i)])) _check(te.compute((10,), lambda i: A[tvm.te.min(3 * i, 4 * i) + tvm.te.max(-3 * i, -4 * i)])) _check(te.compute((10,), lambda i: A[-2 * tdiv(i, 2) - tvm.te.min(i, 0 - i)])) _check(te.compute((10,), lambda i: A[i + (0 - i)])) # This would cause out of bounds, but we nevertheless include it _check(te.compute((10,), lambda i: A[i])) def test_bound_block(): def _check(shape, expected, block_size=4): N, C, H, W = shape tail = C % block_size chunks = C // block_size if tail != 0: chunks += 1 A = te.placeholder((N, C, H, W), name="A") pad_value = tvm.tir.const(0, A.dtype) def _reorder_data_nchw(*indices): condition = [] condition.append(indices[1] == chunks - 1) condition.append(indices[4] >= tail) condition = tvm.tir.all(*condition) return tvm.tir.if_then_else( condition, pad_value, A[indices[0], indices[1] * block_size + indices[4], indices[2], indices[3]], ) repack = te.compute((N, chunks, H, W, block_size), _reorder_data_nchw, name="repack") B = te.compute( (N, C, H, W), lambda n, c, h, w: repack[n, c // block_size, h, w, c % block_size], name="back_repack", ) s = te.create_schedule([B.op]) bounds = tvm.te.schedule.InferBound(s) # Block for intermediate compute function should be equal to 4 for all cases except than number of channels is less than 4 assert bounds[repack.op.axis[4]].extent.value == expected _check((1, 4, 6, 6), 4) _check((1, 7, 6, 6), 4) _check((1, 3, 6, 6), 3) if __name__ == "__main__": tvm.testing.main()
18,036
34.159844
130
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_ir_builder_tir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, missing-docstring """Unittests for tvm.script.ir_builder.tir""" import numpy as np import pytest import tvm import tvm.testing from tvm import tir from tvm.ir.base import assert_structural_equal from tvm.runtime import ndarray from tvm.script.ir_builder import IRBuilder from tvm.script.ir_builder import tir as T def test_ir_builder_tir_primfunc_base(): with IRBuilder() as ib: with T.prim_func(): T.evaluate(0) # the prim_func generated by IRBuilder prim_func_actual = ib.get() # the expected prim_func prim_func_expected = tir.PrimFunc( params=[], body=tir.Evaluate(0), ret_type=None, buffer_map=None, attrs=None, ) # Check if the generated ir is expected assert_structural_equal(prim_func_actual, prim_func_expected, map_free_vars=True) def test_ir_builder_tir_primfunc_complete(): with IRBuilder() as ib: with T.prim_func(): T.arg("a", T.handle()) T.arg("b", T.int64()) T.arg("c", T.Buffer((128, 128), "float32")) d = T.arg("d", T.handle()) e = T.arg("e", T.Buffer((1024,), "int8")) T.func_attr({"key": "value"}) T.func_ret(tvm.ir.PrimType("int64")) buffer_d = T.match_buffer(d, (64, 64), "int64") T.evaluate(0) # the prim_func generated by IRBuilder prim_func_actual = ib.get() # 
the expected prim_func c_handle, c_buffer = tir.Var("c_handle", "handle"), tir.decl_buffer( (128, 128), "float32", name="c" ) d_handle, d_buffer = tir.Var("d", "handle"), tir.decl_buffer((64, 64), "int64", name="d") e_handle, e_buffer = tir.Var("e_handle", "handle"), tir.decl_buffer((1024,), "int8", name="e") prim_func_expected = tir.PrimFunc( params=[ tir.Var("a", "handle"), tir.Var("b", "int64"), c_handle, d_handle, e_handle, ], body=tir.Evaluate(0), ret_type=tvm.ir.PrimType("int64"), buffer_map={c_handle: c_buffer, d_handle: d_buffer, e_handle: e_buffer}, attrs=tvm.ir.make_node("DictAttrs", key="value"), ) # Check if the generated ir is expected assert_structural_equal(prim_func_actual, prim_func_expected, map_free_vars=True) def test_ir_builder_tir_block_base(): with IRBuilder() as ib: with T.block("block"): T.evaluate(0) # the block generated by IRBuilder block_realize_actual = ib.get() # the expected block block_expected = tir.Block( iter_vars=[], reads=[], writes=[], name_hint="block", body=tir.Evaluate(0), alloc_buffers=None, match_buffers=None, annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)}, ) block_realize_expected = tir.BlockRealize( iter_values=[], predicate=True, block=block_expected, ) # Check if the generated ir is expected assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True) def test_ir_builder_tir_block_complete(): with IRBuilder() as ib: a = T.int64() b = T.Buffer((128, 128), "float32") c = T.Buffer((128, 128), "float32") d = T.int32() e = T.Buffer((128, 128), "float32") f = T.int32() with T.block("block"): T.where(a > 1) T.reads(b[0:16, 0:16]) T.writes(c[d:128, d:128]) T.block_attr({"key": "value"}) T.alloc_buffer((128, 128), "float32") T.match_buffer(e[0:32, 0:32], (32, 32), "float32") T.axis.spatial(128, f) T.evaluate(0) # the block generated by IRBuilder block_realize_actual = ib.get() # the expected block var_a = tir.Var("a", "int64") buffer_b = tir.decl_buffer((128, 128), 
"float32", name="b") buffer_c = tir.decl_buffer((128, 128), "float32", name="c") var_d = tir.Var("d", "int32") buffer_e = tir.decl_buffer((128, 128), "float32", name="c") var_f = tir.Var("f", "int32") block_expected = tir.Block( iter_vars=[tir.IterVar((0, 128), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar)], reads=[buffer_b[0:16, 0:16]], writes=[buffer_c[var_d:128, var_d:128]], name_hint="block", body=tir.Evaluate(0), alloc_buffers=[tir.decl_buffer((128, 128), "float32")], match_buffers=[ tir.MatchBufferRegion(tir.decl_buffer((32, 32), "float32"), buffer_e[0:32, 0:32]) ], annotations={"key": "value"}, ) block_realize_expected = tir.BlockRealize( iter_values=[var_f], predicate=var_a > 1, block=block_expected, ) # Check if the generated ir is expected assert_structural_equal(block_realize_actual, block_realize_expected, map_free_vars=True) def test_ir_builder_tir_axis(): with IRBuilder() as ib: a = T.int32() b = T.int32() c = T.int32() d = T.int32() with T.block("block"): T.axis.spatial(8, a) T.axis.reduce(16, b) T.axis.scan(32, c) T.axis.opaque(64, d) T.evaluate(0) # the block generated by IRBuilder block_realize_actual = ib.get() # the expected block var_a = tir.Var("a", "int32") var_b = tir.Var("b", "int32") var_c = tir.Var("c", "int32") var_d = tir.Var("d", "int32") block_expected = tir.Block( iter_vars=[ tir.IterVar((0, 8), tir.Var("", "int32"), iter_type=tir.IterVar.DataPar), tir.IterVar((0, 16), tir.Var("", "int32"), iter_type=tir.IterVar.CommReduce), tir.IterVar((0, 32), tir.Var("", "int32"), iter_type=tir.IterVar.Ordered), tir.IterVar((0, 64), tir.Var("", "int32"), iter_type=tir.IterVar.Opaque), ], reads=[], writes=[], name_hint="block", body=tir.Evaluate(0), annotations={"tir.script_parsing_detect_access": tir.IntImm("int64", 3)}, ) block_realize_expected = tir.BlockRealize( iter_values=[var_a, var_b, var_c, var_d], predicate=True, block=block_expected, ) # Check if the generated ir is expected assert_structural_equal(block_realize_actual, 
block_realize_expected, map_free_vars=True) def test_ir_builder_tir_for(): with IRBuilder() as ib: with T.serial(128) as a: with T.parallel(64) as b: with T.vectorized(32) as c: with T.unroll(16) as d: with T.thread_binding(8, thread="threadIdx.x") as e: T.evaluate(0) # the for generated by IRBuilder for_actual = ib.get() # the expected for thread_binding_expected = tir.For( loop_var=tir.Var("", "int32"), min_val=0, extent=8, kind=tir.ForKind.THREAD_BINDING, body=tir.Evaluate(0), thread_binding=tir.IterVar( None, tir.Var("", "int32"), tir.IterVar.ThreadIndex, "threadIdx.x" ), ) unroll_expected = tir.For( loop_var=tir.Var("", "int32"), min_val=0, extent=16, kind=tir.ForKind.UNROLLED, body=thread_binding_expected, ) vectorized_expected = tir.For( loop_var=tir.Var("", "int32"), min_val=0, extent=32, kind=tir.ForKind.VECTORIZED, body=unroll_expected, ) parallel_expected = tir.For( loop_var=tir.Var("", "int32"), min_val=0, extent=64, kind=tir.ForKind.PARALLEL, body=vectorized_expected, ) for_expected = tir.For( loop_var=tir.Var("", "int32"), min_val=0, extent=128, kind=tir.ForKind.SERIAL, body=parallel_expected, ) # Check if the generated ir is expected assert_structural_equal(for_actual, for_expected, map_free_vars=True) def test_ir_builder_tir_assert(): with IRBuilder() as ib: with T.Assert(T.int32() == 0, message="a is 0"): T.evaluate(0) # the assert generated by IRBuilder assert_actual = ib.get() # the expected assert statement assert_expected = tir.AssertStmt(T.int32() == 0, tir.StringImm("a is 0"), tir.Evaluate(0)) # Check if the generated ir is expected assert_structural_equal(assert_actual, assert_expected, map_free_vars=True) def test_ir_builder_tir_let(): with IRBuilder() as ib: with T.LetStmt(tir.IntImm("int32", 2)) as v: T.evaluate(0) # the let binding generated by IRBuilder let_actual = ib.get() # the expected Let statement let_expected = tir.LetStmt(T.int32(), tir.IntImm("int32", 2), tir.Evaluate(0)) # Check if the generated ir is expected 
assert_structural_equal(let_actual, let_expected, map_free_vars=True) def test_ir_builder_tir_realize(): buffer_a = T.Buffer((128, 128), "float32") with IRBuilder() as ib: with T.realize(buffer_a[0:128, 0:128], "test_storage_scope", True): T.evaluate(0) # the buffer realization generated by IRBuilder realize_actual = ib.get() # the expected buffer realization buffer_realize = tir.BufferRealize( buffer_a, [tvm.ir.Range(0, 128), tvm.ir.Range(0, 128)], True, tir.Evaluate(0) ) expected_realize = tir.AttrStmt( buffer_a, "realize_scope", tir.StringImm("test_storage_scope"), buffer_realize ) # Check if the generated ir is expected assert_structural_equal(realize_actual, expected_realize, map_free_vars=True) def test_ir_builder_tir_thread(): with IRBuilder() as ib: with T.prim_func(): brow = T.env_thread("blockIdx.y") with T.launch_thread(brow, 1): T.evaluate(0) # the prim_func generated by IRBuilder ir_actual = ib.get() # the expected prim_func iter_var = tir.IterVar((0, 1), "v", iter_type=1, thread_tag="blockIdx.y") attr_stmt = tir.AttrStmt(iter_var, "thread_extent", 1, tir.Evaluate(0)) func = tir.PrimFunc([], attr_stmt) # Check if the generated ir is expected assert_structural_equal(ir_actual, func, map_free_vars=True) def test_ir_builder_tir_allocate(): with IRBuilder() as ib: with T.allocate([10], "float32", scope="local"): T.evaluate(1) # the allocate generated by IRBuilder ir_actual = ib.get() # the expected allocate buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("float32"), "local")) ir_expected = tir.Allocate( buffer_var, "float32", [10], tvm.tir.const(1, "uint1"), tir.Evaluate(1) ) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_allocate_const(): data = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] with IRBuilder() as ib: with T.allocate_const(data, "int32", [10]): T.evaluate(1) # the allocate const generated by IRBuilder ir_actual = ib.get() # the expected allocate const 
buffer_var = tir.Var("v", tvm.ir.PointerType(tvm.ir.PrimType("int32"))) ir_expected = tir.AllocateConst( buffer_var, "int32", [10], ndarray.array(np.asarray(data, "int32")), tir.Evaluate(1), annotations={}, ) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_while(): with IRBuilder() as ib: with T.While(T.int32() > 0): T.evaluate(0) # the while generated by IRBuilder ir_actual = ib.get() # the expected while ir_expected = tir.While(tir.Var("x", "int32") > 0, tir.Evaluate(0)) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_if_then_else(): with IRBuilder() as ib: with T.If(T.int32() < 12): with T.Then(): T.evaluate(T.int32(0)) with T.Else(): T.evaluate(T.int32(1)) # the if_then_else generated by IRBuilder ir_actual = ib.get() # the expected if_then_else ir_expected = tir.IfThenElse( tir.Var("c", "int32") < 12, tir.Evaluate(tir.IntImm("int32", 0)), tir.Evaluate(tir.IntImm("int32", 1)), ) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_buffer_store(): buffer_a = T.Buffer((10, 10), "float32") i = T.int32() with IRBuilder() as ib: T.buffer_store(buffer_a, 0.1, [0, i]) # the buffer store generated by IRBuilder ir_actual = ib.get() # the expected buffer store ir_expected = tir.BufferStore(buffer_a, 0.1, [0, i]) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_prefetch(): with IRBuilder() as ib: buffer_a = T.Buffer((128, 128), "float32") T.prefetch(buffer_a, []) # the prefetch generated by IRBuilder ir_actual = ib.get() # the expected prefetch ir_expected = tir.Prefetch(buffer_a, []) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_evaluate(): with IRBuilder() as 
ib: T.evaluate(0) # the evaluate generated by IRBuilder eval_actual = ib.get() # the expected evaluate eval_expected = tir.Evaluate(0) # Check if the generated ir is expected assert_structural_equal(eval_actual, eval_expected, map_free_vars=True) def test_ir_builder_tir_decl_buffer(): with IRBuilder() as ib: with T.decl_buffer([128, 128], "float32"): T.evaluate(0) # the decl_buffer generated by IRBuilder ir_actual = ib.get() # the expected decl_buffer buffer = T.Buffer((128, 128), "float32") ir_expected = tir.Allocate( buffer.data, "float32", (128, 128), tir.IntImm("bool", True), tir.DeclBuffer(buffer, tir.Evaluate(0)), ) # Check if the generated ir is expected assert_structural_equal(ir_actual, ir_expected, map_free_vars=True) def test_ir_builder_tir_inline(): with IRBuilder() as ib: m, n = T.meta_var(1), T.meta_var(2) a, b = T.meta_var([3, 4]) T.evaluate(m.value + n.value + a.value + b.value) # the evaluate generated by IRBuilder eval_actual = ib.get() # the expected evaluate eval_expected = tir.Evaluate(10) # Check if the generated ir is expected assert_structural_equal(eval_actual, eval_expected, map_free_vars=True) if __name__ == "__main__": tvm.testing.main()
15,635
30.272
98
py
tvm
tvm-main/tests/python/unittest/test_tir_stmt_functor_ir_transform.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_ir_transform(): ib = tvm.tir.ir_builder.create() n = te.var("n") with ib.for_range(0, n, name="i") as i: with ib.for_range(0, 10, name="j") as j: x = tvm.tir.call_extern("int32", "TestA", i * 3 + j * 1) ib.emit(tvm.tir.call_extern("int32", "TestB", x)) ib.emit(tvm.tir.call_extern("int32", "TestC", x)) body = ib.get() builtin_call_extern = tvm.ir.Op.get("tir.call_extern") def preorder(op): if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestC": return tvm.tir.const(42, "int32") return None def postorder(op): assert isinstance(op, tvm.tir.Call) if op.op.same_as(builtin_call_extern) and op.args[0].value == "TestA": return tvm.tir.call_extern("int32", "TestB", op.args[1] + 1) return op body = tvm.tir.stmt_functor.ir_transform(body, preorder, postorder, ["tir.Call"]) stmt_list = tvm.tir.stmt_list(body.body.body) assert stmt_list[0].value.args[1].args[0].value == "TestB" assert stmt_list[1].value.value == 42 if __name__ == "__main__": test_ir_transform()
1,965
37.54902
85
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_device_kernel_launch.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T, ir as I class BaseCompare(tvm.testing.CompareBeforeAfter): transform = tvm.tir.transform.LowerDeviceKernelLaunch() class TestLowerDeviceKernelLaunch(BaseCompare): """Kernel launch parameters are added at the call site The "tir.kernel_launch_params" determines which parameters belong to the runtime, and which below to the device-side PrimFunc. Parameters that are required prior to launching a kernel (e.g. the number of Cuda threads to use) are stored in the `"tir.kernel_launch_params"` attribute, and are used by the runtime prior in order to launch the generated kernel. 
""" def before(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) mod.kernel(A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr({"target": T.target("cuda")}) A = T.decl_buffer(1, dtype="float32", data=A_data) A[0] = 0.0 return mod def expected(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) T.call_packed("kernel", A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr( { "target": T.target("cuda"), "calling_conv": 2, "tir.kernel_launch_params": [], "global_symbol": "kernel", "tir.is_global_func": True, } ) A = T.decl_buffer(1, dtype="float32", data=A_data) A[0] = 0.0 return mod class TestExternallyVisibleKernelLaunch(BaseCompare): """Like TestLowerDeviceKernelLaunch, with pre-defined global_symbol Because the host and kernel will be handled by different code generators, the device-side kernel must be externally exposed for use by the host-side wrapper, even if the host-side wrapper does not directly expose the kernel. Therefore, a "global_symbol" attribute must be added for the kernel if not already present. If the kernel already has a specific name, that name should be preserved. 
""" def before(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) mod.kernel(A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr({"target": T.target("cuda"), "global_symbol": "kernel_by_another_name"}) A = T.decl_buffer(1, dtype="float32", data=A_data) A[0] = 0.0 return mod def expected(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) T.call_packed("kernel_by_another_name", A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr( { "target": T.target("cuda"), "calling_conv": 2, "tir.kernel_launch_params": [], "global_symbol": "kernel_by_another_name", "tir.is_global_func": True, } ) A = T.decl_buffer(1, dtype="float32", data=A_data) A[0] = 0.0 return mod class TestCollectLaunchParameter(BaseCompare): """Kernel launch parameters are added at the call site The "tir.kernel_launch_params" determines which parameters belong to the runtime, and which below to the device-side PrimFunc. Parameters that are required prior to launching a kernel (e.g. the number of Cuda threads to use) are stored in the `"tir.kernel_launch_params"` attribute, and are used by the runtime prior in order to launch the generated kernel. 
""" def before(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(16, "float32")): T.func_attr({"target": T.target("llvm")}) mod.kernel(A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr( { "target": T.target("cuda"), "global_symbol": "kernel", } ) A = T.decl_buffer(16, dtype="float32", data=A_data) i = T.launch_thread("threadIdx.x", 16) A[i] = 0.0 return mod def expected(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(16, "float32")): T.func_attr({"target": T.target("llvm")}) T.call_packed("kernel", A.data, 16) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr( { "target": T.target("cuda"), "calling_conv": 2, "tir.kernel_launch_params": ["threadIdx.x"], "global_symbol": "kernel", "tir.is_global_func": True, } ) A = T.decl_buffer(16, dtype="float32", data=A_data) i = T.launch_thread("threadIdx.x", 16) A[i] = 0.0 return mod class TestSameDeviceDifferentTarget(BaseCompare): """Handle subroutine calls to same device, different codegen The device kernel launch is only required when the caller and callee are on different devices. However, if the caller and callee use different codegen, then the call cannot be handled as an internal call by a single codegen. Instead, it should be lowered to a `T.call_extern`. 
""" def before(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) mod.kernel(A.data) @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr({"target": T.target("c")}) A = T.decl_buffer(16, dtype="float32", data=A_data) A[0] = 0.0 return mod def expected(self): @I.ir_module class mod: @T.prim_func def main(A: T.Buffer(1, "float32")): T.func_attr({"target": T.target("llvm")}) T.call_extern("kernel", A.data, dtype="void") @T.prim_func def kernel(A_data: T.handle("float32")): T.func_attr( { "target": T.target("c"), "global_symbol": "kernel", "tir.is_global_func": True, } ) A = T.decl_buffer(16, dtype="float32", data=A_data) A[0] = 0.0 return mod if __name__ == "__main__": tvm.testing.main()
8,313
33.213992
100
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_printer_structural_equal.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import pytest import tvm from tvm.ir import assert_structural_equal from tvm.relay.op.transform import split from tvm.runtime import ObjectPath from tvm.script import ir as I, tir as T def _error_message(exception): splitter = "ValueError: StructuralEqual" return splitter + str(exception).split(splitter)[1] def _expected_result(func1, func2, objpath1, objpath2): return f"""ValueError: StructuralEqual check failed, caused by lhs at {objpath1}: {func1.script(path_to_underline=[objpath1], syntax_sugar=False)} and rhs at {objpath2}: {func2.script(path_to_underline=[objpath2], syntax_sugar=False)}""" def test_prim_func_buffer_map(): @T.prim_func def func1(a: T.handle, b: T.handle): A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 128)) @T.prim_func def func2(a: T.handle, b: T.handle): A = T.match_buffer(a, (128, 128)) B = T.match_buffer(b, (128, 256)) with pytest.raises(ValueError) as ve: assert_structural_equal(func1, func2) assert _error_message(ve.value) == _expected_result( func1, func2, ObjectPath.root() .attr("buffer_map") .map_value(func1.params[1]) .attr("shape") .array_index(1) .attr("value"), ObjectPath.root() .attr("buffer_map") .map_value(func2.params[1]) .attr("shape") 
.array_index(1) .attr("value"), ) def test_evaluate(): @I.ir_module class module1: @T.prim_func def func(): T.evaluate(0) @I.ir_module class module2: @T.prim_func def func(): T.evaluate(1) with pytest.raises(ValueError) as ve: assert_structural_equal(module1, module2) assert _error_message(ve.value) == _expected_result( module1, module2, ObjectPath.root() .attr("functions") .map_value(module1.get_global_var("func")) .attr("body") .attr("value") .attr("value"), ObjectPath.root() .attr("functions") .map_value(module2.get_global_var("func")) .attr("body") .attr("value") .attr("value"), ) def test_allocate(): @T.prim_func def func1(): a_data = T.allocate((128, 128), dtype="float32") a = T.decl_buffer((128, 128), dtype="float32", data=a_data) @T.prim_func def func2(): a_data = T.allocate((256, 128), dtype="float32") a = T.decl_buffer((256, 128), dtype="float32", data=a_data) with pytest.raises(ValueError) as ve: assert_structural_equal(func1, func2) assert _error_message(ve.value) == _expected_result( func1, func2, ObjectPath.root().attr("body").attr("extents").array_index(0).attr("value"), ObjectPath.root().attr("body").attr("extents").array_index(0).attr("value"), ) def test_for(): @T.prim_func def func1(): for i, j in T.grid(128, 128): with T.block(): pass @T.prim_func def func2(): for i, j, k in T.grid(128, 128, 128): with T.block(): pass with pytest.raises(ValueError) as ve: assert_structural_equal(func1, func2) assert _error_message(ve.value) == _expected_result( func1, func2, ObjectPath.root().attr("body").attr("block").attr("body").attr("body").attr("body"), ObjectPath.root().attr("body").attr("block").attr("body").attr("body").attr("body"), ) if __name__ == "__main__": tvm.testing.main()
4,379
28.395973
92
py
tvm
tvm-main/tests/python/unittest/test_te_schedule_bound_inference_tiling.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te def test_bound_tile_mod(): def compute(M_tiles, N_tiles, factor, dtype): # Algo M = M_tiles * factor N = N_tiles * factor A = tvm.te.placeholder((N, M), name="A", dtype=dtype) C = tvm.te.compute((N, M), lambda n, m: A[n, m], name="C") s = tvm.te.create_schedule(C.op) return s, A, C def schedule(s, factor, padding, A, C): C_local = s.cache_write(C, "local") n, m = C.op.axis bn, bm, ni, mi = s[C].tile(n, m, factor, factor) nio, nii = s[C].split(ni, 2) n = s[C].fuse(nii, mi) C_shared = s.cache_write(C, "shared") bn, bm, ni, mi = C_shared.op.axis s[C_shared].storage_align(ni, factor * 2, padding) n, m = s[C].op.axis bn, bm, ni, mi = s[C].tile(n, m, factor, factor) s[C].set_scope("global") niio, niii = s[C].split(ni, 32) s[C_shared].compute_at(s[C], niio) return s s, A, C = compute(2, 2, 128, "float16") s = schedule(s, 128, 8, A, C) bounds = tvm.te.schedule.InferBound(s) check = bounds[s.stages[2].op.axis[2]].extent == 16 if not check: print(tvm.lower(s, [A, C], simple_mode=True)) assert check if __name__ == "__main__": test_bound_tile_mod()
2,091
32.206349
66
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_mutator_mutate_unroll.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring from typing import List from tvm import meta_schedule as ms from tvm.script import tir as T from tvm.target import Target from tvm.tir import Schedule # pylint: disable=invalid-name, no-member @T.prim_func def matmul(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer(a, [512, 512]) B = T.match_buffer(b, [512, 512]) C = T.match_buffer(c, [512, 512]) for i, j, k in T.grid(512, 512, 512): # type: ignore with T.block("C"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) # type: ignore with T.init(): C[vi, vj] = 0.0 # type: ignore C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk] # pylint: enable=invalid-name, no-member def _sch(decisions: List[List[int]]) -> Schedule: sch = Schedule(matmul, debug_mask="all") # pylint: disable=invalid-name d0, d1, d2 = decisions b0 = sch.get_block(name="C", func_name="main") root = sch.get_block(name="root", func_name="main") sch.get_consumers(block=b0) b1 = sch.cache_write(block=b0, write_buffer_index=0, storage_scope="global") l2, l3, l4 = sch.get_loops(block=b0) v5, v6, v7, v8 = sch.sample_perfect_tile( loop=l2, n=4, max_innermost_factor=64, decision=d0, ) l9, l10, l11, 
l12 = sch.split(loop=l2, factors=[v5, v6, v7, v8]) v13, v14, v15, v16 = sch.sample_perfect_tile( loop=l3, n=4, max_innermost_factor=64, decision=d1, ) l17, l18, l19, l20 = sch.split(loop=l3, factors=[v13, v14, v15, v16]) v21, v22 = sch.sample_perfect_tile( loop=l4, n=2, max_innermost_factor=64, decision=d2, ) l23, l24 = sch.split(loop=l4, factors=[v21, v22]) sch.reorder(l9, l17, l10, l18, l23, l11, l19, l24, l12, l20) sch.reverse_compute_at(block=b1, loop=l18, preserve_unit_loops=True) v57 = sch.sample_categorical( candidates=[0, 16, 64, 512], probs=[0.25, 0.25, 0.25, 0.25], decision=0, ) sch.annotate(block_or_loop=root, ann_key="meta_schedule.unroll_explicit", ann_val=v57) # pylint: enable=invalid-name return sch def _make_mutator(target: Target) -> ms.Mutator: ctx = ms.TuneContext( mod=matmul, target=target, space_generator=ms.space_generator.PostOrderApply( sch_rules=[], postprocs=[], mutator_probs={ms.mutator.MutateUnroll(): 1.0}, ), ) return list(ctx.space_generator.mutator_probs.keys())[0] def test_mutate_unroll_matmul(): mutator = _make_mutator(target=Target("llvm --num-cores=16")) sch = _sch( decisions=[ [4, 32, 4, 1], [8, 4, 8, 2], [512, 1], ], ) results = set() for _ in range(100): trace = mutator.apply(sch.trace) decision = trace.decisions[trace.insts[-2]] results.add(decision) if len(results) == 3: break assert len(results) == 3 assert results == {1, 2, 3} if __name__ == """__main__""": test_mutate_unroll_matmul()
3,973
31.842975
93
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_space_cpu.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for MetaSchedule search space on CPU"""
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.space_generation import (
    check_sketches,
    print_sketches,
    generate_design_space,
)
from tvm.meta_schedule.testing.te_workload import create_te_workload
from tvm.script import tir as T
from tvm.target import Target


def _target():
    # Reference CPU target used by every test in this file.
    return Target("aws/cpu/c5.9xlarge")


def _design_space(mod):
    # Generate the LLVM design space for `mod` using the default schedule rules.
    return generate_design_space(
        kind="llvm",
        mod=mod,
        target=_target(),
        types=ms.ScheduleRule,
    )


def test_cpu_c1d():
    """C1D workload: check the generated sketches and sampling decisions."""
    # fmt: off
    @T.prim_func
    def c1d_0(inputs: T.Buffer((1, 256, 64), "float32"), weight: T.Buffer((3, 64, 128), "float32"), conv1d_nlc: T.Buffer((1, 128, 128), "float32")):
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel":288, "meta_schedule.unroll_explicit":512, "meta_schedule.vectorize":64})
            PadInput = T.alloc_buffer((1, 258, 64), dtype="float32")
            conv1d_nlc_global = T.alloc_buffer((1, 128, 128), dtype="float32")
            for i0, i1, i2 in T.grid(1, 258, 64):
                with T.block("PadInput"):
                    v_i0, v_i1, v_i2 = T.axis.remap("SSS", [i0, i1, i2])
                    T.reads(inputs[v_i0, v_i1 - 1, v_i2])
                    T.writes(PadInput[v_i0, v_i1, v_i2])
                    PadInput[v_i0, v_i1, v_i2] = T.if_then_else(1 <= v_i1 and v_i1 < 257, inputs[v_i0, v_i1 - 1, v_i2], T.float32(0))
            for n_0, l_0, co_0, n_1, l_1, co_1 in T.grid(1, 1, 2, 1, 1, 8):
                for rl_0, rc_0, n_2, l_2, co_2, rl_1, rc_1, n_3, l_3, co_3 in T.grid(1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                    with T.block("conv1d_nlc"):
                        v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                        v_l = T.axis.spatial(128, l_0 * 128 + l_1 * 128 + l_2 * 2 + l_3)
                        v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 8 + co_2 + co_3)
                        v_rl = T.axis.reduce(3, rl_0 * 3 + rl_1)
                        v_rc = T.axis.reduce(64, rc_0 + rc_1)
                        T.reads(PadInput[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc], weight[v_rl, v_rc, v_co])
                        T.writes(conv1d_nlc_global[v_n, v_l, v_co])
                        T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                        with T.init():
                            conv1d_nlc_global[v_n, v_l, v_co] = T.float32(0)
                        conv1d_nlc_global[v_n, v_l, v_co] = conv1d_nlc_global[v_n, v_l, v_co] + PadInput[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc] * weight[v_rl, v_rc, v_co]
                for ax0, ax1, ax2 in T.grid(1, 128, 8):
                    with T.block("conv1d_nlc_global"):
                        v0, v1 = T.axis.remap("SS", [ax0, ax1])
                        v2 = T.axis.spatial(128, co_0 * 64 + co_1 * 8 + ax2)
                        T.reads(conv1d_nlc_global[v0, v1, v2])
                        T.writes(conv1d_nlc[v0, v1, v2])
                        conv1d_nlc[v0, v1, v2] = conv1d_nlc_global[v0, v1, v2]
    @T.prim_func
    def c1d_1(inputs: T.Buffer((1, 256, 64), "float32"), weight: T.Buffer((3, 64, 128), "float32"), conv1d_nlc: T.Buffer((1, 128, 128), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 258, 64))
            conv1d_nlc_global = T.alloc_buffer((1, 128, 128))
            for n_0, l_0, co_0 in T.grid(1, 1, 2):
                for n_1, l_1, co_1 in T.grid(1, 1, 8):
                    for ax0, ax1, ax2 in T.grid(1, 257, 64):
                        with T.block("PadInput"):
                            v_i0 = T.axis.spatial(1, ax0)
                            v_i1 = T.axis.spatial(258, ax1)
                            v_i2 = T.axis.spatial(64, ax2)
                            T.reads(inputs[v_i0, v_i1 - 1, v_i2])
                            T.writes(PadInput[v_i0, v_i1, v_i2])
                            PadInput[v_i0, v_i1, v_i2] = T.if_then_else(1 <= v_i1 and v_i1 < 257, inputs[v_i0, v_i1 - 1, v_i2], T.float32(0))
                    for rl_0, rc_0, n_2, l_2, co_2, rl_1, rc_1, n_3, l_3, co_3 in T.grid(1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                        with T.block("conv1d_nlc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_l = T.axis.spatial(128, l_0 * 128 + l_1 * 128 + l_2 * 2 + l_3)
                            v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 8 + co_2 + co_3)
                            v_rl = T.axis.reduce(3, rl_0 * 3 + rl_1)
                            v_rc = T.axis.reduce(64, rc_0 + rc_1)
                            T.reads(PadInput[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc], weight[v_rl, v_rc, v_co])
                            T.writes(conv1d_nlc_global[v_n, v_l, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv1d_nlc_global[v_n, v_l, v_co] = T.float32(0)
                            conv1d_nlc_global[v_n, v_l, v_co] = conv1d_nlc_global[v_n, v_l, v_co] + PadInput[v_n, v_l * 2 + v_rl, v_co // 128 * 64 + v_rc] * weight[v_rl, v_rc, v_co]
                for ax0, ax1, ax2 in T.grid(1, 128, 64):
                    with T.block("conv1d_nlc_global"):
                        v0, v1 = T.axis.remap("SS", [ax0, ax1])
                        v2 = T.axis.spatial(128, co_0 * 64 + ax2)
                        T.reads(conv1d_nlc_global[v0, v1, v2])
                        T.writes(conv1d_nlc[v0, v1, v2])
                        conv1d_nlc[v0, v1, v2] = conv1d_nlc_global[v0, v1, v2]
    @T.prim_func
    def c1d_2(inputs: T.Buffer((1, 256, 64), "float32"), weight: T.Buffer((3, 64, 128), "float32"), conv1d_nlc: T.Buffer((1, 128, 128), "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64})
            for n_0, l_0, co_0, n_1, l_1, co_1, rl_0, rc_0, n_2, l_2, co_2, rl_1, rc_1, n_3, l_3, co_3 in T.grid(1, 1, 2, 1, 1, 8, 1, 64, 1, 64, 8, 3, 1, 1, 2, 1):
                with T.block("conv1d_nlc"):
                    v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                    v_l = T.axis.spatial(128, l_0 * 128 + l_1 * 128 + l_2 * 2 + l_3)
                    v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 8 + co_2 + co_3)
                    v_rl = T.axis.reduce(3, rl_0 * 3 + rl_1)
                    v_rc = T.axis.reduce(64, rc_0 + rc_1)
                    T.reads(inputs[v_n, v_l * 2 + v_rl - 1, v_co // 128 * 64 + v_rc], weight[v_rl, v_rc, v_co])
                    T.writes(conv1d_nlc[v_n, v_l, v_co])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        conv1d_nlc[v_n, v_l, v_co] = T.float32(0)
                    conv1d_nlc[v_n, v_l, v_co] = conv1d_nlc[v_n, v_l, v_co] + T.if_then_else(1 <= v_l * 2 + v_rl and v_l * 2 + v_rl < 257, inputs[v_n, v_l * 2 + v_rl - 1, v_co // 128 * 64 + v_rc], T.float32(0)) * weight[v_rl, v_rc, v_co]
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 5),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [1, 1, 64, 2]),
        ("SamplePerfectTile", [2, 8, 8, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [64, 1]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", -2),
    ]
    mod = create_te_workload("C1D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c1d_0, c1d_1, c1d_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_c2d():
    """C2D workload: check the generated sketches and sampling decisions."""
    # fmt: off
    @T.prim_func
    def c2d_0(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 230, 230, 3))
            conv2d_nhwc_global = T.alloc_buffer((1, 112, 112, 64))
            for n_0, h_0, w_0, co_0, n_1, h_1, w_1 in T.grid(1, 7, 4, 2, 1, 1, 28):
                for ax0, ax1, ax2, ax3 in T.grid(1, 37, 7, 3):
                    with T.block("PadInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(230, h_0 * 32 + ax1)
                        v_i2 = T.axis.spatial(230, w_0 * 56 + w_1 * 2 + ax2)
                        v_i3 = T.axis.spatial(3, ax3)
                        T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3])
                        T.writes(PadInput[v_i0, v_i1, v_i2, v_i3])
                        PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0))
                for co_1 in range(8):
                    for rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                        with T.block("conv2d_nhwc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_h = T.axis.spatial(112, h_0 * 16 + h_1 * 16 + h_2 * 8 + h_3)
                            v_w = T.axis.spatial(112, w_0 * 28 + w_1 + w_2 + w_3)
                            v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 4 + co_2 * 4 + co_3)
                            v_rh = T.axis.reduce(7, rh_0 + rh_1)
                            v_rw = T.axis.reduce(7, rw_0 + rw_1)
                            v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                            T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co])
                            T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0)
                            conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co]
                    for ax0, ax1, ax2, ax3 in T.grid(1, 16, 1, 4):
                        with T.block("conv2d_nhwc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(112, h_0 * 16 + ax1)
                            v2 = T.axis.spatial(112, w_0 * 28 + w_1 + ax2)
                            v3 = T.axis.spatial(64, co_0 * 32 + co_1 * 4 + ax3)
                            T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                            T.writes(conv2d_nhwc[v0, v1, v2, v3])
                            conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
    @T.prim_func
    def c2d_1(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 230, 230, 3))
            conv2d_nhwc_global = T.alloc_buffer((1, 112, 112, 64))
            for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
                with T.block("PadInput"):
                    v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3])
                    T.writes(PadInput[v_i0, v_i1, v_i2, v_i3])
                    PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0))
            for n_0, h_0, w_0, co_0 in T.grid(1, 7, 4, 2):
                for n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 1, 28, 8, 7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                    with T.block("conv2d_nhwc"):
                        v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                        v_h = T.axis.spatial(112, h_0 * 16 + h_1 * 16 + h_2 * 8 + h_3)
                        v_w = T.axis.spatial(112, w_0 * 28 + w_1 + w_2 + w_3)
                        v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 4 + co_2 * 4 + co_3)
                        v_rh = T.axis.reduce(7, rh_0 + rh_1)
                        v_rw = T.axis.reduce(7, rw_0 + rw_1)
                        v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                        T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co])
                        T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co])
                        T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                        with T.init():
                            conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0)
                        conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co]
                for ax0, ax1, ax2, ax3 in T.grid(1, 16, 28, 32):
                    with T.block("conv2d_nhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(112, h_0 * 16 + ax1)
                        v2 = T.axis.spatial(112, w_0 * 28 + ax2)
                        v3 = T.axis.spatial(64, co_0 * 32 + ax3)
                        T.reads(conv2d_nhwc_global[v0, v1, v2, v3])
                        T.writes(conv2d_nhwc[v0, v1, v2, v3])
                        conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3]
    @T.prim_func
    def c2d_2(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 230, 230, 3))
            for n_0, h_0 in T.grid(1, 7):
                for ax0, ax1, ax2, ax3 in T.grid(1, 37, 229, 3):
                    with T.block("PadInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(230, h_0 * 32 + ax1)
                        v_i2 = T.axis.spatial(230, ax2)
                        v_i3 = T.axis.spatial(3, ax3)
                        T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3])
                        T.writes(PadInput[v_i0, v_i1, v_i2, v_i3])
                        PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0))
                for w_0, co_0, n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(4, 2, 1, 1, 28, 8, 7, 7, 1, 1, 2, 1, 1, 1, 1, 3, 1, 8, 1, 4):
                    with T.block("conv2d_nhwc"):
                        v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                        v_h = T.axis.spatial(112, h_0 * 16 + h_1 * 16 + h_2 * 8 + h_3)
                        v_w = T.axis.spatial(112, w_0 * 28 + w_1 + w_2 + w_3)
                        v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 4 + co_2 * 4 + co_3)
                        v_rh = T.axis.reduce(7, rh_0 + rh_1)
                        v_rw = T.axis.reduce(7, rw_0 + rw_1)
                        v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                        T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co])
                        T.writes(conv2d_nhwc[v_n, v_h, v_w, v_co])
                        T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                        with T.init():
                            conv2d_nhwc[v_n, v_h, v_w, v_co] = T.float32(0)
                        conv2d_nhwc[v_n, v_h, v_w, v_co] = conv2d_nhwc[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co]
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", 6),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [7, 1, 2, 8]),
        ("SamplePerfectTile", [4, 28, 1, 1]),
        ("SamplePerfectTile", [2, 8, 1, 4]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", 1),
    ]
    mod = create_te_workload("C2D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c2d_0, c2d_1, c2d_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_c3d():
    """C3D workload: check the generated sketches and sampling decisions."""
    # fmt: off
    @T.prim_func
    def c3d_0(inputs: T.Buffer((1, 16, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 7, 3, 64), "float32"), conv3d_ndhwc: T.Buffer((1, 8, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 22, 230, 230, 3))
            conv3d_ndhwc_global = T.alloc_buffer((1, 8, 112, 112, 64))
            for n_0, d_0, h_0, w_0, co_0 in T.grid(1, 2, 4, 1, 2):
                for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 13, 61, 229, 3):
                    with T.block("PadInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(22, d_0 * 8 + ax1)
                        v_i2 = T.axis.spatial(230, h_0 * 56 + ax2)
                        v_i3 = T.axis.spatial(230, ax3)
                        v_i4 = T.axis.spatial(3, ax4)
                        T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4])
                        T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4])
                        PadInput[v_i0, v_i1, v_i2, v_i3, v_i4] = T.if_then_else(3 <= v_i1 and v_i1 < 19 and 3 <= v_i2 and v_i2 < 227 and 3 <= v_i3 and v_i3 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4], T.float32(0))
                for n_1, d_1, h_1, w_1, co_1 in T.grid(1, 4, 4, 14, 1):
                    for rd_0, rh_0, rw_0, rc_0, n_2, d_2, h_2, w_2, co_2, rd_1, rh_1, rw_1, rc_1, n_3, d_3, h_3, w_3, co_3 in T.grid(1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
                        with T.block("conv3d_ndhwc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_d = T.axis.spatial(8, d_0 * 4 + d_1 + d_2 + d_3)
                            v_h = T.axis.spatial(112, h_0 * 28 + h_1 * 7 + h_2 * 7 + h_3)
                            v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 8 + w_2 * 8 + w_3)
                            v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 32 + co_2 + co_3)
                            v_rd = T.axis.reduce(7, rd_0 * 7 + rd_1)
                            v_rh = T.axis.reduce(7, rh_0 + rh_1)
                            v_rw = T.axis.reduce(7, rw_0 + rw_1)
                            v_rc = T.axis.reduce(3, rc_0 + rc_1)
                            T.reads(PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rd, v_rh, v_rw, v_rc, v_co])
                            T.writes(conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] = T.float32(0)
                            conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] = conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] + PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rd, v_rh, v_rw, v_rc, v_co]
                    for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 1, 7, 8, 32):
                        with T.block("conv3d_ndhwc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(8, d_0 * 4 + d_1 + ax1)
                            v2 = T.axis.spatial(112, h_0 * 28 + h_1 * 7 + ax2)
                            v3 = T.axis.spatial(112, w_1 * 8 + ax3)
                            v4 = T.axis.spatial(64, co_0 * 32 + ax4)
                            T.reads(conv3d_ndhwc_global[v0, v1, v2, v3, v4])
                            T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
                            conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_global[v0, v1, v2, v3, v4]
    @T.prim_func
    def c3d_1(inputs: T.Buffer((1, 16, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 7, 3, 64), "float32"), conv3d_ndhwc: T.Buffer((1, 8, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 22, 230, 230, 3))
            conv3d_ndhwc_global = T.alloc_buffer((1, 8, 112, 112, 64))
            for n_0, d_0, h_0, w_0, co_0 in T.grid(1, 2, 4, 1, 2):
                for n_1, d_1, h_1, w_1 in T.grid(1, 4, 4, 14):
                    for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 7, 19, 21, 3):
                        with T.block("PadInput"):
                            v_i0 = T.axis.spatial(1, ax0)
                            v_i1 = T.axis.spatial(22, d_0 * 8 + d_1 * 2 + ax1)
                            v_i2 = T.axis.spatial(230, h_0 * 56 + h_1 * 14 + ax2)
                            v_i3 = T.axis.spatial(230, w_1 * 16 + ax3)
                            v_i4 = T.axis.spatial(3, ax4)
                            T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4])
                            T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4])
                            PadInput[v_i0, v_i1, v_i2, v_i3, v_i4] = T.if_then_else(3 <= v_i1 and v_i1 < 19 and 3 <= v_i2 and v_i2 < 227 and 3 <= v_i3 and v_i3 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4], T.float32(0))
                    for co_1, rd_0, rh_0, rw_0, rc_0, n_2, d_2, h_2, w_2, co_2, rd_1, rh_1, rw_1, rc_1, n_3, d_3, h_3, w_3, co_3 in T.grid(1, 1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
                        with T.block("conv3d_ndhwc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_d = T.axis.spatial(8, d_0 * 4 + d_1 + d_2 + d_3)
                            v_h = T.axis.spatial(112, h_0 * 28 + h_1 * 7 + h_2 * 7 + h_3)
                            v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 8 + w_2 * 8 + w_3)
                            v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 32 + co_2 + co_3)
                            v_rd = T.axis.reduce(7, rd_0 * 7 + rd_1)
                            v_rh = T.axis.reduce(7, rh_0 + rh_1)
                            v_rw = T.axis.reduce(7, rw_0 + rw_1)
                            v_rc = T.axis.reduce(3, rc_0 + rc_1)
                            T.reads(PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rd, v_rh, v_rw, v_rc, v_co])
                            T.writes(conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] = T.float32(0)
                            conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] = conv3d_ndhwc_global[v_n, v_d, v_h, v_w, v_co] + PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rd, v_rh, v_rw, v_rc, v_co]
                for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 4, 28, 112, 32):
                    with T.block("conv3d_ndhwc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(8, d_0 * 4 + ax1)
                        v2 = T.axis.spatial(112, h_0 * 28 + ax2)
                        v3 = T.axis.spatial(112, ax3)
                        v4 = T.axis.spatial(64, co_0 * 32 + ax4)
                        T.reads(conv3d_ndhwc_global[v0, v1, v2, v3, v4])
                        T.writes(conv3d_ndhwc[v0, v1, v2, v3, v4])
                        conv3d_ndhwc[v0, v1, v2, v3, v4] = conv3d_ndhwc_global[v0, v1, v2, v3, v4]
    @T.prim_func
    def c3d_2(inputs: T.Buffer((1, 16, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 7, 3, 64), "float32"), conv3d_ndhwc: T.Buffer((1, 8, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 22, 230, 230, 3))
            for n_0, d_0, h_0, w_0, co_0, n_1, d_1, h_1, w_1 in T.grid(1, 2, 4, 1, 2, 1, 4, 4, 14):
                for ax0, ax1, ax2, ax3, ax4 in T.grid(1, 7, 19, 21, 3):
                    with T.block("PadInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(22, d_0 * 8 + d_1 * 2 + ax1)
                        v_i2 = T.axis.spatial(230, h_0 * 56 + h_1 * 14 + ax2)
                        v_i3 = T.axis.spatial(230, w_1 * 16 + ax3)
                        v_i4 = T.axis.spatial(3, ax4)
                        T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4])
                        T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4])
                        PadInput[v_i0, v_i1, v_i2, v_i3, v_i4] = T.if_then_else(3 <= v_i1 and v_i1 < 19 and 3 <= v_i2 and v_i2 < 227 and 3 <= v_i3 and v_i3 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3 - 3, v_i4], T.float32(0))
                for co_1, rd_0, rh_0, rw_0, rc_0, n_2, d_2, h_2, w_2, co_2, rd_1, rh_1, rw_1, rc_1, n_3, d_3, h_3, w_3, co_3 in T.grid(1, 1, 7, 7, 3, 1, 1, 1, 1, 32, 7, 1, 1, 1, 1, 1, 7, 8, 1):
                    with T.block("conv3d_ndhwc"):
                        v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                        v_d = T.axis.spatial(8, d_0 * 4 + d_1 + d_2 + d_3)
                        v_h = T.axis.spatial(112, h_0 * 28 + h_1 * 7 + h_2 * 7 + h_3)
                        v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 8 + w_2 * 8 + w_3)
                        v_co = T.axis.spatial(64, co_0 * 32 + co_1 * 32 + co_2 + co_3)
                        v_rd = T.axis.reduce(7, rd_0 * 7 + rd_1)
                        v_rh = T.axis.reduce(7, rh_0 + rh_1)
                        v_rw = T.axis.reduce(7, rw_0 + rw_1)
                        v_rc = T.axis.reduce(3, rc_0 + rc_1)
                        T.reads(PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc], weight[v_rd, v_rh, v_rw, v_rc, v_co])
                        T.writes(conv3d_ndhwc[v_n, v_d, v_h, v_w, v_co])
                        T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                        with T.init():
                            conv3d_ndhwc[v_n, v_d, v_h, v_w, v_co] = T.float32(0)
                        conv3d_ndhwc[v_n, v_d, v_h, v_w, v_co] = conv3d_ndhwc[v_n, v_d, v_h, v_w, v_co] + PadInput[v_n, v_d * 2 + v_rd, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 64 * 3 + v_rc] * weight[v_rd, v_rh, v_rw, v_rc, v_co]
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [4, 4, 1, 7]),
        ("SamplePerfectTile", [1, 14, 1, 8]),
        ("SamplePerfectTile", [2, 1, 32, 1]),
        ("SamplePerfectTile", [1, 7]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [3, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 4),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [4, 4, 1, 7]),
        ("SamplePerfectTile", [1, 14, 1, 8]),
        ("SamplePerfectTile", [2, 1, 32, 1]),
        ("SamplePerfectTile", [1, 7]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [3, 1]),
        ("SampleCategorical", 2),
        ("SampleComputeLocation", 8),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [4, 4, 1, 7]),
        ("SamplePerfectTile", [1, 14, 1, 8]),
        ("SamplePerfectTile", [2, 1, 32, 1]),
        ("SamplePerfectTile", [1, 7]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [3, 1]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", 8),
    ]
    mod = create_te_workload("C3D", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[c3d_0, c3d_1, c3d_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
def test_cpu_cap():
    """CAP (capsule conv2d) workload: check sketches and sampling decisions."""
    # fmt: off
    @T.prim_func
    def cap_0(inputs: T.Buffer((1, 16, 16, 4, 4, 32), "float32"), weight: T.Buffer((3, 3, 4, 4, 32, 32), "float32"), conv2d_capsule_nhwijc: T.Buffer((1, 8, 8, 4, 4, 32), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 18, 18, 4, 4, 32))
            conv2d_capsule_nhwijc_global = T.alloc_buffer((1, 8, 8, 4, 4, 32))
            for n_0, h_0, w_0, cap_i_0, cap_j_0, co_0, n_1, h_1 in T.grid(1, 2, 1, 1, 1, 1, 1, 4):
                for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 3, 17, 4, 4, 32):
                    with T.block("PadInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(18, h_0 * 8 + h_1 * 2 + ax1)
                        v_i2 = T.axis.spatial(18, ax2)
                        v_i3, v_i4, v_i5 = T.axis.remap("SSS", [ax3, ax4, ax5])
                        T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5])
                        T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5])
                        PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5] = T.if_then_else(1 <= v_i1 and v_i1 < 17 and 1 <= v_i2 and v_i2 < 17, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5], T.float32(0))
                for w_1, cap_i_1, cap_j_1, co_1 in T.grid(4, 1, 4, 2):
                    for rh_0, rw_0, cap_k_0, rc_0, n_2, h_2, w_2, cap_i_2, cap_j_2, co_2, rh_1, rw_1, cap_k_1, rc_1, n_3, h_3, w_3, cap_i_3, cap_j_3, co_3 in T.grid(1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
                        with T.block("conv2d_capsule_nhwijc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_h = T.axis.spatial(8, h_0 * 4 + h_1 + h_2 + h_3)
                            v_w = T.axis.spatial(8, w_0 * 8 + w_1 * 2 + w_2 + w_3)
                            v_cap_i = T.axis.spatial(4, cap_i_0 * 4 + cap_i_1 * 4 + cap_i_2 * 4 + cap_i_3)
                            v_cap_j = T.axis.spatial(4, cap_j_0 * 4 + cap_j_1 + cap_j_2 + cap_j_3)
                            v_co = T.axis.spatial(32, co_0 * 32 + co_1 * 16 + co_2 * 16 + co_3)
                            v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1)
                            v_rw = T.axis.reduce(3, rw_0 + rw_1)
                            v_cap_k = T.axis.reduce(4, cap_k_0 + cap_k_1)
                            v_rc = T.axis.reduce(32, rc_0 * 32 + rc_1)
                            T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc], weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co])
                            T.writes(conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = T.float32(0)
                            conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc] * weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co]
                    for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 1, 2, 4, 1, 16):
                        with T.block("conv2d_capsule_nhwijc_global"):
                            v0 = T.axis.spatial(1, ax0)
                            v1 = T.axis.spatial(8, h_0 * 4 + h_1 + ax1)
                            v2 = T.axis.spatial(8, w_1 * 2 + ax2)
                            v3 = T.axis.spatial(4, ax3)
                            v4 = T.axis.spatial(4, cap_j_1 + ax4)
                            v5 = T.axis.spatial(32, co_1 * 16 + ax5)
                            T.reads(conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5])
                            T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
                            conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5]
    @T.prim_func
    def cap_1(inputs: T.Buffer((1, 16, 16, 4, 4, 32), "float32"), weight: T.Buffer((3, 3, 4, 4, 32, 32), "float32"), conv2d_capsule_nhwijc: T.Buffer((1, 8, 8, 4, 4, 32), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 18, 18, 4, 4, 32))
            conv2d_capsule_nhwijc_global = T.alloc_buffer((1, 8, 8, 4, 4, 32))
            for n_0, h_0, w_0, cap_i_0, cap_j_0, co_0 in T.grid(1, 2, 1, 1, 1, 1):
                for n_1, h_1, w_1, cap_i_1, cap_j_1, co_1 in T.grid(1, 4, 4, 1, 4, 2):
                    for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 3, 5, 4, 4, 32):
                        with T.block("PadInput"):
                            v_i0 = T.axis.spatial(1, ax0)
                            v_i1 = T.axis.spatial(18, h_0 * 8 + h_1 * 2 + ax1)
                            v_i2 = T.axis.spatial(18, w_1 * 4 + ax2)
                            v_i3, v_i4, v_i5 = T.axis.remap("SSS", [ax3, ax4, ax5])
                            T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5])
                            T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5])
                            PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5] = T.if_then_else(1 <= v_i1 and v_i1 < 17 and 1 <= v_i2 and v_i2 < 17, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5], T.float32(0))
                    for rh_0, rw_0, cap_k_0, rc_0, n_2, h_2, w_2, cap_i_2, cap_j_2, co_2, rh_1, rw_1, cap_k_1, rc_1, n_3, h_3, w_3, cap_i_3, cap_j_3, co_3 in T.grid(1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
                        with T.block("conv2d_capsule_nhwijc"):
                            v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                            v_h = T.axis.spatial(8, h_0 * 4 + h_1 + h_2 + h_3)
                            v_w = T.axis.spatial(8, w_0 * 8 + w_1 * 2 + w_2 + w_3)
                            v_cap_i = T.axis.spatial(4, cap_i_0 * 4 + cap_i_1 * 4 + cap_i_2 * 4 + cap_i_3)
                            v_cap_j = T.axis.spatial(4, cap_j_0 * 4 + cap_j_1 + cap_j_2 + cap_j_3)
                            v_co = T.axis.spatial(32, co_0 * 32 + co_1 * 16 + co_2 * 16 + co_3)
                            v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1)
                            v_rw = T.axis.reduce(3, rw_0 + rw_1)
                            v_cap_k = T.axis.reduce(4, cap_k_0 + cap_k_1)
                            v_rc = T.axis.reduce(32, rc_0 * 32 + rc_1)
                            T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc], weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co])
                            T.writes(conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = T.float32(0)
                            conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = conv2d_capsule_nhwijc_global[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc] * weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co]
                for ax0, ax1, ax2, ax3, ax4, ax5 in T.grid(1, 4, 8, 4, 4, 32):
                    with T.block("conv2d_capsule_nhwijc_global"):
                        v0 = T.axis.spatial(1, ax0)
                        v1 = T.axis.spatial(8, h_0 * 4 + ax1)
                        v2, v3, v4, v5 = T.axis.remap("SSSS", [ax2, ax3, ax4, ax5])
                        T.reads(conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5])
                        T.writes(conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5])
                        conv2d_capsule_nhwijc[v0, v1, v2, v3, v4, v5] = conv2d_capsule_nhwijc_global[v0, v1, v2, v3, v4, v5]
    @T.prim_func
    def cap_2(inputs: T.Buffer((1, 16, 16, 4, 4, 32), "float32"), weight: T.Buffer((3, 3, 4, 4, 32, 32), "float32"), conv2d_capsule_nhwijc: T.Buffer((1, 8, 8, 4, 4, 32), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64})
            PadInput = T.alloc_buffer((1, 18, 18, 4, 4, 32))
            for i0, i1, i2, i3, i4, i5 in T.grid(1, 18, 18, 4, 4, 32):
                with T.block("PadInput"):
                    v_i0, v_i1, v_i2, v_i3, v_i4, v_i5 = T.axis.remap("SSSSSS", [i0, i1, i2, i3, i4, i5])
                    T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5])
                    T.writes(PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5])
                    PadInput[v_i0, v_i1, v_i2, v_i3, v_i4, v_i5] = T.if_then_else(1 <= v_i1 and v_i1 < 17 and 1 <= v_i2 and v_i2 < 17, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3, v_i4, v_i5], T.float32(0))
            for n_0, h_0, w_0, cap_i_0, cap_j_0, co_0, n_1, h_1, w_1, cap_i_1, cap_j_1, co_1, rh_0, rw_0, cap_k_0, rc_0, n_2, h_2, w_2, cap_i_2, cap_j_2, co_2, rh_1, rw_1, cap_k_1, rc_1, n_3, h_3, w_3, cap_i_3, cap_j_3, co_3 in T.grid(1, 2, 1, 1, 1, 1, 1, 4, 4, 1, 4, 2, 1, 3, 4, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 32, 1, 1, 1, 4, 1, 16):
                with T.block("conv2d_capsule_nhwijc"):
                    v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3)
                    v_h = T.axis.spatial(8, h_0 * 4 + h_1 + h_2 + h_3)
                    v_w = T.axis.spatial(8, w_0 * 8 + w_1 * 2 + w_2 + w_3)
                    v_cap_i = T.axis.spatial(4, cap_i_0 * 4 + cap_i_1 * 4 + cap_i_2 * 4 + cap_i_3)
                    v_cap_j = T.axis.spatial(4, cap_j_0 * 4 + cap_j_1 + cap_j_2 + cap_j_3)
                    v_co = T.axis.spatial(32, co_0 * 32 + co_1 * 16 + co_2 * 16 + co_3)
                    v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1)
                    v_rw = T.axis.reduce(3, rw_0 + rw_1)
                    v_cap_k = T.axis.reduce(4, cap_k_0 + cap_k_1)
                    v_rc = T.axis.reduce(32, rc_0 * 32 + rc_1)
                    T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc], weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co])
                    T.writes(conv2d_capsule_nhwijc[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        conv2d_capsule_nhwijc[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = T.float32(0)
                    conv2d_capsule_nhwijc[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] = conv2d_capsule_nhwijc[v_n, v_h, v_w, v_cap_i, v_cap_j, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_cap_i, v_cap_k, v_rc] * weight[v_rh, v_rw, v_cap_k, v_cap_j, v_rc, v_co]
    # fmt: on
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [1, 4, 2, 1]),
        ("SamplePerfectTile", [1, 1, 1, 4]),
        ("SamplePerfectTile", [1, 4, 1, 1]),
        ("SamplePerfectTile", [1, 2, 1, 16]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [3, 1]),
        ("SamplePerfectTile", [4, 1]),
        ("SamplePerfectTile", [1, 32]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", 7),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [1, 4, 2, 1]),
        ("SamplePerfectTile", [1, 1, 1, 4]),
        ("SamplePerfectTile", [1, 4, 1, 1]),
        ("SamplePerfectTile", [1, 2, 1, 16]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [3, 1]),
        ("SamplePerfectTile", [4, 1]),
        ("SamplePerfectTile", [1, 32]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", 11),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 4, 1, 1]),
        ("SamplePerfectTile", [1, 4, 2, 1]),
        ("SamplePerfectTile", [1, 1, 1, 4]),
        ("SamplePerfectTile", [1, 4, 1, 1]),
        ("SamplePerfectTile", [1, 2, 1, 16]),
        ("SamplePerfectTile", [1, 3]),
        ("SamplePerfectTile", [3, 1]),
        ("SamplePerfectTile", [4, 1]),
        ("SamplePerfectTile", [1, 32]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", -1),
    ]
    mod = create_te_workload("CAP", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[cap_0, cap_1, cap_2],
        expected_decisions=[decision_0, decision_1, decision_2],
    )
1 <= v_i2 and v_i2 < 113, placeholder[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for n_0, h_0, w_0, c_0, n_1, h_1, w_1, c_1 in T.grid(1, 1, 1, 1, 1, 4, 4, 8): for rh_0, rw_0, n_2, h_2, w_2, c_2, rh_1, rw_1, n_3, h_3, w_3, c_3 in T.grid(1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2): with T.block("depth_conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(112, h_0 * 112 + h_1 * 28 + h_2 * 14 + h_3) v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 28 + w_2 * 4 + w_3) v_c = T.axis.spatial(32, c_0 * 32 + c_1 * 4 + c_2 * 2 + c_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 * 3 + rw_1) T.reads(PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c], placeholder_1[0, v_rh, v_rw, v_c]) T.writes(depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] = T.float32(0) depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] = depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] + PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c] * placeholder_1[0, v_rh, v_rw, v_c] for ax0, ax1, ax2, ax3 in T.grid(1, 28, 28, 4): with T.block("depth_conv2d_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(112, h_1 * 28 + ax1) v2 = T.axis.spatial(112, w_1 * 28 + ax2) v3 = T.axis.spatial(32, c_1 * 4 + ax3) T.reads(depth_conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(depth_conv2d_nhwc[v0, v1, v2, v3]) depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def dep_1(placeholder: T.Buffer((1, 112, 112, 32), "float32"), placeholder_1: T.Buffer((1, 3, 3, 32), "float32"), depth_conv2d_nhwc: T.Buffer((1, 112, 112, 32), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 114, 114, 32)) depth_conv2d_nhwc_global = 
T.alloc_buffer((1, 112, 112, 32)) for i0, i1, i2, i3 in T.grid(1, 114, 114, 32): with T.block("PadInput"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(placeholder[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 113 and 1 <= v_i2 and v_i2 < 113, placeholder[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for n_0, h_0, w_0, c_0 in T.grid(1, 1, 1, 1): for n_1, h_1, w_1, c_1, rh_0, rw_0, n_2, h_2, w_2, c_2, rh_1, rw_1, n_3, h_3, w_3, c_3 in T.grid(1, 4, 4, 8, 1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2): with T.block("depth_conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(112, h_0 * 112 + h_1 * 28 + h_2 * 14 + h_3) v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 28 + w_2 * 4 + w_3) v_c = T.axis.spatial(32, c_0 * 32 + c_1 * 4 + c_2 * 2 + c_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 * 3 + rw_1) T.reads(PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c], placeholder_1[0, v_rh, v_rw, v_c]) T.writes(depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] = T.float32(0) depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] = depth_conv2d_nhwc_global[v_n, v_h, v_w, v_c] + PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c] * placeholder_1[0, v_rh, v_rw, v_c] for ax0, ax1, ax2, ax3 in T.grid(1, 112, 112, 32): with T.block("depth_conv2d_nhwc_global"): v0, v1, v2, v3 = T.axis.remap("SSSS", [ax0, ax1, ax2, ax3]) T.reads(depth_conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(depth_conv2d_nhwc[v0, v1, v2, v3]) depth_conv2d_nhwc[v0, v1, v2, v3] = depth_conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def dep_2(placeholder: T.Buffer((1, 112, 112, 32), "float32"), placeholder_1: T.Buffer((1, 3, 3, 32), "float32"), depth_conv2d_nhwc: T.Buffer((1, 112, 112, 32), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": 
T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 114, 114, 32)) for n_0, h_0, w_0, c_0, n_1, h_1 in T.grid(1, 1, 1, 1, 1, 4): for ax0, ax1, ax2, ax3 in T.grid(1, 30, 114, 32): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(114, h_1 * 28 + ax1) v_i2, v_i3 = T.axis.remap("SS", [ax2, ax3]) T.reads(placeholder[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 113 and 1 <= v_i2 and v_i2 < 113, placeholder[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for w_1, c_1, rh_0, rw_0, n_2, h_2, w_2, c_2, rh_1, rw_1, n_3, h_3, w_3, c_3 in T.grid(4, 8, 1, 1, 1, 2, 7, 2, 3, 3, 1, 14, 4, 2): with T.block("depth_conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(112, h_0 * 112 + h_1 * 28 + h_2 * 14 + h_3) v_w = T.axis.spatial(112, w_0 * 112 + w_1 * 28 + w_2 * 4 + w_3) v_c = T.axis.spatial(32, c_0 * 32 + c_1 * 4 + c_2 * 2 + c_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 * 3 + rw_1) T.reads(PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c], placeholder_1[0, v_rh, v_rw, v_c]) T.writes(depth_conv2d_nhwc[v_n, v_h, v_w, v_c]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): depth_conv2d_nhwc[v_n, v_h, v_w, v_c] = T.float32(0) depth_conv2d_nhwc[v_n, v_h, v_w, v_c] = depth_conv2d_nhwc[v_n, v_h, v_w, v_c] + PadInput[v_n, v_h + v_rh, v_w + v_rw, v_c] * placeholder_1[0, v_rh, v_rw, v_c] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 4, 2, 14]), ("SamplePerfectTile", [1, 4, 7, 4]), ("SamplePerfectTile", [1, 8, 2, 2]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 2), ("SampleComputeLocation", -1), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), 
# --- Remaining DEP decision lists + sketch check, then test_cpu_dil: expected sketches for a
# dilated conv2d (DIL). dil_0/1/2 differ in where PadInput is computed-at and whether the
# cache_write stage (conv2d_nhwc_global) is kept. Generated expected modules — keep verbatim.
("SamplePerfectTile", [1, 4, 2, 14]), ("SamplePerfectTile", [1, 4, 7, 4]), ("SamplePerfectTile", [1, 8, 2, 2]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 1), ("SampleComputeLocation", -1), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 4, 2, 14]), ("SamplePerfectTile", [1, 4, 7, 4]), ("SamplePerfectTile", [1, 8, 2, 2]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 0), ("SampleComputeLocation", 5), ] mod = create_te_workload("DEP", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[dep_0, dep_1, dep_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_dil(): # fmt: off @T.prim_func def dil_0(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 109, 109, 64), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 230, 230, 3)) conv2d_nhwc_global = T.alloc_buffer((1, 109, 109, 64)) for n_0, h_0, w_0, co_0, n_1, h_1, w_1, co_1 in T.grid(1, 109, 1, 4, 1, 1, 1, 2): for ax0, ax1, ax2, ax3 in T.grid(1, 13, 229, 3): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(230, h_0 * 2 + ax1) v_i2 = T.axis.spatial(230, ax2) v_i3 = T.axis.spatial(3, ax3) T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0)) for rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(7, 1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) 
v_h = T.axis.spatial(109, h_0 + h_1 + h_2 + h_3) v_w = T.axis.spatial(109, w_0 * 109 + w_1 * 109 + w_2 + w_3) v_co = T.axis.spatial(64, co_0 * 16 + co_1 * 8 + co_2 + co_3) v_rh = T.axis.reduce(7, rh_0 + rh_1) v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1) v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 1, 109, 8): with T.block("conv2d_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(109, h_0 + ax1) v2 = T.axis.spatial(109, ax2) v3 = T.axis.spatial(64, co_0 * 16 + co_1 * 8 + ax3) T.reads(conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def dil_1(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 109, 109, 64), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 230, 230, 3)) conv2d_nhwc_global = T.alloc_buffer((1, 109, 109, 64)) for n_0, h_0, w_0, co_0 in T.grid(1, 109, 1, 4): for n_1, h_1, w_1, co_1, rh_0 in T.grid(1, 1, 1, 2, 7): for ax0, ax1, ax2, ax3 in T.grid(1, 1, 229, 3): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(230, h_0 * 2 + rh_0 * 2 + ax1) v_i2 = T.axis.spatial(230, ax2) v_i3 = T.axis.spatial(3, ax3) 
T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0)) for rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(109, h_0 + h_1 + h_2 + h_3) v_w = T.axis.spatial(109, w_0 * 109 + w_1 * 109 + w_2 + w_3) v_co = T.axis.spatial(64, co_0 * 16 + co_1 * 8 + co_2 + co_3) v_rh = T.axis.reduce(7, rh_0 + rh_1) v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1) v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 1, 109, 16): with T.block("conv2d_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(109, h_0 + ax1) v2 = T.axis.spatial(109, ax2) v3 = T.axis.spatial(64, co_0 * 16 + ax3) T.reads(conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def dil_2(inputs: T.Buffer((1, 224, 224, 3), "float32"), weight: T.Buffer((7, 7, 3, 64), "float32"), conv2d_nhwc: T.Buffer((1, 109, 109, 64), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 
64}) PadInput = T.alloc_buffer((1, 230, 230, 3)) for n_0, h_0 in T.grid(1, 109): for ax0, ax1, ax2, ax3 in T.grid(1, 13, 229, 3): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(230, h_0 * 2 + ax1) v_i2 = T.axis.spatial(230, ax2) v_i3 = T.axis.spatial(3, ax3) T.reads(inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, inputs[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0)) for w_0, co_0, n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 4, 1, 1, 1, 2, 7, 1, 1, 1, 1, 109, 8, 1, 7, 3, 1, 1, 1, 1): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(109, h_0 + h_1 + h_2 + h_3) v_w = T.axis.spatial(109, w_0 * 109 + w_1 * 109 + w_2 + w_3) v_co = T.axis.spatial(64, co_0 * 16 + co_1 * 8 + co_2 + co_3) v_rh = T.axis.reduce(7, rh_0 + rh_1) v_rw = T.axis.reduce(7, rw_0 * 7 + rw_1) v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc[v_n, v_h, v_w, v_co] = conv2d_nhwc[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh * 2, v_w * 2 + v_rw * 2, v_co // 64 * 3 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [109, 1, 1, 1]), ("SamplePerfectTile", [1, 1, 109, 1]), ("SamplePerfectTile", [4, 2, 8, 1]), ("SamplePerfectTile", [7, 1]), ("SamplePerfectTile", [1, 7]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 2), ("SampleComputeLocation", 7), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [109, 1, 1, 1]), ("SamplePerfectTile", 
# --- Remaining DIL decisions + sketch check, then test_cpu_gmm: expected sketches for a batched
# matmul (GMM, 1x128x128). gmm_0/1 keep the Z_global cache_write at different loop depths;
# gmm_2 (in the next range) drops it. Generated expected modules — keep verbatim.
[1, 1, 109, 1]), ("SamplePerfectTile", [4, 2, 8, 1]), ("SamplePerfectTile", [7, 1]), ("SamplePerfectTile", [1, 7]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 0), ("SampleComputeLocation", 8), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [109, 1, 1, 1]), ("SamplePerfectTile", [1, 1, 109, 1]), ("SamplePerfectTile", [4, 2, 8, 1]), ("SamplePerfectTile", [7, 1]), ("SamplePerfectTile", [1, 7]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 0), ("SampleComputeLocation", 1), ] mod = create_te_workload("DIL", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[dil_0, dil_1, dil_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_gmm(): # fmt: off @T.prim_func def gmm_0(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) Z_global = T.alloc_buffer((1, 128, 128)) for b_0, i_0, j_0, b_1, i_1, j_1 in T.grid(1, 4, 2, 1, 1, 8): for k_0, b_2, i_2, j_2, k_1, b_3, i_3, j_3 in T.grid(128, 1, 16, 1, 1, 1, 2, 8): with T.block("Z"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_i = T.axis.spatial(128, i_0 * 32 + i_1 * 32 + i_2 * 2 + i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 8 + j_3) v_k = T.axis.reduce(128, k_0 + k_1) T.reads(X[v_b, v_i, v_k], Y[v_b, v_k, v_j]) T.writes(Z_global[v_b, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): Z_global[v_b, v_i, v_j] = T.float32(0) Z_global[v_b, v_i, v_j] = Z_global[v_b, v_i, v_j] + X[v_b, v_i, v_k] * Y[v_b, v_k, v_j] for ax0, ax1, ax2 in T.grid(1, 32, 8): with T.block("Z_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(128, i_0 * 32 + ax1) v2 = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + ax2) 
T.reads(Z_global[v0, v1, v2]) T.writes(Z[v0, v1, v2]) Z[v0, v1, v2] = Z_global[v0, v1, v2] @T.prim_func def gmm_1(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) Z_global = T.alloc_buffer((1, 128, 128)) for b_0, i_0, j_0 in T.grid(1, 4, 2): for b_1, i_1, j_1, k_0, b_2, i_2, j_2, k_1, b_3, i_3, j_3 in T.grid(1, 1, 8, 128, 1, 16, 1, 1, 1, 2, 8): with T.block("Z"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_i = T.axis.spatial(128, i_0 * 32 + i_1 * 32 + i_2 * 2 + i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 8 + j_3) v_k = T.axis.reduce(128, k_0 + k_1) T.reads(X[v_b, v_i, v_k], Y[v_b, v_k, v_j]) T.writes(Z_global[v_b, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): Z_global[v_b, v_i, v_j] = T.float32(0) Z_global[v_b, v_i, v_j] = Z_global[v_b, v_i, v_j] + X[v_b, v_i, v_k] * Y[v_b, v_k, v_j] for ax0, ax1, ax2 in T.grid(1, 32, 64): with T.block("Z_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(128, i_0 * 32 + ax1) v2 = T.axis.spatial(128, j_0 * 64 + ax2) T.reads(Z_global[v0, v1, v2]) T.writes(Z[v0, v1, v2]) Z[v0, v1, v2] = Z_global[v0, v1, v2] @T.prim_func def gmm_2(X: T.Buffer((1, 128, 128), "float32"), Y: T.Buffer((1, 128, 128), "float32"), Z: T.Buffer((1, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) for b_0, i_0, j_0, b_1, i_1, j_1, k_0, b_2, i_2, j_2, k_1, b_3, i_3, j_3 in T.grid(1, 4, 2, 1, 1, 8, 128, 1, 16, 1, 1, 1, 2, 8): with T.block("Z"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_i = 
T.axis.spatial(128, i_0 * 32 + i_1 * 32 + i_2 * 2 + i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 8 + j_3) v_k = T.axis.reduce(128, k_0 + k_1) T.reads(X[v_b, v_i, v_k], Y[v_b, v_k, v_j]) T.writes(Z[v_b, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): Z[v_b, v_i, v_j] = T.float32(0) Z[v_b, v_i, v_j] = Z[v_b, v_i, v_j] + X[v_b, v_i, v_k] * Y[v_b, v_k, v_j] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [4, 1, 16, 2]), ("SamplePerfectTile", [2, 8, 1, 8]), ("SamplePerfectTile", [128, 1]), ("SampleCategorical", 1), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [4, 1, 16, 2]), ("SamplePerfectTile", [2, 8, 1, 8]), ("SamplePerfectTile", [128, 1]), ("SampleCategorical", 1), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [4, 1, 16, 2]), ("SamplePerfectTile", [2, 8, 1, 8]), ("SamplePerfectTile", [128, 1]), ("SampleCategorical", 1), ] mod = create_te_workload("GMM", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[gmm_0, gmm_1, gmm_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_grp(): # fmt: off @T.prim_func def grp_0(inputs: T.Buffer((1, 56, 56, 64), "float32"), weight: T.Buffer((3, 3, 16, 128), "float32"), conv2d_nhwc: T.Buffer((1, 28, 28, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 58, 58, 64)) conv2d_nhwc_global = T.alloc_buffer((1, 28, 28, 128)) for n_0, h_0, w_0, co_0 in T.grid(1, 7, 1, 2): for ax0, ax1, ax2, ax3 in T.grid(1, 9, 57, 32): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(58, h_0 * 8 + ax1) v_i2 = T.axis.spatial(58, ax2) v_i3 = T.axis.spatial(64, co_0 * 32 + ax3) T.reads(inputs[v_i0, v_i1 - 
# --- (continuation) tail of test_cpu_gmm (gmm_2 + GMM decisions/check) above; test_cpu_grp
# below: expected sketches for grouped conv2d (GRP), grp_0/1/2 varying cache_write and
# PadInput compute-at placement. Generated expected modules — keep verbatim.
1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 57 and 1 <= v_i2 and v_i2 < 57, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for n_1, h_1, w_1, co_1 in T.grid(1, 4, 1, 1): for rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 3, 8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(28, h_0 * 4 + h_1 + h_2 + h_3) v_w = T.axis.spatial(28, w_0 * 28 + w_1 * 28 + w_2 * 7 + w_3) v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 64 + co_2 * 16 + co_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 + rw_1) v_rc = T.axis.reduce(16, rc_0 * 2 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 1, 28, 64): with T.block("conv2d_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(28, h_0 * 4 + h_1 + ax1) v2 = T.axis.spatial(28, ax2) v3 = T.axis.spatial(128, co_0 * 64 + ax3) T.reads(conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def grp_1(inputs: T.Buffer((1, 56, 56, 64), "float32"), weight: T.Buffer((3, 3, 16, 128), "float32"), conv2d_nhwc: T.Buffer((1, 28, 28, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 
512, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 58, 58, 64)) conv2d_nhwc_global = T.alloc_buffer((1, 28, 28, 128)) for i0, i1, i2, i3 in T.grid(1, 58, 58, 64): with T.block("PadInput"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 57 and 1 <= v_i2 and v_i2 < 57, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for n_0, h_0, w_0, co_0 in T.grid(1, 7, 1, 2): for n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 4, 1, 1, 1, 3, 8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(28, h_0 * 4 + h_1 + h_2 + h_3) v_w = T.axis.spatial(28, w_0 * 28 + w_1 * 28 + w_2 * 7 + w_3) v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 64 + co_2 * 16 + co_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 + rw_1) v_rc = T.axis.reduce(16, rc_0 * 2 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_nhwc_global[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 4, 28, 64): with T.block("conv2d_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(28, h_0 * 4 + ax1) v2 = T.axis.spatial(28, ax2) v3 = T.axis.spatial(128, co_0 * 64 + ax3) T.reads(conv2d_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_nhwc[v0, v1, v2, v3]) conv2d_nhwc[v0, v1, v2, v3] = conv2d_nhwc_global[v0, v1, v2, v3] @T.prim_func def grp_2(inputs: 
T.Buffer((1, 56, 56, 64), "float32"), weight: T.Buffer((3, 3, 16, 128), "float32"), conv2d_nhwc: T.Buffer((1, 28, 28, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 58, 58, 64)) for n_0, h_0, w_0, co_0, n_1, h_1, w_1, co_1, rh_0, rw_0 in T.grid(1, 7, 1, 2, 1, 4, 1, 1, 1, 3): for ax0, ax1, ax2, ax3 in T.grid(1, 3, 55, 32): with T.block("PadInput"): v_i0 = T.axis.spatial(1, ax0) v_i1 = T.axis.spatial(58, h_0 * 8 + h_1 * 2 + ax1) v_i2 = T.axis.spatial(58, rw_0 + ax2) v_i3 = T.axis.spatial(64, co_0 * 32 + ax3) T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 57 and 1 <= v_i2 and v_i2 < 57, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(8, 1, 1, 4, 4, 3, 1, 2, 1, 1, 7, 16): with T.block("conv2d_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(28, h_0 * 4 + h_1 + h_2 + h_3) v_w = T.axis.spatial(28, w_0 * 28 + w_1 * 28 + w_2 * 7 + w_3) v_co = T.axis.spatial(128, co_0 * 64 + co_1 * 64 + co_2 * 16 + co_3) v_rh = T.axis.reduce(3, rh_0 * 3 + rh_1) v_rw = T.axis.reduce(3, rw_0 + rw_1) v_rc = T.axis.reduce(16, rc_0 * 2 + rc_1) T.reads(PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc], weight[v_rh, v_rw, v_rc, v_co]) T.writes(conv2d_nhwc[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_nhwc[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_nhwc[v_n, v_h, v_w, v_co] = conv2d_nhwc[v_n, v_h, v_w, v_co] + PadInput[v_n, v_h * 2 + v_rh, v_w * 2 + v_rw, v_co // 32 * 16 + v_rc] * weight[v_rh, v_rw, v_rc, v_co] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 
# --- Remaining GRP decisions + sketch check, then test_cpu_t2d: expected sketches for a
# transposed conv2d (T2D). Note test_cpu_t2d calls check_sketches with debug_mask=0 (at the end
# of this test, next range) — presumably to skip verification passes; confirm against the
# helper's signature. Generated expected modules — keep verbatim.
1]), ("SamplePerfectTile", [7, 4, 1, 1]), ("SamplePerfectTile", [1, 1, 4, 7]), ("SamplePerfectTile", [2, 1, 4, 16]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [3, 1]), ("SamplePerfectTile", [8, 2]), ("SampleCategorical", 1), ("SampleComputeLocation", 3), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [7, 4, 1, 1]), ("SamplePerfectTile", [1, 1, 4, 7]), ("SamplePerfectTile", [2, 1, 4, 16]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [3, 1]), ("SamplePerfectTile", [8, 2]), ("SampleCategorical", 3), ("SampleComputeLocation", -1), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [7, 4, 1, 1]), ("SamplePerfectTile", [1, 1, 4, 7]), ("SamplePerfectTile", [2, 1, 4, 16]), ("SamplePerfectTile", [1, 3]), ("SamplePerfectTile", [3, 1]), ("SamplePerfectTile", [8, 2]), ("SampleCategorical", 1), ("SampleComputeLocation", 9), ] mod = create_te_workload("GRP", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[grp_0, grp_1, grp_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_t2d(): # fmt: off @T.prim_func def t2d_0(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 6, 6, 512)) conv2d_transpose_nhwc_global = T.alloc_buffer((1, 8, 8, 256)) for i0, i1, i2, i3 in T.grid(1, 6, 6, 512): with T.block("PadInput"): v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 5 and 1 <= v_i2 and v_i2 < 5, inputs[v_i0, v_i1 - 1, v_i2 - 1, 
v_i3], T.float32(0)) for n_0, h_0, w_0, co_0, n_1, h_1, w_1, co_1 in T.grid(1, 1, 2, 8, 1, 4, 1, 4): for rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8): with T.block("conv2d_transpose_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(8, h_0 * 8 + h_1 * 2 + h_2 * 2 + h_3) v_w = T.axis.spatial(8, w_0 * 4 + w_1 * 4 + w_2 * 4 + w_3) v_co = T.axis.spatial(256, co_0 * 32 + co_1 * 8 + co_2 * 8 + co_3) v_rh = T.axis.reduce(4, rh_0 * 2 + rh_1) v_rw = T.axis.reduce(4, rw_0 * 2 + rw_1) v_rc = T.axis.reduce(512, rc_0 * 8 + rc_1) T.reads(PadInput[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], weight[3 - v_rh, 3 - v_rw, v_rc, v_co]) T.writes(conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] + T.if_then_else((v_h + v_rh) % 2 == 0 and (v_w + v_rw) % 2 == 0, PadInput[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], T.float32(0)) * weight[3 - v_rh, 3 - v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 2, 4, 8): with T.block("conv2d_transpose_nhwc_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(8, h_1 * 2 + ax1) v2 = T.axis.spatial(8, w_0 * 4 + ax2) v3 = T.axis.spatial(256, co_0 * 32 + co_1 * 8 + ax3) T.reads(conv2d_transpose_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3]) conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_global[v0, v1, v2, v3] @T.prim_func def t2d_1(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, 
"meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64}) PadInput = T.alloc_buffer((1, 6, 6, 512)) conv2d_transpose_nhwc_global = T.alloc_buffer((1, 8, 8, 256)) for n_0, h_0, w_0, co_0 in T.grid(1, 1, 2, 8): for ax0, ax1, ax2, ax3 in T.grid(1, 6, 4, 512): with T.block("PadInput"): v_i0, v_i1 = T.axis.remap("SS", [ax0, ax1]) v_i2 = T.axis.spatial(6, w_0 * 2 + ax2) v_i3 = T.axis.spatial(512, ax3) T.reads(inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3]) T.writes(PadInput[v_i0, v_i1, v_i2, v_i3]) PadInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(1 <= v_i1 and v_i1 < 5 and 1 <= v_i2 and v_i2 < 5, inputs[v_i0, v_i1 - 1, v_i2 - 1, v_i3], T.float32(0)) for n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 4, 1, 4, 2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8): with T.block("conv2d_transpose_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(8, h_0 * 8 + h_1 * 2 + h_2 * 2 + h_3) v_w = T.axis.spatial(8, w_0 * 4 + w_1 * 4 + w_2 * 4 + w_3) v_co = T.axis.spatial(256, co_0 * 32 + co_1 * 8 + co_2 * 8 + co_3) v_rh = T.axis.reduce(4, rh_0 * 2 + rh_1) v_rw = T.axis.reduce(4, rw_0 * 2 + rw_1) v_rc = T.axis.reduce(512, rc_0 * 8 + rc_1) T.reads(PadInput[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], weight[3 - v_rh, 3 - v_rw, v_rc, v_co]) T.writes(conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] = conv2d_transpose_nhwc_global[v_n, v_h, v_w, v_co] + T.if_then_else((v_h + v_rh) % 2 == 0 and (v_w + v_rw) % 2 == 0, PadInput[v_n, (v_h + v_rh) // 2, (v_w + v_rw) // 2, v_rc], T.float32(0)) * weight[3 - v_rh, 3 - v_rw, v_rc, v_co] for ax0, ax1, ax2, ax3 in T.grid(1, 8, 4, 32): with T.block("conv2d_transpose_nhwc_global"): v0, v1 = T.axis.remap("SS", [ax0, ax1]) v2 = T.axis.spatial(8, w_0 * 4 + ax2) v3 = 
T.axis.spatial(256, co_0 * 32 + ax3) T.reads(conv2d_transpose_nhwc_global[v0, v1, v2, v3]) T.writes(conv2d_transpose_nhwc[v0, v1, v2, v3]) conv2d_transpose_nhwc[v0, v1, v2, v3] = conv2d_transpose_nhwc_global[v0, v1, v2, v3] @T.prim_func def t2d_2(inputs: T.Buffer((1, 4, 4, 512), "float32"), weight: T.Buffer((4, 4, 512, 256), "float32"), conv2d_transpose_nhwc: T.Buffer((1, 8, 8, 256), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64}) for n_0, h_0, w_0, co_0, n_1, h_1, w_1, co_1, rh_0, rw_0, rc_0, n_2, h_2, w_2, co_2, rh_1, rw_1, rc_1, n_3, h_3, w_3, co_3 in T.grid(1, 1, 2, 8, 1, 4, 1, 4, 2, 2, 64, 1, 1, 1, 1, 2, 2, 8, 1, 2, 4, 8): with T.block("conv2d_transpose_nhwc"): v_n = T.axis.spatial(1, n_0 + n_1 + n_2 + n_3) v_h = T.axis.spatial(8, h_0 * 8 + h_1 * 2 + h_2 * 2 + h_3) v_w = T.axis.spatial(8, w_0 * 4 + w_1 * 4 + w_2 * 4 + w_3) v_co = T.axis.spatial(256, co_0 * 32 + co_1 * 8 + co_2 * 8 + co_3) v_rh = T.axis.reduce(4, rh_0 * 2 + rh_1) v_rw = T.axis.reduce(4, rw_0 * 2 + rw_1) v_rc = T.axis.reduce(512, rc_0 * 8 + rc_1) T.reads(inputs[v_n, (v_h + v_rh) // 2 - 1, (v_w + v_rw) // 2 - 1, v_rc], weight[3 - v_rh, 3 - v_rw, v_rc, v_co]) T.writes(conv2d_transpose_nhwc[v_n, v_h, v_w, v_co]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): conv2d_transpose_nhwc[v_n, v_h, v_w, v_co] = T.float32(0) conv2d_transpose_nhwc[v_n, v_h, v_w, v_co] = conv2d_transpose_nhwc[v_n, v_h, v_w, v_co] + T.if_then_else((v_h + v_rh) % 2 == 0 and (v_w + v_rw) % 2 == 0, T.if_then_else(1 <= (v_h + v_rh) // 2 and (v_h + v_rh) // 2 < 5 and 1 <= (v_w + v_rw) // 2 and (v_w + v_rw) // 2 < 5, inputs[v_n, (v_h + v_rh) // 2 - 1, (v_w + v_rw) // 2 - 1, v_rc], T.float32(0)), T.float32(0)) * weight[3 - v_rh, 3 - v_rw, v_rc, v_co] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 1]), 
# --- Remaining T2D decisions + sketch check (note debug_mask=0), then test_cpu_nrm: expected
# sketches for an L2-norm reduction (NRM); nrm_0/1 use rfactor (C_rf) with different factor
# axes, nrm_2 keeps the plain reduction. Generated expected modules — keep verbatim.
("SamplePerfectTile", [1, 4, 1, 2]), ("SamplePerfectTile", [2, 1, 1, 4]), ("SamplePerfectTile", [8, 4, 1, 8]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [64, 8]), ("SampleCategorical", 2), ("SampleComputeLocation", -1), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 4, 1, 2]), ("SamplePerfectTile", [2, 1, 1, 4]), ("SamplePerfectTile", [8, 4, 1, 8]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [64, 8]), ("SampleCategorical", 2), ("SampleComputeLocation", 3), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 4, 1, 2]), ("SamplePerfectTile", [2, 1, 1, 4]), ("SamplePerfectTile", [8, 4, 1, 8]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [2, 2]), ("SamplePerfectTile", [64, 8]), ("SampleCategorical", 3), ("SampleComputeLocation", -2), ] mod = create_te_workload("T2D", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[t2d_0, t2d_1, t2d_2], expected_decisions=[decision_0, decision_1, decision_2], debug_mask=0, ) def test_cpu_nrm(): # fmt: off @T.prim_func def nrm_0(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer(1, "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64}) C = T.alloc_buffer((1,)) C_rf = T.alloc_buffer((1, 32768)) for b, i_j_fused_0, i_j_fused_1 in T.grid(1, 32768, 2): with T.block("C_rf"): vi_j_fused_0, v_b, vi_j_fused_1 = T.axis.remap("SSR", [i_j_fused_0, b, i_j_fused_1]) T.reads(A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + vi_j_fused_1) % 256]) T.writes(C_rf[v_b, vi_j_fused_0]) with T.init(): C_rf[v_b, vi_j_fused_0] = T.float32(0) C_rf[v_b, vi_j_fused_0] = C_rf[v_b, vi_j_fused_0] + A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + 
vi_j_fused_1) % 256] * A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + vi_j_fused_1) % 256] for b, i_j_fused_0 in T.grid(1, 32768): with T.block("C"): vi_j_fused_0, v_b = T.axis.remap("RS", [i_j_fused_0, b]) T.reads(C_rf[v_b, vi_j_fused_0]) T.writes(C[v_b]) with T.init(): C[v_b] = T.float32(0) C[v_b] = C[v_b] + C_rf[v_b, vi_j_fused_0] for b in range(1): with T.block("D"): v_b = T.axis.spatial(1, b) T.reads(C[v_b]) T.writes(D[v_b]) D[v_b] = T.sqrt(C[v_b]) @T.prim_func def nrm_1(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer(1, "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64}) C = T.alloc_buffer((1,)) C_rf = T.alloc_buffer((1, 2)) for b, i_j_fused_0, i_j_fused_1 in T.grid(1, 32768, 2): with T.block("C_rf"): vi_j_fused_1, v_b, vi_j_fused_0 = T.axis.remap("SSR", [i_j_fused_1, b, i_j_fused_0]) T.reads(A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + vi_j_fused_1) % 256]) T.writes(C_rf[v_b, vi_j_fused_1]) with T.init(): C_rf[v_b, vi_j_fused_1] = T.float32(0) C_rf[v_b, vi_j_fused_1] = C_rf[v_b, vi_j_fused_1] + A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + vi_j_fused_1) % 256] * A[v_b, (vi_j_fused_0 * 2 + vi_j_fused_1) // 256, (vi_j_fused_0 * 2 + vi_j_fused_1) % 256] for b, i_j_fused_1 in T.grid(1, 2): with T.block("C"): vi_j_fused_1, v_b = T.axis.remap("RS", [i_j_fused_1, b]) T.reads(C_rf[v_b, vi_j_fused_1]) T.writes(C[v_b]) with T.init(): C[v_b] = T.float32(0) C[v_b] = C[v_b] + C_rf[v_b, vi_j_fused_1] for b in range(1): with T.block("D"): v_b = T.axis.spatial(1, b) T.reads(C[v_b]) T.writes(D[v_b]) D[v_b] = T.sqrt(C[v_b]) @T.prim_func def nrm_2(A: T.Buffer((1, 256, 256), "float32"), D: T.Buffer(1, "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): 
T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64}) C = T.alloc_buffer((1,)) for b, i, j in T.grid(1, 256, 256): with T.block("C"): v_b, v_i, v_j = T.axis.remap("SRR", [b, i, j]) T.reads(A[v_b, v_i, v_j]) T.writes(C[v_b]) with T.init(): C[v_b] = T.float32(0) C[v_b] = C[v_b] + A[v_b, v_i, v_j] * A[v_b, v_i, v_j] for b in range(1): with T.block("D"): v_b = T.axis.spatial(1, b) T.reads(C[v_b]) T.writes(D[v_b]) D[v_b] = T.sqrt(C[v_b]) # fmt: on decision_0 = [ ("SamplePerfectTile", [32768, 2]), ("SampleCategorical", 0), ("SampleComputeLocation", -1), ("SampleComputeLocation", -1), ] decision_1 = [ ("SamplePerfectTile", [32768, 2]), ("SampleCategorical", 1), ("SampleComputeLocation", -1), ("SampleComputeLocation", -1), ] decision_2 = [ ("SampleCategorical", 0), ("SampleComputeLocation", -1), ] mod = create_te_workload("NRM", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[nrm_0, nrm_1, nrm_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_sfm(): # fmt: off @T.prim_func def sfm_0(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64}) T_softmax_maxelem = T.alloc_buffer((256,)) T_softmax_expsum = T.alloc_buffer((256,)) T_softmax_expsum_rf = T.alloc_buffer((256, 16)) T_softmax_maxelem_rf = T.alloc_buffer((256, 4)) for i0, k_0, k_1 in T.grid(256, 4, 64): with T.block("T_softmax_maxelem_rf"): vk_0, v_i0, vk_1 = T.axis.remap("SSR", [k_0, i0, k_1]) T.reads(A[v_i0, vk_0 * 64 + vk_1]) T.writes(T_softmax_maxelem_rf[v_i0, vk_0]) with T.init(): T_softmax_maxelem_rf[v_i0, vk_0] = T.float32(-3.4028234663852886e+38) T_softmax_maxelem_rf[v_i0, vk_0] = T.max(T_softmax_maxelem_rf[v_i0, 
                        vk_0], A[v_i0, vk_0 * 64 + vk_1])
            for i0, k_0 in T.grid(256, 4):
                with T.block("T_softmax_maxelem"):
                    vk_0, v_i0 = T.axis.remap("RS", [k_0, i0])
                    T.reads(T_softmax_maxelem_rf[v_i0, vk_0])
                    T.writes(T_softmax_maxelem[v_i0])
                    with T.init():
                        T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                    T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_0])
            for i0, k_0, k_1 in T.grid(256, 16, 16):
                with T.block("T_softmax_expsum_rf"):
                    vk_0, v_i0, vk_1 = T.axis.remap("SSR", [k_0, i0, k_1])
                    T.reads(A[v_i0, vk_0 * 16 + vk_1], T_softmax_maxelem[v_i0])
                    T.writes(T_softmax_expsum_rf[v_i0, vk_0])
                    with T.init():
                        T_softmax_expsum_rf[v_i0, vk_0] = T.float32(0)
                    T_softmax_expsum_rf[v_i0, vk_0] = T_softmax_expsum_rf[v_i0, vk_0] + T.exp(A[v_i0, vk_0 * 16 + vk_1] - T_softmax_maxelem[v_i0])
            for i0, i1 in T.grid(256, 256):
                for ax0, ax1 in T.grid(16, 1):
                    with T.block("T_softmax_expsum"):
                        vk_0 = T.axis.reduce(16, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        T.reads(T_softmax_expsum_rf[v_i0, vk_0])
                        T.writes(T_softmax_expsum[v_i0])
                        with T.init():
                            T_softmax_expsum[v_i0] = T.float32(0)
                        T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_0]
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    # Sketch 1: both reductions rfactor-ed; the inner 64-way split is the
    # maxelem factor axis, and the exp is cached in its own buffer.
    @T.prim_func
    def sfm_1(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 16, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_exp = T.alloc_buffer((256, 256))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_expsum_rf = T.alloc_buffer((256, 16))
            T_softmax_maxelem_rf = T.alloc_buffer((256, 64))
            for i0 in range(256):
                for ax0, ax1, ax2 in T.grid(64, 1, 4):
                    with T.block("T_softmax_maxelem_rf"):
                        vk_1 = T.axis.spatial(64, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        vk_0 = T.axis.reduce(4, ax2)
                        T.reads(A[v_i0, vk_0 * 64 + vk_1])
                        T.writes(T_softmax_maxelem_rf[v_i0, vk_1])
                        with T.init():
                            T_softmax_maxelem_rf[v_i0, vk_1] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem_rf[v_i0, vk_1] = T.max(T_softmax_maxelem_rf[v_i0, vk_1], A[v_i0, vk_0 * 64 + vk_1])
                for i1 in range(256):
                    for ax0, ax1 in T.grid(64, 1):
                        with T.block("T_softmax_maxelem"):
                            vk_1 = T.axis.reduce(64, ax0)
                            v_i0 = T.axis.spatial(256, i0 + ax1)
                            T.reads(T_softmax_maxelem_rf[v_i0, vk_1])
                            T.writes(T_softmax_maxelem[v_i0])
                            with T.init():
                                T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                            T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_1])
                    with T.block("T_softmax_exp"):
                        v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0])
                        T.writes(T_softmax_exp[v_i0, v_i1])
                        T_softmax_exp[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0])
            for i0, k_0, k_1 in T.grid(256, 16, 16):
                with T.block("T_softmax_expsum_rf"):
                    vk_0, v_i0, vk_1 = T.axis.remap("SSR", [k_0, i0, k_1])
                    T.reads(T_softmax_exp[v_i0, vk_0 * 16 + vk_1])
                    T.writes(T_softmax_expsum_rf[v_i0, vk_0])
                    with T.init():
                        T_softmax_expsum_rf[v_i0, vk_0] = T.float32(0)
                    T_softmax_expsum_rf[v_i0, vk_0] = T_softmax_expsum_rf[v_i0, vk_0] + T_softmax_exp[v_i0, vk_0 * 16 + vk_1]
            for i0, k_0 in T.grid(256, 16):
                with T.block("T_softmax_expsum"):
                    vk_0, v_i0 = T.axis.remap("RS", [k_0, i0])
                    T.reads(T_softmax_expsum_rf[v_i0, vk_0])
                    T.writes(T_softmax_expsum[v_i0])
                    with T.init():
                        T_softmax_expsum[v_i0] = T.float32(0)
                    T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_0]
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(T_softmax_exp[v_i0, v_i1], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T_softmax_exp[v_i0, v_i1] / T_softmax_expsum[v_i0]
    # Sketch 2: maxelem reduced directly (no rfactor); only expsum is
    # rfactor-ed (16 x 16 split).
    @T.prim_func
    def sfm_2(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_expsum_rf = T.alloc_buffer((256, 16))
            for i0, k in T.grid(256, 256):
                with T.block("T_softmax_maxelem"):
                    v_i0, v_k = T.axis.remap("SR", [i0, k])
                    T.reads(A[v_i0, v_k])
                    T.writes(T_softmax_maxelem[v_i0])
                    with T.init():
                        T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                    T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
            for i0, k_0, k_1 in T.grid(256, 16, 16):
                with T.block("T_softmax_expsum_rf"):
                    vk_0, v_i0, vk_1 = T.axis.remap("SSR", [k_0, i0, k_1])
                    T.reads(A[v_i0, vk_0 * 16 + vk_1], T_softmax_maxelem[v_i0])
                    T.writes(T_softmax_expsum_rf[v_i0, vk_0])
                    with T.init():
                        T_softmax_expsum_rf[v_i0, vk_0] = T.float32(0)
                    T_softmax_expsum_rf[v_i0, vk_0] = T_softmax_expsum_rf[v_i0, vk_0] + T.exp(A[v_i0, vk_0 * 16 + vk_1] - T_softmax_maxelem[v_i0])
            for i0, k_0 in T.grid(256, 16):
                with T.block("T_softmax_expsum"):
                    vk_0, v_i0 = T.axis.remap("RS", [k_0, i0])
                    T.reads(T_softmax_expsum_rf[v_i0, vk_0])
                    T.writes(T_softmax_expsum[v_i0])
                    with T.init():
                        T_softmax_expsum[v_i0] = T.float32(0)
                    T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_0]
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    # Sketch 3: all producer blocks computed inside the (i0, i1) output loop
    # nest; maxelem rfactor-ed with a 256 x 1 split.
    @T.prim_func
    def sfm_3(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_exp = T.alloc_buffer((256, 256))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_expsum_rf = T.alloc_buffer((256, 16))
            T_softmax_maxelem_rf = T.alloc_buffer((256, 256))
            for i0, i1 in T.grid(256, 256):
                for ax0, ax1, ax2 in T.grid(256, 1, 1):
                    with T.block("T_softmax_maxelem_rf"):
                        vk_0 = T.axis.spatial(256, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        vk_1 = T.axis.reduce(1, ax2)
                        T.reads(A[v_i0, vk_0 + vk_1])
                        T.writes(T_softmax_maxelem_rf[v_i0, vk_0])
                        with T.init():
                            T_softmax_maxelem_rf[v_i0, vk_0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem_rf[v_i0, vk_0] = T.max(T_softmax_maxelem_rf[v_i0, vk_0], A[v_i0, vk_0 + vk_1])
                for ax0, ax1 in T.grid(256, 1):
                    with T.block("T_softmax_maxelem"):
                        vk_0 = T.axis.reduce(256, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        T.reads(T_softmax_maxelem_rf[v_i0, vk_0])
                        T.writes(T_softmax_maxelem[v_i0])
                        with T.init():
                            T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_0])
                for ax0, ax1 in T.grid(1, 256):
                    with T.block("T_softmax_exp"):
                        v_i0 = T.axis.spatial(256, i0 + ax0)
                        v_i1 = T.axis.spatial(256, ax1)
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0])
                        T.writes(T_softmax_exp[v_i0, v_i1])
                        T_softmax_exp[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0])
                for ax0 in range(16):
                    for ax0_1, ax1, ax2 in T.grid(1, 1, 16):
                        with T.block("T_softmax_expsum_rf"):
                            vk_1 = T.axis.spatial(16, ax0 + ax0_1)
                            v_i0 = T.axis.spatial(256, i0 + ax1)
                            vk_0 = T.axis.reduce(16, ax2)
                            T.reads(T_softmax_exp[v_i0, vk_0 * 16 + vk_1])
                            T.writes(T_softmax_expsum_rf[v_i0, vk_1])
                            with T.init():
                                T_softmax_expsum_rf[v_i0, vk_1] = T.float32(0)
                            T_softmax_expsum_rf[v_i0, vk_1] = T_softmax_expsum_rf[v_i0, vk_1] + T_softmax_exp[v_i0, vk_0 * 16 + vk_1]
                    for ax1 in range(1):
                        with T.block("T_softmax_expsum"):
                            vk_1 = T.axis.reduce(16, ax0)
                            v_i0 = T.axis.spatial(256, i0 + ax1)
                            T.reads(T_softmax_expsum_rf[v_i0, vk_1])
                            T.writes(T_softmax_expsum[v_i0])
                            with T.init():
                                T_softmax_expsum[v_i0] = T.float32(0)
                            T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_1]
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(T_softmax_exp[v_i0, v_i1], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T_softmax_exp[v_i0, v_i1] / T_softmax_expsum[v_i0]
    # Sketch 4: maxelem rfactor-ed with a degenerate 256 x 1 split (factor
    # axis of extent 1); expsum rfactor-ed on the inner 16-way split.
    @T.prim_func
    def sfm_4(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 0, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_exp = T.alloc_buffer((256, 256))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_expsum_rf = T.alloc_buffer((256, 16))
            T_softmax_maxelem_rf = T.alloc_buffer((256, 1))
            for i0 in range(256):
                for ax0, ax1, ax2 in T.grid(1, 1, 256):
                    with T.block("T_softmax_maxelem_rf"):
                        vk_1 = T.axis.spatial(1, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        vk_0 = T.axis.reduce(256, ax2)
                        T.reads(A[v_i0, vk_0 + vk_1])
                        T.writes(T_softmax_maxelem_rf[v_i0, vk_1])
                        with T.init():
                            T_softmax_maxelem_rf[v_i0, vk_1] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem_rf[v_i0, vk_1] = T.max(T_softmax_maxelem_rf[v_i0, vk_1], A[v_i0, vk_0 + vk_1])
                for k_1 in range(1):
                    with T.block("T_softmax_maxelem"):
                        vk_1, v_i0 = T.axis.remap("RS", [k_1, i0])
                        T.reads(T_softmax_maxelem_rf[v_i0, vk_1])
                        T.writes(T_softmax_maxelem[v_i0])
                        with T.init():
                            T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_1])
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_exp"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0])
                    T.writes(T_softmax_exp[v_i0, v_i1])
                    T_softmax_exp[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0])
            for i0, k_0, k_1 in T.grid(256, 16, 16):
                with T.block("T_softmax_expsum_rf"):
                    vk_1, v_i0, vk_0 = T.axis.remap("SSR", [k_1, i0, k_0])
                    T.reads(T_softmax_exp[v_i0, vk_0 * 16 + vk_1])
                    T.writes(T_softmax_expsum_rf[v_i0, vk_1])
                    with T.init():
                        T_softmax_expsum_rf[v_i0, vk_1] = T.float32(0)
                    T_softmax_expsum_rf[v_i0, vk_1] = T_softmax_expsum_rf[v_i0, vk_1] + T_softmax_exp[v_i0, vk_0 * 16 + vk_1]
            for i0, k_1 in T.grid(256, 16):
                with T.block("T_softmax_expsum"):
                    vk_1, v_i0 = T.axis.remap("RS", [k_1, i0])
                    T.reads(T_softmax_expsum_rf[v_i0, vk_1])
                    T.writes(T_softmax_expsum[v_i0])
                    with T.init():
                        T_softmax_expsum[v_i0] = T.float32(0)
                    T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_1]
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(T_softmax_exp[v_i0, v_i1], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T_softmax_exp[v_i0, v_i1] / T_softmax_expsum[v_i0]
    # Sketch 5: maxelem reduced directly inside the row loop; only expsum is
    # rfactor-ed; the exp is cached per row.
    @T.prim_func
    def sfm_5(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_exp = T.alloc_buffer((256, 256))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_expsum_rf = T.alloc_buffer((256, 16))
            for i0 in range(256):
                for ax0, ax1 in T.grid(1, 256):
                    with T.block("T_softmax_maxelem"):
                        v_i0 = T.axis.spatial(256, i0 + ax0)
                        v_k = T.axis.reduce(256, ax1)
                        T.reads(A[v_i0, v_k])
                        T.writes(T_softmax_maxelem[v_i0])
                        with T.init():
                            T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
                for ax0, ax1 in T.grid(1, 256):
                    with T.block("T_softmax_exp"):
                        v_i0 = T.axis.spatial(256, i0 + ax0)
                        v_i1 = T.axis.spatial(256, ax1)
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0])
                        T.writes(T_softmax_exp[v_i0, v_i1])
                        T_softmax_exp[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0])
                for ax0 in range(16):
                    for ax0_1, ax1, ax2 in T.grid(1, 1, 16):
                        with T.block("T_softmax_expsum_rf"):
                            vk_1 = T.axis.spatial(16, ax0 + ax0_1)
                            v_i0 = T.axis.spatial(256, i0 + ax1)
                            vk_0 = T.axis.reduce(16, ax2)
                            T.reads(T_softmax_exp[v_i0, vk_0 * 16 + vk_1])
                            T.writes(T_softmax_expsum_rf[v_i0, vk_1])
                            with T.init():
                                T_softmax_expsum_rf[v_i0, vk_1] = T.float32(0)
                            T_softmax_expsum_rf[v_i0, vk_1] = T_softmax_expsum_rf[v_i0, vk_1] + T_softmax_exp[v_i0, vk_0 * 16 + vk_1]
                    for ax1 in range(1):
                        with T.block("T_softmax_expsum"):
                            vk_1 = T.axis.reduce(16, ax0)
                            v_i0 = T.axis.spatial(256, i0 + ax1)
                            T.reads(T_softmax_expsum_rf[v_i0, vk_1])
                            T.writes(T_softmax_expsum[v_i0])
                            with T.init():
                                T_softmax_expsum[v_i0] = T.float32(0)
                            T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_expsum_rf[v_i0, vk_1]
                for i1 in range(256):
                    with T.block("T_softmax_norm"):
                        v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                        T.reads(T_softmax_exp[v_i0, v_i1], T_softmax_expsum[v_i0])
                        T.writes(T_softmax_norm[v_i0, v_i1])
                        T.block_attr({"axis": 1})
                        T_softmax_norm[v_i0, v_i1] = T_softmax_exp[v_i0, v_i1] / T_softmax_expsum[v_i0]
    # Sketch 6: only maxelem rfactor-ed (64 x 4 split, outer factor axis);
    # expsum reduced directly with the exp folded in.
    @T.prim_func
    def sfm_6(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_maxelem_rf = T.alloc_buffer((256, 64))
            for i0 in range(256):
                for ax0, ax1, ax2 in T.grid(64, 1, 4):
                    with T.block("T_softmax_maxelem_rf"):
                        vk_0 = T.axis.spatial(64, ax0)
                        v_i0 = T.axis.spatial(256, i0 + ax1)
                        vk_1 = T.axis.reduce(4, ax2)
                        T.reads(A[v_i0, vk_0 * 4 + vk_1])
                        T.writes(T_softmax_maxelem_rf[v_i0, vk_0])
                        with T.init():
                            T_softmax_maxelem_rf[v_i0, vk_0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem_rf[v_i0, vk_0] = T.max(T_softmax_maxelem_rf[v_i0, vk_0], A[v_i0, vk_0 * 4 + vk_1])
                for k_0 in range(64):
                    with T.block("T_softmax_maxelem"):
                        vk_0, v_i0 = T.axis.remap("RS", [k_0, i0])
                        T.reads(T_softmax_maxelem_rf[v_i0, vk_0])
                        T.writes(T_softmax_maxelem[v_i0])
                        with T.init():
                            T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_0])
            for i0, k in T.grid(256, 256):
                with T.block("T_softmax_expsum"):
                    v_i0, v_k = T.axis.remap("SR", [i0, k])
                    T.reads(A[v_i0, v_k], T_softmax_maxelem[v_i0])
                    T.writes(T_softmax_expsum[v_i0])
                    with T.init():
                        T_softmax_expsum[v_i0] = T.float32(0)
                    T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem[v_i0])
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    # Sketch 7: only maxelem rfactor-ed (64 x 4 split, inner factor axis);
    # expsum computed directly inside the output loop nest.
    @T.prim_func
    def sfm_7(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_expsum = T.alloc_buffer((256,))
            T_softmax_maxelem_rf = T.alloc_buffer((256, 4))
            for i0, k_0, k_1 in T.grid(256, 64, 4):
                with T.block("T_softmax_maxelem_rf"):
                    vk_1, v_i0, vk_0 = T.axis.remap("SSR", [k_1, i0, k_0])
                    T.reads(A[v_i0, vk_0 * 4 + vk_1])
                    T.writes(T_softmax_maxelem_rf[v_i0, vk_1])
                    with T.init():
                        T_softmax_maxelem_rf[v_i0, vk_1] = T.float32(-3.4028234663852886e+38)
                    T_softmax_maxelem_rf[v_i0, vk_1] = T.max(T_softmax_maxelem_rf[v_i0, vk_1], A[v_i0, vk_0 * 4 + vk_1])
            for i0, k_1 in T.grid(256, 4):
                with T.block("T_softmax_maxelem"):
                    vk_1, v_i0 = T.axis.remap("RS", [k_1, i0])
                    T.reads(T_softmax_maxelem_rf[v_i0, vk_1])
                    T.writes(T_softmax_maxelem[v_i0])
                    with T.init():
                        T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                    T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], T_softmax_maxelem_rf[v_i0, vk_1])
            for i0, i1 in T.grid(256, 256):
                for ax0, ax1 in T.grid(1, 256):
                    with T.block("T_softmax_expsum"):
                        v_i0 = T.axis.spatial(256, i0 + ax0)
                        v_k = T.axis.reduce(256, ax1)
                        T.reads(A[v_i0, v_k], T_softmax_maxelem[v_i0])
                        T.writes(T_softmax_expsum[v_i0])
                        with T.init():
                            T_softmax_expsum[v_i0] = T.float32(0)
                        T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T.exp(A[v_i0, v_k] - T_softmax_maxelem[v_i0])
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0]) / T_softmax_expsum[v_i0]
    # Sketch 8: no rfactor at all; both reductions computed directly and the
    # exp cached in its own buffer.
    @T.prim_func
    def sfm_8(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            T_softmax_maxelem = T.alloc_buffer((256,))
            T_softmax_exp = T.alloc_buffer((256, 256))
            T_softmax_expsum = T.alloc_buffer((256,))
            for i0 in range(256):
                for ax0, ax1 in T.grid(1, 256):
                    with T.block("T_softmax_maxelem"):
                        v_i0 = T.axis.spatial(256, i0 + ax0)
                        v_k = T.axis.reduce(256, ax1)
                        T.reads(A[v_i0, v_k])
                        T.writes(T_softmax_maxelem[v_i0])
                        with T.init():
                            T_softmax_maxelem[v_i0] = T.float32(-3.4028234663852886e+38)
                        T_softmax_maxelem[v_i0] = T.max(T_softmax_maxelem[v_i0], A[v_i0, v_k])
                for i1 in range(256):
                    with T.block("T_softmax_exp"):
                        v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                        T.reads(A[v_i0, v_i1], T_softmax_maxelem[v_i0])
                        T.writes(T_softmax_exp[v_i0, v_i1])
                        T_softmax_exp[v_i0, v_i1] = T.exp(A[v_i0, v_i1] - T_softmax_maxelem[v_i0])
            for i0, k in T.grid(256, 256):
                with T.block("T_softmax_expsum"):
                    v_i0, v_k = T.axis.remap("SR", [i0, k])
                    T.reads(T_softmax_exp[v_i0, v_k])
                    T.writes(T_softmax_expsum[v_i0])
                    with T.init():
                        T_softmax_expsum[v_i0] = T.float32(0)
                    T_softmax_expsum[v_i0] = T_softmax_expsum[v_i0] + T_softmax_exp[v_i0, v_k]
            for i0, i1 in T.grid(256, 256):
                with T.block("T_softmax_norm"):
                    v_i0, v_i1 = T.axis.remap("SS", [i0, i1])
                    T.reads(T_softmax_exp[v_i0, v_i1], T_softmax_expsum[v_i0])
                    T.writes(T_softmax_norm[v_i0, v_i1])
                    T.block_attr({"axis": 1})
                    T_softmax_norm[v_i0, v_i1] = T_softmax_exp[v_i0, v_i1] / T_softmax_expsum[v_i0]
    # fmt: on
    # Sampled decisions that reproduce each expected sketch above.
    decision_0 = [
        ("SamplePerfectTile", [16, 16]),
        ("SamplePerfectTile", [4, 64]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -2),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
    ]
    decision_1 = [
        ("SamplePerfectTile", [16, 16]),
        ("SamplePerfectTile", [4, 64]),
        ("SampleCategorical", 1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 0),
    ]
    decision_2 = [
        ("SamplePerfectTile", [16, 16]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -2),
        ("SampleComputeLocation", -1),
    ]
    decision_3 = [
        ("SamplePerfectTile", [16, 16]),
        ("SamplePerfectTile", [256, 1]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 2),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 1),
    ]
    decision_4 = [
        ("SamplePerfectTile", [16, 16]),
        ("SamplePerfectTile", [256, 1]),
        ("SampleCategorical", 0),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", 0),
    ]
    decision_5 = [
        ("SamplePerfectTile", [16, 16]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 0),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", 0),
        ("SampleComputeLocation", 0),
    ]
    decision_6 = [
        ("SamplePerfectTile", [64, 4]),
        ("SampleCategorical", 2),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -2),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", 0),
    ]
    decision_7 = [
        ("SamplePerfectTile", [64, 4]),
        ("SampleCategorical", 2),
        ("SampleComputeLocation", 1),
        ("SampleComputeLocation", -2),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
    ]
    decision_8 = [
        ("SampleCategorical", 3),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", -1),
        ("SampleComputeLocation", 0),
    ]
    # Generate the design space for the SFM workload and check it against the
    # nine expected sketches/decision traces.
    mod = create_te_workload("SFM", 0)
    actual = _design_space(mod)
    check_sketches(
        mod,
        sketches=actual,
        expected_mods=[sfm_0, sfm_1, sfm_2, sfm_3, sfm_4, sfm_5, sfm_6, sfm_7, sfm_8],
        expected_decisions=[
            decision_0,
            decision_1,
            decision_2,
            decision_3,
            decision_4,
            decision_5,
            decision_6,
            decision_7,
            decision_8,
        ],
    )


def test_cpu_cbr():
    """Check the CPU sketch design space generated for the "CBR" workload.

    The workload is a strided conv2d followed by a bias-add, a scale/offset
    (batch-norm style) and a ReLU (max with 0).  The three expected sketches
    differ in whether the input padding is materialized and where the
    epilogue is computed.
    """
    # fmt: off
    # Sketch 0: padding folded into the conv via T.if_then_else (no
    # PaddedInput buffer); epilogue computed in a separate full loop nest.
    @T.prim_func
    def cbr_0(data: T.Buffer((1, 224, 224, 3), "float32"), kernel: T.Buffer((7, 7, 3, 64), "float32"), bias: T.Buffer(64, "float32"), bn_offset: T.Buffer(64, "float32"), bn_scale: T.Buffer(64, "float32"), compute: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64})
            Conv2dOutput = T.alloc_buffer((1, 112, 112, 64))
            for nn_0, yy_0, xx_0, ff_0, nn_1, yy_1, xx_1, ff_1, ry_0, rx_0, rc_0, nn_2, yy_2, xx_2, ff_2, ry_1, rx_1, rc_1, nn_3, yy_3, xx_3, ff_3 in T.grid(1, 2, 7, 1, 1, 2, 2, 32, 7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
                with T.block("Conv2dOutput"):
                    v_nn = T.axis.spatial(1, nn_0 + nn_1 + nn_2 + nn_3)
                    v_yy = T.axis.spatial(112, yy_0 * 56 + yy_1 * 28 + yy_2 * 28 + yy_3)
                    v_xx = T.axis.spatial(112, xx_0 * 16 + xx_1 * 8 + xx_2 * 2 + xx_3)
                    v_ff = T.axis.spatial(64, ff_0 * 64 + ff_1 * 2 + ff_2 * 2 + ff_3)
                    v_ry = T.axis.reduce(7, ry_0 + ry_1)
                    v_rx = T.axis.reduce(7, rx_0 + rx_1)
                    v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                    T.reads(data[v_nn, v_yy * 2 + v_ry - 3, v_xx * 2 + v_rx - 3, v_rc], kernel[v_ry, v_rx, v_rc, v_ff])
                    T.writes(Conv2dOutput[v_nn, v_yy, v_xx, v_ff])
                    T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                    with T.init():
                        Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = T.float32(0)
                    Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = Conv2dOutput[v_nn, v_yy, v_xx, v_ff] + T.if_then_else(3 <= v_yy * 2 + v_ry and v_yy * 2 + v_ry < 227 and 3 <= v_xx * 2 + v_rx and v_xx * 2 + v_rx < 227, data[v_nn, v_yy * 2 + v_ry - 3, v_xx * 2 + v_rx - 3, v_rc], T.float32(0)) * kernel[v_ry, v_rx, v_rc, v_ff]
            for i0, i1, i2, i3 in T.grid(1, 112, 112, 64):
                with T.block("compute"):
                    v_i0, v_i1, v_i2, v_i3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
                    T.reads(Conv2dOutput[v_i0, v_i1, v_i2, v_i3], bias[v_i3], bn_scale[v_i3], bn_offset[v_i3])
                    T.writes(compute[v_i0, v_i1, v_i2, v_i3])
                    compute[v_i0, v_i1, v_i2, v_i3] = T.max((Conv2dOutput[v_i0, v_i1, v_i2, v_i3] + bias[v_i3]) * bn_scale[v_i3] + bn_offset[v_i3], T.float32(0))
    # Sketch 1: PaddedInput materialized per yy_0 tile; epilogue computed
    # inside the (xx_0, ff_0, ..., ff_1) tile loops.
    @T.prim_func
    def cbr_1(data: T.Buffer((1, 224, 224, 3), "float32"), kernel: T.Buffer((7, 7, 3, 64), "float32"), bias: T.Buffer(64, "float32"), bn_offset: T.Buffer(64, "float32"), bn_scale: T.Buffer(64, "float32"), compute: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64})
            PaddedInput = T.alloc_buffer((1, 230, 230, 3))
            Conv2dOutput = T.alloc_buffer((1, 112, 112, 64))
            for nn_0, yy_0 in T.grid(1, 2):
                for ax0, ax1, ax2, ax3 in T.grid(1, 117, 229, 3):
                    with T.block("PaddedInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(230, yy_0 * 112 + ax1)
                        v_i2 = T.axis.spatial(230, ax2)
                        v_i3 = T.axis.spatial(3, ax3)
                        T.reads(data[v_i0, v_i1 - 3, v_i2 - 3, v_i3])
                        T.writes(PaddedInput[v_i0, v_i1, v_i2, v_i3])
                        PaddedInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, data[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0))
                for xx_0, ff_0, nn_1, yy_1, xx_1, ff_1 in T.grid(7, 1, 1, 2, 2, 32):
                    for ry_0, rx_0, rc_0, nn_2, yy_2, xx_2, ff_2, ry_1, rx_1, rc_1, nn_3, yy_3, xx_3, ff_3 in T.grid(7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
                        with T.block("Conv2dOutput"):
                            v_nn = T.axis.spatial(1, nn_0 + nn_1 + nn_2 + nn_3)
                            v_yy = T.axis.spatial(112, yy_0 * 56 + yy_1 * 28 + yy_2 * 28 + yy_3)
                            v_xx = T.axis.spatial(112, xx_0 * 16 + xx_1 * 8 + xx_2 * 2 + xx_3)
                            v_ff = T.axis.spatial(64, ff_0 * 64 + ff_1 * 2 + ff_2 * 2 + ff_3)
                            v_ry = T.axis.reduce(7, ry_0 + ry_1)
                            v_rx = T.axis.reduce(7, rx_0 + rx_1)
                            v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                            T.reads(PaddedInput[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc], kernel[v_ry, v_rx, v_rc, v_ff])
                            T.writes(Conv2dOutput[v_nn, v_yy, v_xx, v_ff])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = T.float32(0)
                            Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = Conv2dOutput[v_nn, v_yy, v_xx, v_ff] + PaddedInput[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc] * kernel[v_ry, v_rx, v_rc, v_ff]
                    for ax0, ax1, ax2, ax3 in T.grid(1, 28, 8, 2):
                        with T.block("compute"):
                            v_i0 = T.axis.spatial(1, ax0)
                            v_i1 = T.axis.spatial(112, yy_0 * 56 + yy_1 * 28 + ax1)
                            v_i2 = T.axis.spatial(112, xx_0 * 16 + xx_1 * 8 + ax2)
                            v_i3 = T.axis.spatial(64, ff_1 * 2 + ax3)
                            T.reads(Conv2dOutput[v_i0, v_i1, v_i2, v_i3], bias[v_i3], bn_scale[v_i3], bn_offset[v_i3])
                            T.writes(compute[v_i0, v_i1, v_i2, v_i3])
                            compute[v_i0, v_i1, v_i2, v_i3] = T.max((Conv2dOutput[v_i0, v_i1, v_i2, v_i3] + bias[v_i3]) * bn_scale[v_i3] + bn_offset[v_i3], T.float32(0))
    # Sketch 2: PaddedInput materialized per yy_0 tile; epilogue computed at
    # the coarser (xx_0, ff_0) tile level.
    @T.prim_func
    def cbr_2(data: T.Buffer((1, 224, 224, 3), "float32"), kernel: T.Buffer((7, 7, 3, 64), "float32"), bias: T.Buffer(64, "float32"), bn_offset: T.Buffer(64, "float32"), bn_scale: T.Buffer(64, "float32"), compute: T.Buffer((1, 112, 112, 64), "float32")) -> None:
        T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)})
        with T.block("root"):
            T.reads()
            T.writes()
            T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64})
            PaddedInput = T.alloc_buffer((1, 230, 230, 3))
            Conv2dOutput = T.alloc_buffer((1, 112, 112, 64))
            for nn_0, yy_0 in T.grid(1, 2):
                for ax0, ax1, ax2, ax3 in T.grid(1, 117, 229, 3):
                    with T.block("PaddedInput"):
                        v_i0 = T.axis.spatial(1, ax0)
                        v_i1 = T.axis.spatial(230, yy_0 * 112 + ax1)
                        v_i2 = T.axis.spatial(230, ax2)
                        v_i3 = T.axis.spatial(3, ax3)
                        T.reads(data[v_i0, v_i1 - 3, v_i2 - 3, v_i3])
                        T.writes(PaddedInput[v_i0, v_i1, v_i2, v_i3])
                        PaddedInput[v_i0, v_i1, v_i2, v_i3] = T.if_then_else(3 <= v_i1 and v_i1 < 227 and 3 <= v_i2 and v_i2 < 227, data[v_i0, v_i1 - 3, v_i2 - 3, v_i3], T.float32(0))
                for xx_0, ff_0 in T.grid(7, 1):
                    for nn_1, yy_1, xx_1, ff_1, ry_0, rx_0, rc_0, nn_2, yy_2, xx_2, ff_2, ry_1, rx_1, rc_1, nn_3, yy_3, xx_3, ff_3 in T.grid(1, 2, 2, 32, 7, 7, 1, 1, 1, 4, 1, 1, 1, 3, 1, 28, 2, 2):
                        with T.block("Conv2dOutput"):
                            v_nn = T.axis.spatial(1, nn_0 + nn_1 + nn_2 + nn_3)
                            v_yy = T.axis.spatial(112, yy_0 * 56 + yy_1 * 28 + yy_2 * 28 + yy_3)
                            v_xx = T.axis.spatial(112, xx_0 * 16 + xx_1 * 8 + xx_2 * 2 + xx_3)
                            v_ff = T.axis.spatial(64, ff_0 * 64 + ff_1 * 2 + ff_2 * 2 + ff_3)
                            v_ry = T.axis.reduce(7, ry_0 + ry_1)
                            v_rx = T.axis.reduce(7, rx_0 + rx_1)
                            v_rc = T.axis.reduce(3, rc_0 * 3 + rc_1)
                            T.reads(PaddedInput[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc], kernel[v_ry, v_rx, v_rc, v_ff])
                            T.writes(Conv2dOutput[v_nn, v_yy, v_xx, v_ff])
                            T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"})
                            with T.init():
                                Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = T.float32(0)
                            Conv2dOutput[v_nn, v_yy, v_xx, v_ff] = Conv2dOutput[v_nn, v_yy, v_xx, v_ff] + PaddedInput[v_nn, v_yy * 2 + v_ry, v_xx * 2 + v_rx, v_rc] * kernel[v_ry, v_rx, v_rc, v_ff]
                    for ax0, ax1, ax2, ax3 in T.grid(1, 56, 16, 64):
                        with T.block("compute"):
                            v_i0 = T.axis.spatial(1, ax0)
                            v_i1 = T.axis.spatial(112, yy_0 * 56 + ax1)
                            v_i2 = T.axis.spatial(112, xx_0 * 16 + ax2)
                            v_i3 = T.axis.spatial(64, ax3)
                            T.reads(Conv2dOutput[v_i0, v_i1, v_i2, v_i3], bias[v_i3], bn_scale[v_i3], bn_offset[v_i3])
                            T.writes(compute[v_i0, v_i1, v_i2, v_i3])
                            compute[v_i0, v_i1, v_i2, v_i3] = T.max((Conv2dOutput[v_i0, v_i1, v_i2, v_i3] + bias[v_i3]) * bn_scale[v_i3] + bn_offset[v_i3], T.float32(0))
    # fmt: on
    # Sampled decisions that reproduce each expected sketch above.
    decision_0 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 2, 1, 28]),
        ("SamplePerfectTile", [7, 2, 4, 2]),
        ("SamplePerfectTile", [1, 32, 1, 2]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 2),
        ("SampleComputeLocation", -2),
    ]
    decision_1 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 2, 1, 28]),
        ("SamplePerfectTile", [7, 2, 4, 2]),
        ("SamplePerfectTile", [1, 32, 1, 2]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [7, 1]),
        ("SamplePerfectTile", [1, 3]),
        ("SampleCategorical", 3),
        ("SampleComputeLocation", 1),
    ]
    decision_2 = [
        ("SamplePerfectTile", [1, 1, 1, 1]),
        ("SamplePerfectTile", [2, 2, 1, 28]),
        ("SamplePerfectTile", [7, 2, 4, 2]),
("SamplePerfectTile", [1, 32, 1, 2]), ("SamplePerfectTile", [7, 1]), ("SamplePerfectTile", [7, 1]), ("SamplePerfectTile", [1, 3]), ("SampleCategorical", 2), ("SampleComputeLocation", 1), ] mod = create_te_workload("CBR", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[cbr_0, cbr_1, cbr_2], expected_decisions=[decision_0, decision_1, decision_2], ) def test_cpu_tbg(): # fmt: off @T.prim_func def tbg_0(query: T.Buffer((1, 128, 12, 64), "float32"), value: T.Buffer((1, 128, 12, 64), "float32"), C: T.Buffer((1, 12, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64}) query_T = T.alloc_buffer((1, 12, 128, 64)) value_T = T.alloc_buffer((1, 12, 64, 128)) C_global = T.alloc_buffer((1, 12, 128, 128)) for b_0, h_0, i_0, j_0, b_1, h_1, i_1 in T.grid(1, 1, 1, 2, 1, 6, 2): for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 64): with T.block("value_T"): v_b = T.axis.spatial(1, ax0) v_h = T.axis.spatial(12, h_1 * 2 + ax1) v_d = T.axis.spatial(64, ax2) v_l = T.axis.spatial(128, j_0 * 64 + ax3) T.reads(value[v_b, v_l, v_h, v_d]) T.writes(value_T[v_b, v_h, v_d, v_l]) value_T[v_b, v_h, v_d, v_l] = value[v_b, v_l, v_h, v_d] for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 64): with T.block("query_T"): v_b = T.axis.spatial(1, ax0) v_h = T.axis.spatial(12, h_1 * 2 + ax1) v_l = T.axis.spatial(128, i_1 * 64 + ax2) v_d = T.axis.spatial(64, ax3) T.reads(query[v_b, v_l, v_h, v_d]) T.writes(query_T[v_b, v_h, v_l, v_d]) query_T[v_b, v_h, v_l, v_d] = query[v_b, v_l, v_h, v_d] for j_1 in range(8): for k_0, b_2, h_2, i_2, j_2, k_1, b_3, h_3, i_3, j_3 in T.grid(1, 1, 2, 2, 4, 64, 1, 1, 32, 2): with T.block("C"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_h = T.axis.spatial(12, h_0 * 12 + h_1 * 2 + h_2 + h_3) v_i = T.axis.spatial(128, i_0 * 128 + i_1 * 64 + i_2 * 32 + 
i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 2 + j_3) v_k = T.axis.reduce(64, k_0 * 64 + k_1) T.reads(query_T[v_b, v_h, v_i, v_k], value_T[v_b, v_h, v_k, v_j]) T.writes(C_global[v_b, v_h, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): C_global[v_b, v_h, v_i, v_j] = T.float32(0) C_global[v_b, v_h, v_i, v_j] = C_global[v_b, v_h, v_i, v_j] + query_T[v_b, v_h, v_i, v_k] * value_T[v_b, v_h, v_k, v_j] for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 8): with T.block("C_global"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(12, h_1 * 2 + ax1) v2 = T.axis.spatial(128, i_1 * 64 + ax2) v3 = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + ax3) T.reads(C_global[v0, v1, v2, v3]) T.writes(C[v0, v1, v2, v3]) C[v0, v1, v2, v3] = C_global[v0, v1, v2, v3] @T.prim_func def tbg_1(query: T.Buffer((1, 128, 12, 64), "float32"), value: T.Buffer((1, 128, 12, 64), "float32"), C: T.Buffer((1, 12, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 64, "meta_schedule.vectorize": 64}) query_T = T.alloc_buffer((1, 12, 128, 64)) value_T = T.alloc_buffer((1, 12, 64, 128)) C_global = T.alloc_buffer((1, 12, 128, 128)) for b, h, l, d in T.grid(1, 12, 128, 64): with T.block("query_T"): v_b, v_h, v_l, v_d = T.axis.remap("SSSS", [b, h, l, d]) T.reads(query[v_b, v_l, v_h, v_d]) T.writes(query_T[v_b, v_h, v_l, v_d]) query_T[v_b, v_h, v_l, v_d] = query[v_b, v_l, v_h, v_d] for b_0, h_0, i_0, j_0 in T.grid(1, 1, 1, 2): for b_1, h_1, i_1, j_1, k_0, b_2, h_2, i_2, j_2, k_1 in T.grid(1, 6, 2, 8, 1, 1, 2, 2, 4, 64): for ax0, ax1, ax2, ax3 in T.grid(1, 1, 1, 2): with T.block("value_T"): v_b = T.axis.spatial(1, ax0) v_h = T.axis.spatial(12, h_1 * 2 + h_2 + ax1) v_d = T.axis.spatial(64, k_1 + ax2) v_l = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 2 + ax3) T.reads(value[v_b, v_l, v_h, v_d]) 
T.writes(value_T[v_b, v_h, v_d, v_l]) value_T[v_b, v_h, v_d, v_l] = value[v_b, v_l, v_h, v_d] for b_3, h_3, i_3, j_3 in T.grid(1, 1, 32, 2): with T.block("C"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_h = T.axis.spatial(12, h_0 * 12 + h_1 * 2 + h_2 + h_3) v_i = T.axis.spatial(128, i_0 * 128 + i_1 * 64 + i_2 * 32 + i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 2 + j_3) v_k = T.axis.reduce(64, k_0 * 64 + k_1) T.reads(query_T[v_b, v_h, v_i, v_k], value_T[v_b, v_h, v_k, v_j]) T.writes(C_global[v_b, v_h, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): C_global[v_b, v_h, v_i, v_j] = T.float32(0) C_global[v_b, v_h, v_i, v_j] = C_global[v_b, v_h, v_i, v_j] + query_T[v_b, v_h, v_i, v_k] * value_T[v_b, v_h, v_k, v_j] for ax0, ax1, ax2, ax3 in T.grid(1, 12, 128, 64): with T.block("C_global"): v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2]) v3 = T.axis.spatial(128, j_0 * 64 + ax3) T.reads(C_global[v0, v1, v2, v3]) T.writes(C[v0, v1, v2, v3]) C[v0, v1, v2, v3] = C_global[v0, v1, v2, v3] @T.prim_func def tbg_2(query: T.Buffer((1, 128, 12, 64), "float32"), value: T.Buffer((1, 128, 12, 64), "float32"), C: T.Buffer((1, 12, 128, 128), "float32")) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": T.bool(True)}) with T.block("root"): T.reads() T.writes() T.block_attr({"meta_schedule.parallel": 288, "meta_schedule.unroll_explicit": 512, "meta_schedule.vectorize": 64}) value_T = T.alloc_buffer((1, 12, 64, 128)) for b_0, h_0, i_0, j_0, b_1, h_1, i_1, j_1 in T.grid(1, 1, 1, 2, 1, 6, 2, 8): for ax0, ax1, ax2, ax3 in T.grid(1, 2, 64, 8): with T.block("value_T"): v_b = T.axis.spatial(1, ax0) v_h = T.axis.spatial(12, h_1 * 2 + ax1) v_d = T.axis.spatial(64, ax2) v_l = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + ax3) T.reads(value[v_b, v_l, v_h, v_d]) T.writes(value_T[v_b, v_h, v_d, v_l]) value_T[v_b, v_h, v_d, v_l] = value[v_b, v_l, v_h, v_d] for k_0, b_2, h_2, i_2, j_2, k_1, b_3, h_3, i_3, j_3 in T.grid(1, 1, 2, 2, 4, 64, 1, 
1, 32, 2): with T.block("C"): v_b = T.axis.spatial(1, b_0 + b_1 + b_2 + b_3) v_h = T.axis.spatial(12, h_0 * 12 + h_1 * 2 + h_2 + h_3) v_i = T.axis.spatial(128, i_0 * 128 + i_1 * 64 + i_2 * 32 + i_3) v_j = T.axis.spatial(128, j_0 * 64 + j_1 * 8 + j_2 * 2 + j_3) v_k = T.axis.reduce(64, k_0 * 64 + k_1) T.reads(query[v_b, v_i, v_h, v_k], value_T[v_b, v_h, v_k, v_j]) T.writes(C[v_b, v_h, v_i, v_j]) T.block_attr({"meta_schedule.tiling_structure": "SSRSRS"}) with T.init(): C[v_b, v_h, v_i, v_j] = T.float32(0) C[v_b, v_h, v_i, v_j] = C[v_b, v_h, v_i, v_j] + query[v_b, v_i, v_h, v_k] * value_T[v_b, v_h, v_k, v_j] # fmt: on decision_0 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 6, 2, 1]), ("SamplePerfectTile", [1, 2, 2, 32]), ("SamplePerfectTile", [2, 8, 4, 2]), ("SamplePerfectTile", [1, 64]), ("SampleCategorical", 2), ("SampleComputeLocation", 6), ("SampleComputeLocation", 6), ] decision_1 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 6, 2, 1]), ("SamplePerfectTile", [1, 2, 2, 32]), ("SamplePerfectTile", [2, 8, 4, 2]), ("SamplePerfectTile", [1, 64]), ("SampleCategorical", 2), ("SampleComputeLocation", 13), ("SampleComputeLocation", -1), ] decision_2 = [ ("SamplePerfectTile", [1, 1, 1, 1]), ("SamplePerfectTile", [1, 6, 2, 1]), ("SamplePerfectTile", [1, 2, 2, 32]), ("SamplePerfectTile", [2, 8, 4, 2]), ("SamplePerfectTile", [1, 64]), ("SampleCategorical", 3), ("SampleComputeLocation", 7), ("SampleComputeLocation", -2), ] mod = create_te_workload("TBG", 0) actual = _design_space(mod) check_sketches( mod, sketches=actual, expected_mods=[tbg_0, tbg_1, tbg_2], expected_decisions=[decision_0, decision_1, decision_2], ) if __name__ == "__main__": test_cpu_c1d() test_cpu_c2d() test_cpu_c3d() test_cpu_cap() test_cpu_dep() test_cpu_dil() test_cpu_gmm() test_cpu_grp() test_cpu_t2d() test_cpu_nrm() test_cpu_sfm() test_cpu_cbr() test_cpu_tbg()
152,681
60.639887
426
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_rocm.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te import numpy as np import unittest tx = te.thread_axis("threadIdx.x") ty = te.thread_axis("threadIdx.y") bx = te.thread_axis("blockIdx.x") by = te.thread_axis("blockIdx.y") @tvm.testing.requires_rocm def test_rocm_cross_thread_reduction(): # based on the reduction tutorial n = te.size_var("n") m = te.size_var("m") A = te.placeholder((n, m), name="A") k = te.reduce_axis((0, m), "k") B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B") s = te.create_schedule(B.op) ko, ki = s[B].split(B.op.reduce_axis[0], factor=16) BF = s.rfactor(B, ki) xo, xi = s[B].split(s[B].op.axis[0], factor=32) s[B].bind(xo, bx) s[B].bind(xi, ty) s[B].bind(s[B].op.reduce_axis[0], tx) s[BF].compute_at(s[B], s[B].op.reduce_axis[0]) s[B].set_store_predicate(tx.var.equal(0)) frocm = tvm.build(s, [A, B], "rocm") nn = 128 dev = tvm.rocm(0) a = tvm.nd.array(np.random.uniform(size=(nn, nn)).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(nn, dtype=B.dtype), dev) frocm(a, b) tvm.testing.assert_allclose(b.numpy(), np.sum(a.numpy(), axis=1), rtol=1e-4) @tvm.testing.requires_rocm def test_rocm_inf_nan(): def check_inf_nan(dev, n, value, dtype): A = te.placeholder((n,), name="A", 
dtype=dtype) inf_value = tvm.tir.const(value, dtype=dtype) C = te.compute((n,), lambda i: inf_value, name="C") s = te.create_schedule(C.op) s[C].bind(s[C].op.axis[0], tx) fun = tvm.build(s, [A, C], "rocm") a = tvm.nd.empty((n,), A.dtype, dev) c = tvm.nd.empty((n,), A.dtype, dev) # Only need to test compiling here fun(a, c) dev = tvm.rocm(0) check_inf_nan(dev, 1, -float("inf"), "float32") check_inf_nan(dev, 1, -float("inf"), "float64") check_inf_nan(dev, 1, float("inf"), "float32") check_inf_nan(dev, 1, float("inf"), "float64") check_inf_nan(dev, 1, float("nan"), "float32") check_inf_nan(dev, 1, float("nan"), "float64") @tvm.testing.requires_rocm def test_rocm_reduction_binding(): k = te.reduce_axis((0, 32), "k") A = te.placeholder((96, 32), name="A") B = te.compute((96,), lambda m: te.sum(A[m, k], axis=k), name="B") s = te.create_schedule(B.op) s[B].reorder(B.op.reduce_axis[0], B.op.axis[0]) mo, _ = s[B].split(B.op.axis[0], 32) s[B].bind(mo, bx) @tvm.testing.requires_rocm def test_rocm_copy(): def check_rocm(dtype, n): A = te.placeholder((n,), name="A", dtype=dtype) dev = tvm.rocm(0) a_np = np.random.uniform(size=(n,)).astype(A.dtype) a = tvm.nd.empty((n,), A.dtype, dev).copyfrom(a_np) b_np = a.numpy() tvm.testing.assert_allclose(a_np, b_np) tvm.testing.assert_allclose(a_np, a.numpy()) for _ in range(100): dtype = np.random.choice(["float32", "float16", "int8", "int32"]) logN = np.random.randint(1, 15) peturb = np.random.uniform(low=0.5, high=1.5) check_rocm(dtype, int(peturb * (2**logN))) @tvm.testing.requires_rocm def test_rocm_vectorize_add(): num_thread = 8 def check_rocm(dtype, n, lanes): A = te.placeholder((n,), name="A", dtype="%sx%d" % (dtype, lanes)) B = te.compute((n,), lambda i: A[i] + tvm.tir.const(1, A.dtype), name="B") s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=num_thread) s[B].bind(xo, bx) s[B].bind(xi, tx) fun = tvm.build(s, [A, B], "rocm") dev = tvm.rocm(0) a = tvm.nd.empty((n,), A.dtype, 
dev).copyfrom(np.random.uniform(size=(n, lanes))) c = tvm.nd.empty((n,), B.dtype, dev) fun(a, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + 1) check_rocm("float32", 64, 2) check_rocm("float16", 64, 2) if __name__ == "__main__": test_rocm_cross_thread_reduction() test_rocm_inf_nan() test_rocm_reduction_binding() test_rocm_copy() test_rocm_vectorize_add()
4,810
33.611511
89
py
tvm
tvm-main/tests/python/unittest/test_tvmscript_printer_ir.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-docstring from tvm import IRModule from tvm.script.ir_builder import IRBuilder from tvm.script.ir_builder import ir as I from tvm.script.ir_builder import tir as T def _assert_print(obj, expected): assert str(obj).strip() == expected.strip() assert repr(obj).strip() == expected.strip() if isinstance(obj, IRModule): assert obj.script().strip() == expected.strip() def test_ir_module(): with IRBuilder() as ib: # pylint: disable=invalid-name with I.ir_module(): with T.prim_func(): T.func_name("foo") mod = ib.get() _assert_print( mod, """ # from tvm.script import ir as I # from tvm.script import tir as T @I.ir_module class Module: @T.prim_func def foo(): T.evaluate(0)""", ) if __name__ == "__main__": test_ir_module()
1,649
30.132075
62
py
tvm
tvm-main/tests/python/unittest/test_tir_host_func.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm.script import ir as I from tvm.script import tir as T from tvm.meta_schedule.testing import te_workload # pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,missing-class-docstring,missing-function-docstring # fmt: off @I.ir_module class Module: @T.prim_func def main( A: T.Buffer((729, 729), "float32"), B: T.Buffer((729, 729), "float32"), C: T.Buffer((729, 729), "float32"), ): T.func_attr( { "global_symbol": "test", "target": tvm.target.Target("llvm", host="llvm"), "tir.noalias": True, } ) # with T.block("root"): for i, j, k in T.grid(729, 729, 729): with T.block("C"): v_i, v_j, v_k = T.axis.remap("SSR", [i, j, k]) T.reads(A[v_i, v_k], B[v_k, v_j]) T.writes(C[v_i, v_j]) with T.init(): C[v_i, v_j] = T.float32(0) C[v_i, v_j] = C[v_i, v_j] + A[v_i, v_k] * B[v_k, v_j] # fmt: on # pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,missing-class-docstring,missing-function-docstring def test_host_func(): """Test that host functions are not split.""" # te schedule copied from test_tir_transform_split_host_device.py func = tvm.te.create_prim_func( te_workload.matmul(729, 729, 729, in_dtype="float32", 
out_dtype="float32") ) mod = tvm.ir.IRModule({"main": func}) target = tvm.target.Target("cuda") mod = tvm.tir.transform.Apply( lambda f: f.with_attr( { "global_symbol": "test", "tir.is_host_func": 1, } ) )(mod) mod = tvm.tir.transform.BindTarget(target)(mod) tvm.ir.assert_structural_equal(mod, Module) assert ( "tir.is_host_func" not in mod["main"].attrs ), """Target and is_host_func attributes should be mutually exclusive""" if __name__ == "__main__": test_host_func()
2,871
34.45679
145
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_inject_software_pipeline.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys import numpy as np import pytest import tvm import tvm.testing import tvm.tir.tensor_intrin.cuda from tvm import TVMError, te, tir from tvm.meta_schedule.testing import te_workload from tvm.script import tir as T from tvm.testing.tir import mma_schedule from tvm.tir.tensor_intrin.cuda import ( LDMATRIX_16x16_A_DYN_INTRIN, LDMATRIX_16x16_B_DYN_INTRIN, MMA_f16f16f32_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, shared_16x16_to_ldmatrix_32x8_layout, ) def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) mod = tvm.tir.transform.InjectSoftwarePipeline()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed, True) def _check_error(func): mod = tvm.IRModule.from_expr(func) with pytest.raises(ValueError): tvm.tir.transform.InjectSoftwarePipeline()(mod) @T.prim_func def trivial_pipeline(A: T.Buffer((16, 1), "float32"), C: T.Buffer((16, 1), "float32")): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 1, annotations={"software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1]} ): with T.block(): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), 
dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) @T.prim_func def transformed_trivial_pipeline( A: T.Buffer((16, 1), "float32"), C: T.Buffer((16, 1), "float32") ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0]) T.writes(C[tx, 0]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0]) T.writes(B[0, tx, 0]) B[0, tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads() T.writes() T.evaluate(0) with T.block(): T.reads(B[0, tx, 0]) T.writes(C[tx, 0]) C[tx, 0] = B[0, tx, 0] + T.float32(1) def gen_simple_compute(num_stages): @T.prim_func def simple_compute(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, num_stages], "software_pipeline_order": [0, 1], }, ): with T.block("compute"): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) return simple_compute @T.prim_func def transformed_simple_compute( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16]]) T.writes([C[tx, 0:16]]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0]]) T.writes([B[0, tx, 0]]) B[0, tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads([A[tx, 1:16], B[0:2, tx, 0]]) T.writes([B[0:2, tx, 0], C[tx, 0:15]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1]]) T.writes([B[(i + 1) % 2, tx, 0]]) B[(i + 1) % 2, tx, 
0] = A[tx, i + 1] * T.float32(2) with T.block(): T.reads([B[i % 2, tx, 0]]) T.writes([C[tx, i]]) C[tx, i] = B[i % 2, tx, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 0]]) T.writes([C[tx, 15]]) C[tx, 15] = B[1, tx, 0] + T.float32(1) @T.prim_func def simple_compute_with_other_annotation( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], "pragma_loop_partition_hint": True, }, ): with T.block("compute"): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) @T.prim_func def transformed_simple_compute_with_other_annotation( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16]]) T.writes([C[tx, 0:16]]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0]]) T.writes([B[0, tx, 0]]) B[0, tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads([A[tx, 1:16], B[0:2, tx, 0]]) T.writes([B[0:2, tx, 0], C[tx, 0:15]]) for i in T.serial( 0, 15, annotations={"pragma_loop_partition_hint": True}, ): with T.block(): T.reads([A[tx, i + 1]]) T.writes([B[(i + 1) % 2, tx, 0]]) B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2) with T.block(): T.reads([B[i % 2, tx, 0]]) T.writes([C[tx, i]]) C[tx, i] = B[i % 2, tx, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 0]]) T.writes([C[tx, 15]]) C[tx, 15] = B[1, tx, 0] + T.float32(1) @T.prim_func def three_stage_compute(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, 
annotations={ "software_pipeline_stage": [0, 1, 2], "software_pipeline_order": [0, 1, 2], }, ): with T.block("compute"): T.reads(A[tx, i]) T.writes(D[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") C = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, 0]) C[tx, 0] = B[tx, 0] + T.float32(2) with T.block(): T.reads(C[tx, 0]) T.writes(D[tx, i]) D[tx, i] = C[tx, 0] + T.float32(1) @T.prim_func def transformed_three_stage_compute( A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32") ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(D[tx, 0:16]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:2], B[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0]) for i in T.unroll(2): with T.block(): T.reads(A[tx, i]) T.writes(B[0:2, tx, 0]) B[i, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.where(i == 1) T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with T.block(): T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14]) for i in T.serial(14): with T.block(): T.reads(A[tx, i + 2]) T.writes(B[0:2, tx, 0]) B[i % 2, tx, 0] = A[tx, i + 2] * T.float32(2) with T.block(): T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with T.block(): T.reads(C[0:2, tx, 0]) T.writes(D[tx, i]) D[tx, i] = C[i % 2, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(C[0:2, tx, 0], D[tx, 14:16]) for i in T.unroll(2): with T.block(): T.where(i < 1) T.reads(B[0:2, tx, 0]) T.writes(C[0:2, tx, 0]) C[(i + 1) % 2, tx, 0] = B[(i + 1) % 2, tx, 0] + T.float32(2) with 
T.block(): T.reads(C[0:2, tx, 0]) T.writes(D[tx, i + 14]) D[tx, i + 14] = C[i, tx, 0] + T.float32(1) @T.prim_func def dag_interleaving( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 0, 1], "software_pipeline_order": [0, 2, 1, 3, 4], }, ): with T.block(): T.reads(A[tx, i]) T.writes(C[tx, i]) AS = T.alloc_buffer((16, 1), dtype="float32", scope="shared") BS = T.alloc_buffer((16, 1), dtype="float32", scope="shared") AL = T.alloc_buffer((1, 1), dtype="float32", scope="local") BL = T.alloc_buffer((1, 1), dtype="float32", scope="local") with T.block(): T.reads(A[tx, i]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(AS[tx, 0]) T.writes(AL[0, 0]) AL[0, 0] = AS[tx, 0] with T.block(): T.reads(B[tx, i]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, i] + T.float32(2) with T.block(): T.reads(BS[tx, 0]) T.writes(BL[0, 0]) BL[0, 0] = BS[tx, 0] with T.block(): T.reads(AL[0, 0], BL[0, 0]) T.writes(C[tx, i]) C[tx, i] = AL[0, 0] * BL[0, 0] @T.prim_func def transformed_dag_interleaving( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16], B[tx, 0:16]) T.writes(C[tx, 0:16]) AS = T.alloc_buffer([16, 1], dtype="float32", scope="shared") BS = T.alloc_buffer([16, 1], dtype="float32", scope="shared") AL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local") BL = T.alloc_buffer([2, 1, 1], dtype="float32", scope="local") with T.block(): T.reads(A[tx, 0], B[tx, 0], AS[tx, 0], BS[tx, 0]) T.writes(AS[tx, 0], BS[tx, 0], AL[0, 0, 0], BL[0, 0, 0]) with T.block(): T.reads(A[tx, 0]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, 0] + T.float32(2) 
with T.block(): T.reads(AS[tx, 0]) T.writes(AL[0, 0, 0]) AL[0, 0, 0] = AS[tx, 0] with T.block(): T.reads(BS[tx, 0]) T.writes(BL[0, 0, 0]) BL[0, 0, 0] = BS[tx, 0] with T.block(): T.reads( A[tx, 1:16], B[tx, 1:16], AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0] ) T.writes(AS[tx, 0], BS[tx, 0], AL[0:2, 0, 0], BL[0:2, 0, 0], C[tx, 0:15]) for i in T.serial(15): with T.block(): T.reads(A[tx, i + 1]) T.writes(AS[tx, 0]) AS[tx, 0] = A[tx, i + 1] * T.float32(2) with T.block(): T.reads(B[tx, i + 1]) T.writes(BS[tx, 0]) BS[tx, 0] = B[tx, i + 1] + T.float32(2) with T.block(): T.reads(AS[tx, 0]) T.writes(AL[(i + 1) % 2, 0, 0]) AL[(i + 1) % 2, 0, 0] = AS[tx, 0] with T.block(): T.reads(BS[tx, 0]) T.writes(BL[(i + 1) % 2, 0, 0]) BL[(i + 1) % 2, 0, 0] = BS[tx, 0] with T.block(): T.reads(AL[i % 2, 0, 0], BL[i % 2, 0, 0]) T.writes(C[tx, i]) C[tx, i] = AL[i % 2, 0, 0] * BL[i % 2, 0, 0] with T.block(): T.reads(AL[1, 0, 0], BL[1, 0, 0]) T.writes(C[tx, 15]) C[tx, 15] = AL[1, 0, 0] * BL[1, 0, 0] @T.prim_func def nested_pipeline_simple( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1, 1, 1], "software_pipeline_order": [0, 1, 2, 3], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_shared[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 
0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_simple( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared") B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[0, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[0, tx, 0, j]]) A_shared[0, tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:15, 0:16], B[0:2, tx, 0:15, 0]]) T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:15, 0], C[tx, 0:15, 0:16]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[(i + 1) % 2, tx, 0, j]]) A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_shared[i % 2, tx, i, 0]]) T.writes([B[0, tx, i, 0]]) B[0, tx, i, 0] = A_shared[i % 2, tx, 0, 0] * T.float32(2) with T.block(): T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_shared[ i % 2, tx, 0, j + 1 ] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[1, tx, 15, 0:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_shared[1, tx, 15, 0]]) T.writes([B[0, tx, 15, 0]]) B[0, tx, 15, 0] = 
A_shared[1, tx, 0, 0] * T.float32(2) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[1, tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_prefetch_inner( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 1, 1], "software_pipeline_order": [0, 2, 1, 3], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_shared[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_shared[tx, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_prefetch_inner( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([2, 16, 1, 16], dtype="float32", scope="shared") B = T.alloc_buffer([2, 16, 1, 
1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[0, tx, 0, 0]]) T.writes([A_shared[0, tx, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[0, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[0, tx, 0, j]]) A_shared[0, tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[0, tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_shared[0, tx, 0, 0] * T.float32(2) with T.block(): T.reads([A[tx, 1:16, 0:16], A_shared[0:2, tx, 0:16, 0:16], B[0:2, tx, 0:15, 0]]) T.writes([A_shared[0:2, tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16]]) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[(i + 1) % 2, tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[(i + 1) % 2, tx, 0, j]]) A_shared[(i + 1) % 2, tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_shared[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_shared[ i % 2, tx, 0, j + 1 ] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[(i + 1) % 2, tx, i + 1, 0]]) T.writes([B[0, tx, i + 1, 0]]) B[0, tx, i + 1, 0] = A_shared[(i + 1) % 2, tx, 0, 0] * T.float32(2) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_shared[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_shared[1, tx, 15, j + 1]]) T.writes([B[(j + 1) 
% 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_shared[1, tx, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_interleaving( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 1, 1], "software_pipeline_order": [0, 2, 3, 1, 4], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial(0, 16): with T.block(): T.reads(A_shared[tx, 0, j]) T.writes(A_local[0, 0, j]) A_local[0, 0, j] = A_shared[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_local[0, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_local[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_local[0, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_interleaving( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared") A_local = T.alloc_buffer([1, 1, 16], dtype="float32", scope="local") B = 
T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[tx, 0, 0]]) T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, j]]) A_local[0, 0, j] = A_shared[tx, 0, j] with T.block(): T.reads([A_local[tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_local[0, 0, 0] * T.float32(2) with T.block(): T.reads( [ A[tx, 1:16, 0:16], A_local[tx, 0:16, 0:16], B[0:2, tx, 0:15, 0], A_shared[tx, 0, 0:16], ] ) T.writes( [ A_shared[tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16], A_local[0, 0, 0:16], ] ) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_local[tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 1) % 2, tx, i, 0] = A_local[0, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, j]]) A_local[0, 0, j] = A_shared[tx, i + 1, j] with T.block(): T.reads([A_local[tx, i + 1, 0]]) T.writes([B[0, tx, i + 1, 0]]) B[0, tx, i + 1, 0] = A_local[0, 0, 0] * T.float32(2) with 
T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_local[tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_local[0, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def nested_pipeline_double_buffer( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 0, 0, 1, 1], "software_pipeline_order": [0, 2, 3, 1, 4], }, ): with T.block(): T.reads(A[tx, i, 0:16]) T.writes(C[tx, i, 0:16]) A_shared = T.alloc_buffer((16, 1, 16), dtype="float32", scope="shared") A_local = T.alloc_buffer((1, 1, 16), dtype="float32", scope="local") for j in T.serial(0, 16): with T.block(): T.reads(A[tx, i, j]) T.writes(A_shared[tx, 0, j]) A_shared[tx, 0, j] = A[tx, i, j] for j in T.serial(0, 16): with T.block(): T.block_attr({"double_buffer_scope": 0}) T.reads(A_shared[tx, 0, j]) T.writes(A_local[0, 0, j]) A_local[0, 0, j] = A_shared[tx, i, j] for j in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1], "software_pipeline_order": [0, 1], }, ): with T.block(): T.reads(A_local[0, 0, j]) T.writes(C[tx, i, j]) B = T.alloc_buffer((16, 1, 1), dtype="float32", scope="shared") with T.block(): T.reads(A_local[tx, i, j]) T.writes(B[tx, i, 0]) B[tx, i, 0] = A_local[0, 0, j] * T.float32(2) with T.block(): T.reads(B[tx, i, 0]) T.writes(C[tx, i, j]) C[tx, i, 
j] = B[tx, i, 0] + T.float32(1) @T.prim_func def transformed_nested_pipeline_double_buffer( A: T.Buffer((16, 16, 16), "float32"), C: T.Buffer((16, 16, 16), "float32") ) -> None: for tx in T.thread_binding(0, 16, thread="threadIdx.x"): with T.block(): T.reads([A[tx, 0:16, 0:16]]) T.writes([C[tx, 0:16, 0:16]]) A_shared = T.alloc_buffer([16, 1, 16], dtype="float32", scope="shared") A_local = T.alloc_buffer([2, 1, 1, 16], dtype="float32", scope="local") B = T.alloc_buffer([2, 16, 1, 1], dtype="float32", scope="shared") with T.block(): T.reads([A[tx, 0, 0:16], A_shared[tx, 0, 0:16], A_local[0, tx, 0, 0]]) T.writes([A_shared[tx, 0, 0:16], A_local[0, 0, 0, 0:16], B[0, tx, 0, 0]]) with T.block(): T.reads([A[tx, 0, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, 0, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, 0, j] with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[0, 0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[0, 0, 0, j]]) T.block_attr({"double_buffer_scope": 0}) A_local[0, 0, 0, j] = A_shared[tx, 0, j] with T.block(): T.reads([A_local[0, tx, 0, 0]]) T.writes([B[0, tx, 0, 0]]) B[0, tx, 0, 0] = A_local[0, 0, 0, 0] * T.float32(2) with T.block(): T.reads( [ A[tx, 1:16, 0:16], A_local[0:2, tx, 0:16, 0:16], B[0:2, tx, 0:15, 0], A_shared[tx, 0, 0:16], ] ) T.writes( [ A_shared[tx, 0, 0:16], B[0:2, tx, 0:16, 0], C[tx, 0:15, 0:16], A_local[0:2, 0, 0, 0:16], ] ) for i in T.serial(0, 15): with T.block(): T.reads([A[tx, i + 1, 0:16]]) T.writes([A_shared[tx, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A[tx, i + 1, j]]) T.writes([A_shared[tx, 0, j]]) A_shared[tx, 0, j] = A[tx, i + 1, j] with T.block(): T.reads([A_local[i % 2, tx, i, 1:16], B[0:2, tx, i, 0]]) T.writes([B[0:2, tx, i, 0], C[tx, i, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[i % 2, tx, i, j + 1]]) T.writes([B[(j + 1) % 2, tx, i, 0]]) B[(j + 
1) % 2, tx, i, 0] = A_local[i % 2, 0, 0, j + 1] * T.float32( 2 ) with T.block(): T.reads([B[j % 2, tx, i, 0]]) T.writes([C[tx, i, j]]) C[tx, i, j] = B[j % 2, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_shared[tx, 0, 0:16]]) T.writes([A_local[(i + 1) % 2, 0, 0, 0:16]]) for j in T.serial(0, 16): with T.block(): T.reads([A_shared[tx, 0, j]]) T.writes([A_local[(i + 1) % 2, 0, 0, j]]) T.block_attr({"double_buffer_scope": 0}) A_local[(i + 1) % 2, 0, 0, j] = A_shared[tx, i + 1, j] with T.block(): T.reads([A_local[(i + 1) % 2, tx, i + 1, 0]]) T.writes([B[0, tx, i + 1, 0]]) B[0, tx, i + 1, 0] = A_local[(i + 1) % 2, 0, 0, 0] * T.float32(2) with T.block(): T.reads([B[1, tx, i, 0]]) T.writes([C[tx, i, 15]]) C[tx, i, 15] = B[1, tx, i, 0] + T.float32(1) with T.block(): T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:16]]) with T.block(): T.reads([A_local[1, tx, 15, 1:16], B[0:2, tx, 15, 0]]) T.writes([B[0:2, tx, 15, 0], C[tx, 15, 0:15]]) for j in T.serial(0, 15): with T.block(): T.reads([A_local[1, tx, 15, j + 1]]) T.writes([B[(j + 1) % 2, tx, 15, 0]]) B[(j + 1) % 2, tx, 15, 0] = A_local[1, 0, 0, j + 1] * T.float32(2) with T.block(): T.reads([B[j % 2, tx, 15, 0]]) T.writes([C[tx, 15, j]]) C[tx, 15, j] = B[j % 2, tx, 15, 0] + T.float32(1) with T.block(): T.reads([B[1, tx, 15, 0]]) T.writes([C[tx, 15, 15]]) C[tx, 15, 15] = B[1, tx, 15, 0] + T.float32(1) @T.prim_func def simple_compute_incorrect_reorder( A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1, 1], "software_pipeline_order": [0, 2, 1], }, ): with T.block(): T.reads(A[tx, i]) T.writes(D[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") C = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): 
T.reads(B[tx, 0]) T.writes(C[tx, 0]) C[tx, 0] = B[tx, 0] + T.float32(2) with T.block(): T.reads(C[tx, 0]) T.writes(D[tx, i]) D[tx, i] = C[tx, 0] + T.float32(1) @T.prim_func def simple_compute_conflicting_order( A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial( 0, 16, annotations={ "software_pipeline_stage": [0, 1, 1], "software_pipeline_order": [0, 1, 1], }, ): with T.block(): T.reads(A[tx, i]) T.writes(D[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") C = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, 0]) C[tx, 0] = B[tx, 0] + T.float32(2) with T.block(): T.reads(C[tx, 0]) T.writes(D[tx, i]) D[tx, i] = C[tx, 0] + T.float32(1) @T.prim_func def simple_compute_missing_annotation( A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32") ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in T.serial(0, 16, annotations={"software_pipeline_stage": [0, 1]}): with T.block(): T.reads(A[tx, i]) T.writes(C[tx, i]) B = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(B[tx, 0]) B[tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(B[tx, 0]) T.writes(C[tx, i]) C[tx, i] = B[tx, 0] + T.float32(1) def test_simple_compute(): _check(gen_simple_compute(1), transformed_simple_compute) def test_simple_compute_with_other_annotation(): _check(simple_compute_with_other_annotation, transformed_simple_compute_with_other_annotation) def test_trivial_pipeline(): _check(trivial_pipeline, transformed_trivial_pipeline) def test_three_stage_compute(): _check(three_stage_compute, transformed_three_stage_compute) def test_dag_interleaving(): _check(dag_interleaving, transformed_dag_interleaving) def test_nest_pipeline_simple(): _check(nested_pipeline_simple, 
transformed_nested_pipeline_simple) def test_nest_pipeline_prefetch_inner(): _check(nested_pipeline_prefetch_inner, transformed_nested_pipeline_prefetch_inner) def test_nest_pipeline_interleaving(): _check(nested_pipeline_interleaving, transformed_nested_pipeline_interleaving) def test_nest_pipeline_double_buffer(): _check(nested_pipeline_double_buffer, transformed_nested_pipeline_double_buffer) def test_error_reorder(): _check_error(simple_compute_incorrect_reorder) def test_error_conflicting_order(): _check_error(simple_compute_conflicting_order) def test_error_missing_annotation(): _check_error(simple_compute_missing_annotation) def test_simple_compute_async(): mod = tvm.IRModule.from_expr(gen_simple_compute(1)) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")): for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(C[tx, 0:16]) B = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0]) T.writes(B[T.FloorMod(0, 2), tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[T.FloorMod(0, 2), tx, 0] = A[tx, 0] * T.float32(2) with T.block(): T.reads(A[tx, 1:16], B[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[tx, 0:15]) for i in T.serial(15): with T.block(): T.where(i + 1 < 16) T.reads(A[tx, i + 1]) T.writes(B[(i + 1) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[(i + 1) % 2, tx, 0] = A[tx, i + 1] * T.float32(2) with T.block(): T.where(i + 1 - 1 < 16) T.reads(B[(i - 1 + 1) % 2, tx, 0]) T.writes(C[tx, i - 1 + 1]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 1): C[tx, i - 1 + 1] = B[(i - 1 + 1) % 2, tx, 0] + T.float32(1) 
with T.block(): T.reads(B[T.FloorMod(15, 2), tx, 0]) T.writes(C[tx, 15]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 0): C[tx, 15] = B[T.FloorMod(15, 2), tx, 0] + T.float32(1) tvm.ir.assert_structural_equal(mod["main"], ref, True) mod = tvm.IRModule.from_expr(gen_simple_compute(3)) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref(A: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32")) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(C[tx, 0:16]) B = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:3]) T.writes(B[0:3, tx, 0]) for i in T.unroll(3): with T.block(): T.where(i < 16) T.reads(A[tx, i]) T.writes(B[i % 4, tx, 0]) T.attr(0, "async_commit_queue_scope", 0) T.attr(0, "async_scope", 1) B[i % 4, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.reads(A[tx, 3:16], B[0:4, tx, 0]) T.writes(B[0:4, tx, 0], C[tx, 0:13]) for i in T.serial(13): with T.block(): T.where(i + 3 < 16) T.reads(A[tx, i + 3]) T.writes(B[(i + 3) % 4, tx, 0]) T.attr(0, "async_commit_queue_scope", 0) T.attr(0, "async_scope", 1) B[(i + 3) % 4, tx, 0] = A[tx, i + 3] * T.float32(2) with T.block(): T.where(i + 3 - 3 < 16) T.reads(B[0:4, tx, 0]) T.writes(C[tx, i - 3 + 3]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 3): C[tx, i - 3 + 3] = B[(i - 3 + 3) % 4, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:4, tx, 0]) T.writes(C[tx, 13:16]) for i in T.unroll(3): with T.block(): T.where(i + 16 - 3 < 16) T.reads(B[0:4, tx, 0]) T.writes(C[tx, i - 3 + 16]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 2 - i): C[tx, i - 3 + 16] = B[(i - 3 + 16) % 4, tx, 0] + T.float32(1) 
tvm.ir.assert_structural_equal(mod["main"], ref, True) def test_async_producer_interleaving(): @T.prim_func def simple_compute( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ): for tx in T.thread_binding(0, 16, thread="threadIdx.x"): for i in range(16): with T.block("compute"): T.reads(A[tx, i]) T.writes(C[tx, i]) A_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared") B_shared = T.alloc_buffer((16, 1), dtype="float32", scope="shared") with T.block(): T.reads(A[tx, i]) T.writes(A_shared[tx, 0]) A_shared[tx, 0] = A[tx, i] with T.block(): T.reads(B[tx, i]) T.writes(B_shared[tx, 0]) B_shared[tx, 0] = B[tx, i] with T.block(): T.reads(A_shared[tx, 0], B_shared[tx, 0]) T.writes(C[tx, i]) C[tx, i] = A_shared[tx, 0] + B_shared[tx, 0] mod = tvm.IRModule.from_expr(simple_compute) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_stage", ann_val=[0, 0, 3]) sch.annotate(loop, ann_key="software_pipeline_order", ann_val=[0, 2, 1]) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref( A: T.Buffer((16, 16), "float32"), B: T.Buffer((16, 16), "float32"), C: T.Buffer((16, 16), "float32"), ) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16], B[tx, 0:16]) T.writes(C[tx, 0:16]) A_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") B_shared = T.alloc_buffer([4, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:3], B[tx, 0:3]) T.writes(A_shared[0:3, tx, 0], B_shared[0:3, tx, 0]) for i in T.unroll(3): with T.block(): T.where(i < 16) T.reads(A[tx, i], B[tx, i]) T.writes(A_shared[i % 4, tx, 0], B_shared[i % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): A_shared[i % 4, tx, 0] = A[tx, i] with T.attr(0, "async_scope", 1): 
B_shared[i % 4, tx, 0] = B[tx, i] with T.block(): T.reads(A[tx, 3:16], A_shared[0:4, tx, 0], B_shared[0:4, tx, 0], B[tx, 3:16]) T.writes(A_shared[0:4, tx, 0], C[tx, 0:13], B_shared[0:4, tx, 0]) for i in T.serial(13): with T.block(): T.where(i + 3 < 16) T.reads(A[tx, i + 3]) T.writes(A_shared[(i + 3) % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): A_shared[(i + 3) % 4, tx, 0] = A[tx, i + 3] with T.block(): T.where(i + 3 - 3 < 16) T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, i - 3 + 3]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 5): C[tx, i - 3 + 3] = ( A_shared[(i - 3 + 3) % 4, tx, 0] + B_shared[(i - 3 + 3) % 4, tx, 0] ) with T.block(): T.where(i + 3 < 16) T.reads(B[tx, i + 3]) T.writes(B_shared[(i + 3) % 4, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B_shared[(i + 3) % 4, tx, 0] = B[tx, i + 3] with T.block(): T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, 13:16]) for i in T.unroll(3): with T.block(): T.where(i + 16 - 3 < 16) T.reads(A_shared[0:4, tx, 0], B_shared[0:4, tx, 0]) T.writes(C[tx, i - 3 + 16]) with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 2 - i): C[tx, i - 3 + 16] = ( A_shared[(i - 3 + 16) % 4, tx, 0] + B_shared[(i - 3 + 16) % 4, tx, 0] ) tvm.ir.assert_structural_equal(mod["main"], ref, True) def test_three_stage_compute_two_stage_async(): mod = tvm.IRModule.from_expr(three_stage_compute) sch = tvm.tir.Schedule(mod) _, loop = sch.get_loops(sch.get_block("compute")) sch.annotate(loop, ann_key="software_pipeline_async_stages", ann_val=[0, 1]) mod = tvm.tir.transform.InjectSoftwarePipeline()(sch.mod) @T.prim_func def ref(A: T.Buffer((16, 16), "float32"), D: T.Buffer((16, 16), "float32")) -> None: for tx in T.thread_binding(16, thread="threadIdx.x"): with T.block(): T.reads(A[tx, 0:16]) T.writes(D[tx, 0:16]) B = T.alloc_buffer([2, 16, 1], 
dtype="float32", scope="shared") C = T.alloc_buffer([2, 16, 1], dtype="float32", scope="shared") with T.block(): T.reads(A[tx, 0:2], B[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0]) for i in T.unroll(2): with T.block(): T.where(i < 16) T.reads(A[tx, i]) T.writes(B[i % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[i % 2, tx, 0] = A[tx, i] * T.float32(2) with T.block(): T.where(i == 1 and i - 1 < 16) T.reads(B[(i - 1) % 2, tx, 0]) T.writes(C[(i - 1) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 1): with T.attr(0, "async_scope", 1): C[(i - 1) % 2, tx, 0] = B[ (i - 1) % 2, tx, 0 ] + T.float32(2) with T.block(): T.reads(A[tx, 2:16], B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(B[0:2, tx, 0], C[0:2, tx, 0], D[tx, 0:14]) for i in T.serial(14): with T.block(): T.where(i + 2 < 16) T.reads(A[tx, i + 2]) T.writes(B[(i + 2) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 0): with T.attr(0, "async_scope", 1): B[(i + 2) % 2, tx, 0] = A[tx, i + 2] * T.float32(2) with T.block(): T.where(i + 2 - 1 < 16) T.reads(B[(i - 1 + 2) % 2, tx, 0]) T.writes(C[(i - 1 + 2) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 1): with T.attr(0, "async_scope", 1): C[(i - 1 + 2) % 2, tx, 0] = B[ (i - 1 + 2) % 2, tx, 0 ] + T.float32(2) with T.block(): T.where(i + 2 - 2 < 16) T.reads(C[0:2, tx, 0]) T.writes(D[tx, i - 2 + 2]) with T.attr(0, "async_wait_queue_scope", 1): with T.attr(0, "async_wait_inflight_count", 1): D[tx, i - 2 + 2] = C[(i - 2 + 2) % 2, tx, 0] + T.float32(1) with T.block(): T.reads(B[0:2, tx, 0], C[0:2, tx, 0]) T.writes(C[0:2, tx, 0], D[tx, 14:16]) for i in T.unroll(2): with T.block(): T.where(i + 16 - 1 < 16) T.reads(B[(i - 1 + 16) % 2, tx, 0]) T.writes(C[(i - 1 + 16) % 2, tx, 0]) with T.attr(0, "async_commit_queue_scope", 1): with 
T.attr(0, "async_wait_queue_scope", 0): with T.attr(0, "async_wait_inflight_count", 0 - i): with T.attr(0, "async_scope", 1): C[(i - 1 + 16) % 2, tx, 0] = B[ (i - 1 + 16) % 2, tx, 0 ] + T.float32(2) with T.block(): T.where(i + 16 - 2 < 16) T.reads(C[0:2, tx, 0]) T.writes(D[tx, i - 2 + 16]) with T.attr(0, "async_wait_queue_scope", 1): with T.attr( 0, "async_wait_inflight_count", T.if_then_else(i + 16 - 1 < 16, 1, 0, dtype="int32"), ): D[tx, i - 2 + 16] = C[(i - 2 + 16) % 2, tx, 0] + T.float32(1) tvm.ir.assert_structural_equal(mod["main"], ref, True) N = K = M = 4096 def get_mma_schedule(): i_factors, j_factors, k_factors = [1, 32, 1, 4, 2], [16, 2, 4, 1, 2], [128, 2, 1] def index_map(i, j): return ( i // 16, j // 16, *shared_16x16_to_ldmatrix_32x8_layout(i % 16, j % 16), ) workload = te.create_prim_func( te_workload.matmul(N, M, K, in_dtype="float16", out_dtype="float32") ) return mma_schedule( workload, 16, "float16", False, i_factors, j_factors, k_factors, index_map, index_map, index_map, LDMATRIX_16x16_A_DYN_INTRIN, LDMATRIX_16x16_B_DYN_INTRIN, MMA_f16f16f32_INTRIN, MMA_fill_16x16_f32_INTRIN, MMA_store_16x16_f32_global_INTRIN, "shared.dyn", ) def build_and_run(sch): if tvm.testing.is_ampere_or_newer(): with tvm.transform.PassContext(config={"tir.use_async_copy": 1}): f = tvm.build(sch.mod["main"], target="cuda") dev = tvm.device("cuda", 0) a_np = np.random.uniform(size=(N, K)).astype("float16") b_np = np.random.uniform(size=(K, M)).astype("float16") c_np = np.dot(a_np.astype("float32"), b_np.astype("float32")) a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) c = tvm.nd.array(np.zeros((N, M), dtype="float32"), dev) f(a, b, c) tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3) @tvm.testing.requires_cuda def test_async_pipelined_mma_gemm_simple(): sch = get_mma_schedule() k0 = sch.get_loops(sch.get_block("C_o_update"))[3] sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 3]) sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 
1, 2]) sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0]) seq = tvm.transform.Sequential( [ tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(), tvm.tir.transform.ConvertBlocksToOpaque(), tvm.tir.transform.UnifyThreadBinding(), tvm.tir.transform.LowerMatchBuffer(), tvm.tir.transform.InjectSoftwarePipeline(), ] ) mod = seq(sch.mod) pipeline = mod["main"].body.block.body.body.body.body.body.block.body[1].block.body prologue, body, epilogue = pipeline commit_queue_scope = prologue.block.body.body.block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 commit_queue_scope = body.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert body.block.body.body[1].block.body.body.value == 3 assert epilogue.block.body.body.block.body.body.attr_key == "async_wait_inflight_count" assert str(epilogue.block.body.body.block.body.body.value) == "2 - k_0_0" build_and_run(sch) @tvm.testing.requires_cuda def test_async_nested_pipeline_mma_gemm_ideal_annotation(): sch = get_mma_schedule() k0 = sch.get_loops(sch.get_block("C_o_update"))[3] k1 = sch.get_loops(sch.get_block("C_o_update"))[4] sch.annotate(k0, ann_key="software_pipeline_stage", ann_val=[0, 0, 2, 3, 3]) sch.annotate(k0, ann_key="software_pipeline_order", ann_val=[0, 1, 3, 2, 4]) sch.annotate(k0, ann_key="software_pipeline_async_stages", ann_val=[0]) sch.annotate(k1, ann_key="software_pipeline_stage", ann_val=[0, 0, 1]) sch.annotate(k1, ann_key="software_pipeline_order", ann_val=[0, 1, 2]) seq = tvm.transform.Sequential( [ tvm.tir.transform.PlanAndUpdateBufferAllocationLocation(), tvm.tir.transform.ConvertBlocksToOpaque(), tvm.tir.transform.UnifyThreadBinding(), tvm.tir.transform.LowerMatchBuffer(), tvm.tir.transform.InjectSoftwarePipeline(), ] ) mod = seq(sch.mod) pipeline = 
mod["main"].body.block.body.body.body.body.body.block.body[1].block.body prologue, body, epilogue = pipeline commit_queue_scope = prologue.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert prologue.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert prologue.block.body.body[1].block.body.body.value == 2 commit_queue_scope = body.block.body.body[0].block.body assert len(commit_queue_scope.body) == 2 assert commit_queue_scope.value == 0 assert body.block.body.body[1].block.body.body.attr_key == "async_wait_inflight_count" assert body.block.body.body[1].block.body.body.value == 2 assert str(epilogue.block.body.body[0].block.body.body.value) == "1 - k_0_0" build_and_run(sch) if __name__ == "__main__": tvm.testing.main()
69,450
43.40601
100
py
tvm
tvm-main/tests/python/unittest/test_runtime_measure.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for the runtime time-evaluator measurement utilities.

Checks that ``time_evaluator``'s ``min_repeat_ms`` option actually forces
repeated invocations, and that ``BenchmarkResult`` computes its summary
statistics correctly.
"""
import time

import tvm
from tvm import te
from tvm.contrib.utils import tempdir
from tvm.runtime.module import BenchmarkResult


def test_min_repeat_ms():
    """``min_repeat_ms`` must repeat the workload until the time budget is met.

    The measured kernel is a packed call that sleeps 100 ms and appends one
    character to a log file, so the file length counts how many times the
    evaluator actually invoked the function.
    """
    tmp = tempdir()
    filename = tmp.relpath("log")

    @tvm.register_func
    def my_debug(filename):
        """One call lasts for 100 ms and writes one character to a file."""
        time.sleep(0.1)
        with open(filename, "a") as fout:
            fout.write("c")

    X = te.compute((), lambda: tvm.tir.call_packed("my_debug", filename))
    s = te.create_schedule(X.op)
    func = tvm.build(s, [X])
    x = tvm.nd.empty((), dtype="int32")

    # Without min_repeat_ms: number=1, repeat=1 yields exactly two writes.
    # NOTE(review): two characters observed — presumably one warm-up call plus
    # one timed call; confirm against time_evaluator's warm-up semantics.
    ftimer = func.time_evaluator(func.entry_name, tvm.cpu(), number=1, repeat=1)
    ftimer(x)
    with open(filename, "r") as fin:
        ct = len(fin.readline())
    assert ct == 2

    # With a 1000 ms budget and a ~100 ms kernel, the evaluator must re-run
    # the function until the budget is met.
    ftimer = func.time_evaluator(func.entry_name, tvm.cpu(), number=1, repeat=1, min_repeat_ms=1000)
    ftimer(x)
    # make sure we get more than 10 calls
    with open(filename, "r") as fin:
        ct = len(fin.readline())
    assert ct > 10 + 2


def test_benchmark_result():
    """``BenchmarkResult`` summary statistics (population std, not sample)."""
    r = BenchmarkResult([1, 2, 2, 5])
    assert r.mean == 2.5
    assert r.median == 2.0
    assert r.min == 1
    assert r.max == 5
    assert r.std == 1.5


if __name__ == "__main__":
    test_min_repeat_ms()
    test_benchmark_result()
2,118
28.430556
100
py
tvm
tvm-main/tests/python/unittest/test_tir_schedule_merge.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-function-docstring,missing-module-docstring import pytest import tvm import tvm.testing from tvm import tir from tvm.script import tir as T from tvm.tir.schedule.testing import verify_trace_roundtrip # pylint: disable=no-member,invalid-name,unused-variable @T.prim_func def elementwise(a: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) D = T.match_buffer(d, (64, 64)) B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(B[vi, vj]) B[vi, vj] = A[vi, vj] * T.float32(2) for i_0, j_0, i_1, j_1 in T.grid(8, 8, 16, 16): with T.block("C"): vi = T.axis.spatial(128, i_0 * 16 + i_1) vj = T.axis.spatial(128, j_0 * 16 + j_1) T.reads(B[vi, vj]) T.writes(C[vi, vj]) C[vi, vj] = B[vi, vj] + T.float32(1) for i_0, j_0, i_1, j_1 in T.grid(8, 8, 8, 8): with T.block("D"): vi = T.axis.spatial(64, i_0 * 8 + i_1) vj = T.axis.spatial(64, j_0 * 8 + j_1) T.reads(B[vi, vj]) T.writes(D[vi, vj]) D[vi, vj] = B[vi, vj] + T.float32(2) @T.prim_func def elementwise_merged(a: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = 
T.match_buffer(c, (128, 128)) D = T.match_buffer(d, (64, 64)) B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(B[vi, vj]) B[vi, vj] = A[vi, vj] * T.float32(2) for i_0_m in range(8): for j_0, i_1, j_1 in T.grid(8, 16, 16): with T.block("C"): vi = T.axis.spatial(128, i_0_m * 16 + i_1) vj = T.axis.spatial(128, j_0 * 16 + j_1) T.reads(B[vi, vj]) T.writes(C[vi, vj]) C[vi, vj] = B[vi, vj] + T.float32(1) for j_0, i_1, j_1 in T.grid(8, 8, 8): with T.block("D"): vi = T.axis.spatial(64, i_0_m * 8 + i_1) vj = T.axis.spatial(64, j_0 * 8 + j_1) T.reads(B[vi, vj]) T.writes(D[vi, vj]) D[vi, vj] = B[vi, vj] + T.float32(2) @T.prim_func def elementwise_merged2(a: T.handle, c: T.handle, d: T.handle) -> None: A = T.match_buffer(a, (128, 128)) C = T.match_buffer(c, (128, 128)) D = T.match_buffer(d, (64, 64)) B = T.alloc_buffer((128, 128)) for i, j in T.grid(128, 128): with T.block("B"): vi, vj = T.axis.remap("SS", [i, j]) T.reads(A[vi, vj]) T.writes(B[vi, vj]) B[vi, vj] = A[vi, vj] * T.float32(2) for i_0_m, j_0_m in T.grid(8, 8): for i_1, j_1 in T.grid(16, 16): with T.block("C"): vi = T.axis.spatial(128, i_0_m * 16 + i_1) vj = T.axis.spatial(128, j_0_m * 16 + j_1) T.reads(B[vi, vj]) T.writes(C[vi, vj]) C[vi, vj] = B[vi, vj] + T.float32(1) for i_1, j_1 in T.grid(8, 8): with T.block("D"): vi = T.axis.spatial(64, i_0_m * 8 + i_1) vj = T.axis.spatial(64, j_0_m * 8 + j_1) T.reads(B[vi, vj]) T.writes(D[vi, vj]) D[vi, vj] = B[vi, vj] + T.float32(2) def test_merge(): sch = tir.Schedule(elementwise, debug_mask="all") block_c = sch.get_block("C") block_d = sch.get_block("D") i = sch.get_loops(block_c)[0] j = sch.get_loops(block_d)[0] sch.merge(i, j) tvm.ir.assert_structural_equal(elementwise_merged, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_merge2(): sch = tir.Schedule(elementwise, debug_mask="all") block_c = sch.get_block("C") block_d = sch.get_block("D") i = 
sch.get_loops(block_c)[1] j = sch.get_loops(block_d)[1] sch.merge(i, j) tvm.ir.assert_structural_equal(elementwise_merged2, sch.mod["main"]) verify_trace_roundtrip(sch=sch, mod=elementwise) def test_merge_fail_not_only_child(): @T.prim_func def elementwise_with_seq(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.match_buffer(c, (128, 128, 128)) B = T.alloc_buffer((128, 128, 128)) D = T.alloc_buffer((128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("D"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) D[vi, vj, vk] = A[vi, vj, vk] * 2.0 for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = B[vi, vj, vk] * 2.0 sch = tir.Schedule(elementwise_with_seq, debug_mask="all") block_b = sch.get_block("B") _, _, b = sch.get_loops(block_b) block_c = sch.get_block("C") _, _, c = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.merge(b, c) def test_merge_fail_not_start_with_zero(): @T.prim_func def elementwise_loops_not_start_with_zero(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.match_buffer(c, (128, 128, 128)) B = T.alloc_buffer((128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(1, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 sch = tir.Schedule(elementwise_loops_not_start_with_zero, debug_mask="all") block_b = sch.get_block("B") _, _, b = sch.get_loops(block_b) block_c = sch.get_block("C") _, _, c = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.merge(b, c) def test_merge_fail_not_same_extent(): 
@T.prim_func def elementwise_loops_not_same_extent(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.match_buffer(c, (128, 128, 128)) B = T.alloc_buffer((64, 128, 128)) for i, j in T.grid(64, 128): for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 sch = tir.Schedule(elementwise_loops_not_same_extent, debug_mask="all") block_b = sch.get_block("B") _, _, b = sch.get_loops(block_b) block_c = sch.get_block("C") _, _, c = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.merge(b, c) def test_merge_fail_not_same_level(): @T.prim_func def elementwise_not_same_level(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.match_buffer(c, (128, 128, 128)) B = T.alloc_buffer((128, 128, 128)) for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 sch = tir.Schedule(elementwise_not_same_level, debug_mask="all") block_b = sch.get_block("B") _, b, _ = sch.get_loops(block_b) block_c = sch.get_block("C") _, _, c = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.merge(b, c) def test_merge_fail_with_different_scope(): @T.prim_func def elementwise_with_different_scope(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (128, 128, 128)) C = T.match_buffer(c, (128, 128, 128)) B = T.alloc_buffer((128, 128, 128)) with T.block("A"): for i, j in T.grid(128, 128): for k in T.serial(0, 128): with T.block("B"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) B[vi, vj, vk] = A[vi, vj, vk] * 2.0 for i, j in T.grid(128, 
128): for k in T.serial(0, 128): with T.block("C"): vi, vj, vk = T.axis.remap("SSS", [i, j, k]) C[vi, vj, vk] = A[vi, vj, vk] * 2.0 sch = tir.Schedule(elementwise_with_different_scope, debug_mask="all") block_b = sch.get_block("B") _, _, b = sch.get_loops(block_b) block_c = sch.get_block("C") _, _, c = sch.get_loops(block_c) with pytest.raises(tvm.tir.ScheduleError): sch.merge(b, c) if __name__ == "__main__": tvm.testing.main()
10,471
37.218978
80
py
tvm
tvm-main/tests/python/unittest/test_runtime_rpc.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te import tvm.testing import multiprocessing import os import stat import sys import time import pytest import numpy as np from tvm import rpc from tvm.relay.backend import Runtime from tvm.contrib import utils, cc from tvm.rpc.tracker import Tracker from tvm.rpc.proxy import Proxy if __name__ == "__main__": # NOTE: must live here to avoid registering PackedFunc with libtvm.so twice. tvm.testing.main() # tkonolige: The issue as I understand it is this: multiprocessing's spawn # method launches a new process and then imports the relevant modules. This # means that all registered functions must exist at the top level scope. In # this file they are, so all is well when we run this file directly. # However, when run under pytest, the functions aren't registered on the # server. I believe this is because pytest is also using multiprocessing to # run individual functions. Somewhere along the way, the imports are being # lost, so the server ends up not registering the functions. 
pytestmark = pytest.mark.skipif( # Windows does not support fork so we can enable Windows for testing sys.platform.startswith("win") == False and multiprocessing.get_start_method() != "fork", reason=( "pytest + multiprocessing spawn method causes tvm.register_func to " "not work on the rpc.Server." ), ) # NOTE: When writing tests, wrap remote related checking in a sub-function # to ensure all the remote resources destructs before the server terminates @tvm.testing.requires_rpc def test_bigendian_rpc(): """Test big endian rpc when there is a PowerPC RPC server available""" host = os.environ.get("TVM_POWERPC_TEST_HOST", None) port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090) if host is None: return def verify_rpc(remote, target, shape, dtype): A = te.placeholder(shape, dtype=dtype) B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype)) s = te.create_schedule(B.op) f = tvm.build(s, [A, B], target, name="myadd") dev = remote.cpu(0) a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev) b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev) temp = utils.tempdir() path_dso = temp.relpath("dev_lib.o") f.save(path_dso) remote.upload(path_dso) f = remote.load_module("dev_lib.o") f(a, b) tvm.testing.assert_allclose(a.numpy() + 1, b.numpy()) print("Test RPC connection to PowerPC...") remote = rpc.connect(host, port) target = "llvm -mtriple=powerpc-linux-gnu" for dtype in ["float32", "float64", "int32", "int8"]: verify_rpc(remote, target, (10,), dtype) @tvm.testing.requires_rpc def test_rpc_simple(): server = rpc.Server(key="x1") client = rpc.connect("127.0.0.1", server.port, key="x1") def check_remote(): f1 = client.get_function("rpc.test.addone") assert f1(10) == 11 f3 = client.get_function("rpc.test.except") with pytest.raises(tvm._ffi.base.TVMError): f3("abc") f2 = client.get_function("rpc.test.strcat") assert f2("abc", 11) == "abc:11" check_remote() @tvm.testing.requires_rpc def test_rpc_simple_wlog(): server = 
rpc.Server(key="x1") client = rpc.connect("127.0.0.1", server.port, key="x1", enable_logging=True) def check_remote(): f1 = client.get_function("rpc.test.addone") assert f1(10) == 11 f3 = client.get_function("rpc.test.except") with pytest.raises(tvm._ffi.base.TVMError): f3("abc") f2 = client.get_function("rpc.test.strcat") assert f2("abc", 11) == "abc:11" check_remote() @tvm.testing.requires_rpc def test_rpc_runtime_string(): server = rpc.Server(key="x1") client = rpc.connect("127.0.0.1", server.port, key="x1") def check_remote(): func = client.get_function("rpc.test.runtime_str_concat") x = tvm.runtime.container.String("abc") y = tvm.runtime.container.String("def") assert str(func(x, y)) == "abcdef" check_remote() @tvm.testing.requires_rpc def test_rpc_array(): server = rpc.Server() remote = rpc.connect("127.0.0.1", server.port) def check_remote(): x = np.ones((3, 4)) r_cpu = tvm.nd.array(x, remote.cpu(0)) assert str(r_cpu.device).startswith("remote") np.testing.assert_equal(r_cpu.numpy(), x) fremote = remote.get_function("rpc.test.remote_array_func") fremote(r_cpu) check_remote() @tvm.testing.requires_rpc def test_rpc_large_array(): # testcase of large array creation server = rpc.Server() remote = rpc.connect("127.0.0.1", server.port) def check_remote(): dev = remote.cpu(0) a_np = np.ones((5041, 720)).astype("float32") b_np = np.ones((720, 192)).astype("float32") a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) np.testing.assert_equal(a.numpy(), a_np) np.testing.assert_equal(b.numpy(), b_np) check_remote() @tvm.testing.skip_if_32bit(reason="skipping test for i386.") @tvm.testing.requires_rpc def test_rpc_echo(): def check(remote, local_session): fecho = remote.get_function("testing.echo") assert fecho(1, 2, 3) == 1 assert fecho(100, 2, 3) == 100 assert fecho("xyz") == "xyz" assert bytes(fecho(bytearray(b"123"))) == b"123" with pytest.raises(RuntimeError): raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError") raise_err() 
remote.cpu().sync() # tests around system lib are not threadsafe by design # and do not work well with multithread pytest # skip local session as they are being tested elsewhere if not local_session: with pytest.raises(AttributeError): f3 = remote.system_lib()["notexist"] temp = rpc.server._server_env([]) server = rpc.Server() client = rpc.connect("127.0.0.1", server.port) check(rpc.LocalSession(), True) check(client, False) def check_minrpc(): if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None: return # Test minrpc server. temp = utils.tempdir() minrpc_exec = temp.relpath("minrpc") tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, []) check(rpc.PopenSession(minrpc_exec), False) # minrpc on the remote server = rpc.Server() client = rpc.connect( "127.0.0.1", server.port, session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()], ) check(client, False) check_minrpc() @tvm.testing.requires_rpc def test_rpc_file_exchange(): server = rpc.Server() remote = rpc.connect("127.0.0.1", server.port) def check_remote(): blob = bytearray(np.random.randint(0, 10, size=(10))) remote.upload(blob, "dat.bin") rev = remote.download("dat.bin") assert rev == blob check_remote() @tvm.testing.requires_rpc @tvm.testing.requires_llvm def test_rpc_remote_module(): # graph n = tvm.runtime.convert(102) A = te.placeholder((n,), name="A") B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B") s = te.create_schedule(B.op) server0 = rpc.Server(key="x0") server1 = rpc.Server(key="x1") client = rpc.connect( "127.0.0.1", server0.port, key="x0", session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1", False], ) def check_remote(remote): temp = utils.tempdir() dev = remote.cpu(0) f = tvm.build(s, [A, B], "llvm", name="myadd") path_dso = temp.relpath("dev_lib.so") f.export_library(path_dso) remote.upload(path_dso) f1 = remote.load_module("dev_lib.so") a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = 
tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10) cost = time_f(a, b).mean print("%g secs/op" % cost) np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Download the file from the remote path_tar = temp.relpath("dev_lib.tar") f.export_library(path_tar) remote.upload(path_tar) local_download_path = temp.relpath("dev_lib.download.so") with open(local_download_path, "wb") as fo: fo.write(remote.download_linked_module("dev_lib.tar")) fupdated = tvm.runtime.load_module(local_download_path) a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0)) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0)) fupdated(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) def check_minrpc(): if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None: return # export to minrpc temp = utils.tempdir() runtime = Runtime("cpp", {"system-lib": True}) f = tvm.build(s, [A, B], "llvm", name="myadd", runtime=runtime) path_minrpc = temp.relpath("dev_lib.minrpc") f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable)) with pytest.raises(RuntimeError): rpc.PopenSession("filenotexist") # statrt the minrpc session. remote = tvm.rpc.PopenSession(path_minrpc) dev = remote.cpu(0) f1 = remote.system_lib() a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1) cost = time_f(a, b).mean np.testing.assert_equal(b.numpy(), a.numpy() + 1) # change to not executable os.chmod(path_minrpc, stat.S_IRUSR) with pytest.raises(RuntimeError): rpc.PopenSession(path_minrpc) def check_remote_link_cl(remote): """Test function to run remote code such as cl This is not enabled because there is forking issue of TVM runtime when server launches after OpenCL runtime initializes. We leave it as an example on how to do rpc when we want to do linking on remote. 
""" if not tvm.testing.device_enabled("opencl"): print("Skip because opencl is not enabled") return temp = utils.tempdir() dev = remote.cl(0) s = te.create_schedule(B.op) xo, xi = s[B].split(B.op.axis[0], factor=32) s[B].bind(xo, te.thread_axis("blockIdx.x")) s[B].bind(xi, te.thread_axis("threadIdx.x")) f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd") # Option 1: save modules separately and rely on remote compiler path_o = temp.relpath("myadd.o") path_cl = temp.relpath("myadd.cl") path_json = temp.relpath("myadd.tvm_meta.json") f.save(path_o) f.imported_modules[0].save(path_cl) remote.upload(path_o) remote.upload(path_cl) # upload meta data remote.upload(path_json) fhost = remote.load_module("myadd.o") fdev = remote.load_module("myadd.cl") fhost.import_module(fdev) a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) fhost(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) # Option 2: export library as a tar ball then handled by remote compiler path_tar = temp.relpath("myadd.tar") f.export_library(path_tar) remote.upload(path_tar) fhost = remote.load_module("myadd.tar") a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev) fhost(a, b) np.testing.assert_equal(b.numpy(), a.numpy() + 1) check_remote(rpc.LocalSession()) check_remote(client) check_minrpc() @tvm.testing.requires_rpc def test_rpc_return_func(): server = rpc.Server(key="x1") client = rpc.connect("127.0.0.1", server.port, key="x1") def check_remote(): f1 = client.get_function("rpc.test.add_to_lhs") fadd = f1(10) assert fadd(12) == 22 check_remote() @tvm.testing.requires_rpc def test_rpc_session_constructor_args(): # start server server0 = rpc.Server(key="x0") server1 = rpc.Server(key="x1") def check_multi_hop(): # use server0 as proxy to connect to server1 client = rpc.connect( "127.0.0.1", server0.port, key="x0", session_constructor_args=["rpc.Connect", 
"127.0.0.1", server1.port, "x1", False], ) fecho = client.get_function("testing.echo") assert fecho(1, 2, 3) == 1 assert fecho(100, 2, 3) == 100 assert fecho("xyz") == "xyz" assert bytes(fecho(bytearray(b"123"))) == b"123" nd = tvm.nd.array([1, 2, 3], device=client.cpu(0)) assert nd.numpy()[1] == 2 def check_error_handling(): with pytest.raises(tvm.error.RPCError): client = rpc.connect( "127.0.0.1", server0.port, key="x0", session_constructor_args=["rpc.NonExistingConstructor"], ) check_multi_hop() check_error_handling() @tvm.testing.requires_rpc def test_rpc_return_ndarray(): # start server server = rpc.Server(key="x1") client = rpc.connect("127.0.0.1", server.port, key="x1") m = client.get_function("rpc.test.remote_return_nd") get_arr = m("get_arr") ref_count = m("ref_count") get_elem = m("get_elem") get_arr_elem = m("get_arr_elem") # array test def run_arr_test(): arr = get_arr() assert get_elem(0) == 0.0 assert get_arr_elem(arr, 0) == 0.0 run_arr_test() @tvm.testing.requires_rpc def test_local_func(): client = rpc.LocalSession() def check_remote(): f1 = client.get_function("rpc.test.add_to_lhs") fadd = f1(10) assert fadd(12) == 22 blob = bytearray(np.random.randint(0, 10, size=(10))) client.upload(blob, "dat.bin") rev = client.download("dat.bin") assert rev == blob check_remote() @tvm.testing.requires_rpc @pytest.mark.parametrize("device_key", ["test_device", "127.0.0.1:5555"]) def test_rpc_tracker_register(device_key): # test registration tracker = Tracker(port=9000, port_end=10000) server1 = rpc.Server( host="127.0.0.1", port=9000, port_end=10000, key=device_key, tracker_addr=("127.0.0.1", tracker.port), ) server2 = rpc.Server( host="127.0.0.1", port=9000, port_end=10000, key=device_key, tracker_addr=("127.0.0.1", tracker.port), custom_addr="test_addr", # this is a test address, which is unable to connect ) time.sleep(1) client = rpc.connect_tracker("127.0.0.1", tracker.port) def exist_address(summary, key, host, port): server_info = summary["server_info"] 
for device in server_info: if device["key"] == "server:%s" % key: addr = device["addr"] if (host is None or host == addr[0]) and port == addr[1]: return True return False summary = client.summary() assert summary["queue_info"][device_key]["free"] == 2 assert exist_address(summary, device_key, "127.0.0.1", server1.port) assert exist_address(summary, device_key, "test_addr", server2.port) remote = client.request(device_key) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 1 del remote time.sleep(1) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 2 server1.terminate() time.sleep(1) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 1 assert not exist_address(summary, device_key, "127.0.0.1", server1.port) assert exist_address(summary, device_key, "test_addr", server2.port) server2.terminate() time.sleep(1) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 0 assert not exist_address(summary, device_key, "test_addr", server2.port) tracker.terminate() def _target(host, port, device_key, timeout): client = rpc.connect_tracker(host, port) remote = client.request(device_key, session_timeout=timeout) while True: pass remote.cpu() @tvm.testing.requires_rpc @pytest.mark.parametrize("device_key", ["test_device", "127.0.0.1:5555"]) def test_rpc_tracker_request(device_key): # test concurrent request tracker = Tracker(port=9000, port_end=10000) server = rpc.Server( port=9000, port_end=10000, key=device_key, tracker_addr=("127.0.0.1", tracker.port), ) client = rpc.connect_tracker("127.0.0.1", tracker.port) proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4)) proc2 = multiprocessing.Process( target=_target, args=("127.0.0.1", tracker.port, device_key, 200) ) proc1.start() time.sleep(0.5) proc2.start() time.sleep(0.5) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 0 assert 
summary["queue_info"][device_key]["pending"] == 1 proc1.terminate() proc1.join() time.sleep(0.5) summary = client.summary() assert summary["queue_info"][device_key]["free"] == 0 assert summary["queue_info"][device_key]["pending"] == 0 proc2.terminate() proc2.join() server.terminate() tracker.terminate() @tvm.testing.requires_rpc @pytest.mark.parametrize("device_key", ["test_device", "127.0.0.1:5555"]) def test_rpc_tracker_via_proxy(device_key): """ tracker / \ Host -- Proxy -- RPC server """ tracker_server = Tracker(port=9000, port_end=9100) proxy_server = Proxy( host=tracker_server.host, port=8888, port_end=8988, tracker_addr=(tracker_server.host, tracker_server.port), ) server1 = rpc.Server( host=proxy_server.host, port=proxy_server.port, key=device_key, tracker_addr=(tracker_server.host, tracker_server.port), is_proxy=True, ) server2 = rpc.Server( host=proxy_server.host, port=proxy_server.port, key=device_key, tracker_addr=(tracker_server.host, tracker_server.port), is_proxy=True, ) client = rpc.connect_tracker(tracker_server.host, tracker_server.port) remote1 = client.request(device_key, session_timeout=30) # pylint: disable=unused-variable remote2 = client.request(device_key, session_timeout=30) # pylint: disable=unused-variable server2.terminate() server1.terminate() proxy_server.terminate() tracker_server.terminate() @tvm.testing.requires_rpc @pytest.mark.parametrize("with_proxy", (True, False)) def test_rpc_session_timeout_error(with_proxy): port = 9000 port_end = 10000 tracker = Tracker(port=port, port_end=port_end) time.sleep(0.5) tracker_addr = (tracker.host, tracker.port) if with_proxy: proxy = Proxy(host="0.0.0.0", port=port, port_end=port_end, tracker_addr=tracker_addr) time.sleep(0.5) server = rpc.Server(host=proxy.host, port=proxy.port, is_proxy=True, key="x1") else: server = rpc.Server(port=port, port_end=port_end, tracker_addr=tracker_addr, key="x1") time.sleep(0.5) rpc_sess = rpc.connect_tracker(*tracker_addr).request(key="x1", session_timeout=1) 
with pytest.raises(tvm.error.RPCSessionTimeoutError): f1 = rpc_sess.get_function("rpc.test.addone") time.sleep(2) f1(10) server.terminate() if with_proxy: proxy.terminate() tracker.terminate()
20,976
31.776563
100
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_convert_blocks_to_opaque.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import tir, te from tvm.script import tir as T def _check(original, transformed): func = original mod = tvm.IRModule.from_expr(func) mod = tvm.tir.transform.ConvertBlocksToOpaque()(mod) mod = tvm.tir.transform.Simplify()(mod) tvm.ir.assert_structural_equal(mod["main"], transformed) @T.prim_func def elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer((16, 16), "float32") for j in range(0, 16): with T.block(): vi = T.axis.S(16, i) vj = T.axis.S(16, j) B[vi, vj] = A[vi, vj] + 1.0 for j in range(0, 16): with T.block(): vi = T.axis.S(16, i) vj = T.axis.S(16, j) C[vi, vj] = B[vi, vj] * 2.0 @T.prim_func def substituted_elementwise_func(a: T.handle, c: T.handle) -> None: A = T.match_buffer(a, (16, 16), "float32") C = T.match_buffer(c, (16, 16), "float32") for i in range(0, 16): with T.block(): T.reads(A[i, 0:16]) T.writes(C[i, 0:16]) B = T.alloc_buffer([16, 16], "float32") for j in range(0, 16): with T.block(): T.reads([A[i, j]]) T.writes([B[i, j]]) B[i, j] = A[i, j] + 1.0 for j in range(0, 16): with 
T.block(): T.reads([B[i, j]]) T.writes([C[i, j]]) C[i, j] = B[i, j] * 2.0 def test_elementwise(): _check(elementwise_func, substituted_elementwise_func) def test_lower_te(): x = te.placeholder((1,)) y = te.compute((1,), lambda i: x[i] + 2) s = te.create_schedule(y.op) orig_mod = tvm.driver.build_module.schedule_to_module(s, [x, y]) mod = tvm.tir.transform.ConvertBlocksToOpaque()(orig_mod) tvm.ir.assert_structural_equal(mod, orig_mod) # ConvertBlocksToOpaque should do nothing on TE class TestErrorIfPredicateUsesBlockVariables(tvm.testing.CompareBeforeAfter): transform = tvm.tir.transform.ConvertBlocksToOpaque() def before(A: T.Buffer(8, "int32")): for i in T.serial(8): with T.block(): vi = T.axis.remap("S", [i]) T.where(vi < 6) T.evaluate(0) expected = tvm.TVMError if __name__ == "__main__": tvm.testing.main()
3,432
33.33
98
py
tvm
tvm-main/tests/python/unittest/test_autotvm_database.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Test database""" import copy import logging from tvm.autotvm import database from tvm.autotvm.record import encode, MeasureResult from tvm.testing.autotvm import get_sample_records def test_save_load(): logging.info("test basic db load/save ...") records = get_sample_records(3) inp1, res1 = records[0] inp2, res2 = records[1] inp3, _ = records[2] _db = database.DummyDatabase() _db.flush() _db.save(inp1, res1) _db.save(inp2, res2) load1 = _db.load(inp1) load2 = _db.load(inp2) load3 = _db.load(inp3) assert load1 == res1 assert load2 == res2 assert load3 is None assert load1 != load2 TRIAL_LIMIT = 2 def test_db_hash(): logging.info("test db hash check ...") inp1, res1 = get_sample_records(1)[0] inp2 = copy.deepcopy(inp1) inp1.config.code_hash = "cafecafe" inp2.config.code_hash = "dbffdbff" res2l = list(tuple(res1)) # set timestamp res2l[-1] = -1 res2 = MeasureResult(*res2l) _db = database.DummyDatabase() _db.flush() _db.save(inp1, res1, extend=True) _db.save(inp2, res2, extend=True) load1 = _db.load(inp1) load2 = _db.load(inp2) assert load1 != load2 assert load1.timestamp != -1 assert load2.timestamp == -1 def test_db_latest_all(): logging.info("test db load w/ multiple results ...") inp1, res1 = 
get_sample_records(1)[0] lis1 = list(tuple(res1)) lis2 = list(tuple(res1)) lis3 = list(tuple(res1)) # set timestamp lis1[-1] = 0.0 lis2[-1] = 1.1 lis3[-1] = 9999.9999 res1 = MeasureResult(*lis1) res2 = MeasureResult(*lis2) res3 = MeasureResult(*lis3) _db = database.DummyDatabase() _db.flush() _db.save(inp1, res1, extend=True) load1 = _db.load(inp1) assert load1.timestamp == 0.0 _db.save(inp1, res2, extend=True) load2 = _db.load(inp1) assert load2.timestamp == 1.1 _db.save(inp1, res3, extend=True) load3 = _db.load(inp1) assert load3.timestamp == 9999.9999 load4 = _db.load(inp1, get_all=True) assert encode(inp1, load4[0]) == encode(inp1, res1) assert encode(inp1, load4[1]) == encode(inp1, res2) assert encode(inp1, load4[2]) == encode(inp1, res3) def test_db_filter(): logging.info("test db filter ...") records = get_sample_records(5) _db = database.DummyDatabase() _db.flush() for inp, result in records: _db.save(inp, result) records = _db.filter(lambda inp, ress: any(r.costs[0] <= 2 for r in ress)) assert len(records) == 2 if __name__ == "__main__": logging.basicConfig(level=logging.INFO) test_save_load() test_db_hash() test_db_latest_all() test_db_filter()
3,495
26.968
78
py
tvm
tvm-main/tests/python/unittest/test_tir_transform_remove_assume.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import TVMError from tvm.script import tir as T class BaseBeforeAfter(tvm.testing.CompareBeforeAfter): @tvm.testing.fixture def transform(self): return tvm.tir.transform.RemoveAssume() class TestRemoveAssume(BaseBeforeAfter): """Remove any instance of T.assume""" def before(A: T.Buffer(1, "int32")): T.evaluate(T.assume(A[0] == 5)) A[0] = 10 def expected(A: T.Buffer(1, "int32")): A[0] = 10 class TestRemoveAssumeLoop(BaseBeforeAfter): """Loops containing only T.assume should be removed""" def before(A: T.Buffer(16, "int32")): for i in T.serial(16): T.evaluate(T.assume(A[i] == 0)) for i in T.serial(16): A[i] = 10 def expected(A: T.Buffer(16, "int32")): for i in T.serial(16): A[i] = 10 if __name__ == "__main__": tvm.testing.main()
1,703
28.37931
62
py
tvm
tvm-main/tests/python/unittest/test_te_schedule_tensor_core.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # 'License'); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm from tvm import te import numpy as np from tvm.topi.testing import conv2d_nhwc_python import tvm.testing VERIFY = True def intrin_wmma_load_matrix(shape, scope): n, m, l = shape if scope == "wmma.matrix_a": row, col = n, l elif scope == "wmma.matrix_b": row, col = l, m A = te.placeholder((row, col), name="A", dtype="float16") BA = tvm.tir.decl_buffer( A.shape, A.dtype, scope="shared", data_alignment=32, offset_factor=row * col ) C = te.compute((row, col), lambda i, j: A[i, j], name="C") BC = tvm.tir.decl_buffer( C.shape, C.dtype, scope=scope, data_alignment=32, offset_factor=row * col ) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() BA = ins[0] BC = outs[0] ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_load_matrix_sync", BC.data, n, m, l, BC.elem_offset // (row * col), BA.access_ptr("r"), col, "row_major", ) ) return ib.get() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC}) def intrin_wmma_gemm(shape): n, m, l = shape A = te.placeholder((n, l), name="A", dtype="float16") B = te.placeholder((l, m), name="B", dtype="float16") k = te.reduce_axis((0, l), name="k") C = te.compute( (n, m), lambda ii, jj: te.sum(A[ii, k].astype("float") * B[k, 
jj].astype("float"), axis=k), name="C", ) BA = tvm.tir.decl_buffer( A.shape, A.dtype, name="BA", scope="wmma.matrix_a", data_alignment=32, offset_factor=n * l ) BB = tvm.tir.decl_buffer( B.shape, B.dtype, name="BB", scope="wmma.matrix_b", data_alignment=32, offset_factor=l * m ) BC = tvm.tir.decl_buffer( C.shape, C.dtype, name="BC", scope="wmma.accumulator", data_alignment=32, offset_factor=n * m, ) def intrin_func(ins, outs): BA, BB = ins (BC,) = outs def init(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_fill_fragment", BC.data, n, m, l, BC.elem_offset // (n * m), 0.0, ) ) return ib.get() def update(): ib = tvm.tir.ir_builder.create() ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_mma_sync", BC.data, BC.elem_offset // (n * m), BA.data, BA.elem_offset // (n * l), BB.data, BB.elem_offset // (l * m), BC.data, BC.elem_offset // (n * m), ) ) return ib.get() return update(), init(), update() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, B: BB, C: BC}) def intrin_wmma_store_matrix(shape): n, m, l = shape A = te.placeholder((n, m), name="A", dtype="float32") BA = tvm.tir.decl_buffer( A.shape, A.dtype, scope="wmma.accumulator", data_alignment=32, offset_factor=n * m ) C = te.compute((n, m), lambda i, j: A[i, j], name="C") BC = tvm.tir.decl_buffer( C.shape, C.dtype, scope="global", data_alignment=32, offset_factor=n * m ) def intrin_func(ins, outs): ib = tvm.tir.ir_builder.create() BA = ins[0] BC = outs[0] ib.emit( tvm.tir.call_intrin( "handle", "tir.tvm_store_matrix_sync", BA.data, n, m, l, BA.elem_offset // (n * m), BC.access_ptr("w"), m, "row_major", ) ) return ib.get() return te.decl_tensor_intrin(C.op, intrin_func, binds={A: BA, C: BC}) @tvm.testing.requires_tensorcore def test_tensor_core_batch_matmal(): batch_size = 4 n = 512 m, l = n, n assert n % 32 == 0 assert m % 8 == 0 assert l % 16 == 0 nn, mm, ll = n // 32, m // 8, l // 16 A = te.placeholder((batch_size, nn, ll, 32, 16), name="A", dtype="float16") B = 
te.placeholder((batch_size, ll, mm, 16, 8), name="B", dtype="float16") k1 = te.reduce_axis((0, ll), name="k1") k2 = te.reduce_axis((0, 16), name="k2") C = te.compute( (batch_size, nn, mm, 32, 8), lambda b, i, j, ii, jj: te.sum( A[b, i, k1, ii, k2].astype("float") * B[b, k1, j, k2, jj].astype("float"), axis=[k1, k2] ), name="Fragment_C", ) s = te.create_schedule(C.op) warp_size = 32 kernel_size = 16 block_row_warps = 2 block_col_warps = 4 warp_row_tiles = 4 warp_col_tiles = 2 chunk = 4 block_x = te.thread_axis("blockIdx.x") block_y = te.thread_axis("blockIdx.y") block_z = te.thread_axis("blockIdx.z") thread_x = te.thread_axis("threadIdx.x") thread_y = te.thread_axis("threadIdx.y") thread_z = te.thread_axis("threadIdx.z") AS = s.cache_read(A, "shared", [C]) BS = s.cache_read(B, "shared", [C]) AF = s.cache_read(AS, "wmma.matrix_a", [C]) BF = s.cache_read(BS, "wmma.matrix_b", [C]) CF = s.cache_write(C, "wmma.accumulator") b, i, j, kernel_i, kernel_j = s[C].op.axis i, ii = s[C].split(i, factor=warp_row_tiles) block_i, i = s[C].split(i, factor=block_row_warps) j, jj = s[C].split(j, factor=warp_col_tiles) block_j, j = s[C].split(j, factor=block_col_warps) s[C].reorder(block_i, block_j, i, j, ii, jj, kernel_i, kernel_j) s[C].bind(b, block_z) s[C].bind(block_i, block_x) s[C].bind(block_j, block_y) s[C].bind(i, thread_y) s[C].bind(j, thread_z) s[CF].compute_at(s[C], j) b, warp_i, warp_j, _i, _j = s[CF].op.axis k, _k = CF.op.reduce_axis ko, ki = s[CF].split(k, factor=chunk) s[CF].reorder(ko, ki, warp_i, warp_j, _i, _j, _k) s[AF].compute_at(s[CF], ki) s[BF].compute_at(s[CF], ki) s[AS].compute_at(s[CF], ko) b, xo, yo, xi, yi = AS.op.axis tx, xo = s[AS].split(xo, nparts=block_row_warps) ty, yo = s[AS].split(yo, nparts=block_col_warps) t = s[AS].fuse(xi, yi) to, ti = s[AS].split(t, nparts=warp_size) s[AS].bind(tx, thread_y) s[AS].bind(ty, thread_z) s[AS].bind(to, thread_x) s[BS].compute_at(s[CF], ko) b, xo, yo, xi, yi = BS.op.axis tx, xo = s[BS].split(xo, nparts=block_row_warps) 
ty, yo = s[BS].split(yo, nparts=block_col_warps) t = s[BS].fuse(xi, yi) to, ti = s[BS].split(t, nparts=warp_size) s[BS].bind(tx, thread_y) s[BS].bind(ty, thread_z) s[BS].bind(to, thread_x) s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_a")) s[BF].tensorize(BF.op.axis[-2], intrin_wmma_load_matrix((32, 8, 16), "wmma.matrix_b")) s[C].tensorize(kernel_i, intrin_wmma_store_matrix((32, 8, 16))) s[CF].tensorize(_i, intrin_wmma_gemm((32, 8, 16))) func = tvm.build(s, [A, B, C], "cuda") dev = tvm.cuda(0) a_np = np.random.uniform(size=(batch_size, nn, ll, 32, 16)).astype(A.dtype) b_np = np.random.uniform(size=(batch_size, ll, mm, 16, 8)).astype(B.dtype) a = tvm.nd.array(a_np, dev) b = tvm.nd.array(b_np, dev) c = tvm.nd.array(np.zeros((batch_size, nn, mm, 32, 8), dtype=C.dtype), dev) func(a, b, c) evaluator = func.time_evaluator(func.entry_name, dev, number=3) print("gemm with tensor core: %f ms" % (evaluator(a, b, c).mean * 1e3)) if VERIFY: func(a, b, c) a_np = a_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) b_np = b_np.transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) c_np = c.numpy().transpose((0, 1, 3, 2, 4)).reshape(batch_size, n, n) np.testing.assert_allclose( c_np, np.matmul(a_np.astype(C.dtype), b_np.astype(C.dtype)), rtol=1e-4, atol=1e-4 ) @tvm.testing.requires_tensorcore def test_tensor_core_batch_conv(): # The sizes of inputs and filters batch_size = 32 height = 14 width = 14 in_channels = 32 out_channels = 64 kernel_h = 3 kernel_w = 3 pad_h = 1 pad_w = 1 stride_h = 1 stride_w = 1 block_size = 16 block_row_warps = 2 block_col_warps = 4 warp_row_tiles = 4 warp_col_tiles = 2 warp_size = 32 chunk = 2 # Input feature map: (N, H, W, IC, n, ic) data_shape = ( batch_size // block_size, height, width, in_channels // block_size, block_size, block_size, ) # Kernel: (H, W, IC, OC, ic, oc) kernel_shape = ( kernel_h, kernel_w, in_channels // block_size, out_channels // block_size, block_size, block_size, ) # Output feature map: (N, 
H, W, OC, n, oc) output_shape = ( batch_size // block_size, height, width, out_channels // block_size, block_size, block_size, ) assert batch_size % block_size == 0 assert in_channels % block_size == 0 assert out_channels % block_size == 0 kh = te.reduce_axis((0, kernel_h), name="kh") kw = te.reduce_axis((0, kernel_w), name="kw") ic = te.reduce_axis((0, in_channels // block_size), name="ic") ii = te.reduce_axis((0, block_size), name="ii") # Algorithm A = te.placeholder(data_shape, name="A", dtype="float16") W = te.placeholder(kernel_shape, name="W", dtype="float16") Apad = te.compute( ( batch_size // block_size, height + 2 * pad_h, width + 2 * pad_w, in_channels // block_size, block_size, block_size, ), lambda n, h, w, i, nn, ii: tvm.tir.if_then_else( tvm.tir.all(h >= pad_h, h - pad_h < height, w >= pad_w, w - pad_w < width), A[n, h - pad_h, w - pad_w, i, nn, ii], tvm.tir.const(0.0, "float16"), ), name="Apad", ) Conv = te.compute( output_shape, lambda n, h, w, o, nn, oo: te.sum( Apad[n, h * stride_h + kh, w * stride_w + kw, ic, nn, ii].astype("float32") * W[kh, kw, ic, o, ii, oo].astype("float32"), axis=[ic, kh, kw, ii], ), name="Conv", ) s = te.create_schedule(Conv.op) s[Apad].compute_inline() AS = s.cache_read(Apad, "shared", [Conv]) WS = s.cache_read(W, "shared", [Conv]) AF = s.cache_read(AS, "wmma.matrix_a", [Conv]) WF = s.cache_read(WS, "wmma.matrix_b", [Conv]) ConvF = s.cache_write(Conv, "wmma.accumulator") block_x = te.thread_axis("blockIdx.x") block_y = te.thread_axis("blockIdx.y") block_z = te.thread_axis("blockIdx.z") thread_x = te.thread_axis("threadIdx.x") thread_y = te.thread_axis("threadIdx.y") thread_z = te.thread_axis("threadIdx.z") nc, hc, wc, oc, nnc, ooc = Conv.op.axis block_k = s[Conv].fuse(hc, wc) s[Conv].bind(block_k, block_z) nc, nci = s[Conv].split(nc, factor=warp_row_tiles) block_i, nc = s[Conv].split(nc, factor=block_row_warps) oc, oci = s[Conv].split(oc, factor=warp_col_tiles) block_j, oc = s[Conv].split(oc, factor=block_col_warps) 
s[Conv].reorder(block_k, block_i, block_j, nc, oc, nci, oci, nnc, ooc) s[Conv].bind(block_i, block_x) s[Conv].bind(block_j, block_y) s[Conv].bind(nc, thread_y) s[Conv].bind(oc, thread_z) s[ConvF].compute_at(s[Conv], oc) n, h, w, o, nnf, oof = ConvF.op.axis ko, ki = s[ConvF].split(ic, factor=chunk) s[ConvF].reorder(ko, kh, ki, kw, n, o, nnf, oof, ii) s[AF].compute_at(s[ConvF], kw) s[WF].compute_at(s[ConvF], kw) s[WS].compute_at(s[ConvF], kh) s[AS].compute_at(s[ConvF], kh) n, h, w, i, nn, ii = AS.op.axis tx, xo = s[AS].split(n, nparts=block_row_warps) ty, yo = s[AS].split(xo, nparts=block_col_warps) t = s[AS].fuse(nn, ii) to, ti = s[AS].split(t, factor=warp_size) s[AS].bind(tx, thread_y) s[AS].bind(ty, thread_z) s[AS].bind(ti, thread_x) kh, kw, ic, o, ii, oo = WS.op.axis tx, xo = s[WS].split(o, nparts=block_row_warps) ty, yo = s[WS].split(xo, nparts=block_col_warps) t = s[WS].fuse(ii, oo) to, ti = s[WS].split(t, nparts=warp_size) s[WS].bind(tx, thread_y) s[WS].bind(ty, thread_z) s[WS].bind(to, thread_x) s[WS].vectorize(ti) s[AF].tensorize(AF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_a")) s[WF].tensorize(WF.op.axis[-2], intrin_wmma_load_matrix((16, 16, 16), "wmma.matrix_b")) s[Conv].tensorize(nnc, intrin_wmma_store_matrix((16, 16, 16))) s[ConvF].tensorize(nnf, intrin_wmma_gemm((16, 16, 16))) func = tvm.build(s, [A, W, Conv], "cuda") dev = tvm.cuda(0) a_np = np.random.uniform(size=data_shape).astype(A.dtype) w_np = np.random.uniform(size=kernel_shape).astype(W.dtype) a = tvm.nd.array(a_np, dev) w = tvm.nd.array(w_np, dev) c = tvm.nd.array(np.zeros(output_shape, dtype=Conv.dtype), dev) evaluator = func.time_evaluator(func.entry_name, dev, number=3) print("conv2d with tensor core: %f ms" % (evaluator(a, w, c).mean * 1e3)) if VERIFY: func(a, w, c) a_np = a_np.transpose(0, 4, 1, 2, 3, 5).reshape(batch_size, height, width, in_channels) w_np = w_np.transpose(0, 1, 2, 4, 3, 5).reshape( kernel_h, kernel_w, in_channels, out_channels ) c_np = ( 
c.numpy().transpose((0, 4, 1, 2, 3, 5)).reshape(batch_size, height, width, out_channels) ) c_std = conv2d_nhwc_python( a_np.astype(Conv.dtype), w_np.astype(Conv.dtype), (stride_h, stride_w), (pad_h, pad_w) ).astype(Conv.dtype) np.testing.assert_allclose(c_np, c_std, rtol=1e-4, atol=1e-4) if __name__ == "__main__": test_tensor_core_batch_matmal() test_tensor_core_batch_conv()
15,031
31.536797
100
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_postproc_disallow_async_strided_mem_copy.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring import tvm from tvm import meta_schedule as ms from tvm import tir from tvm.script import tir as T from tvm.target import Target def _target() -> Target: return Target("hexagon", host="llvm") def _create_context(mod, target) -> ms.TuneContext: ctx = ms.TuneContext( mod=mod, target=target, space_generator=ms.space_generator.PostOrderApply( sch_rules=[], postprocs=[ ms.postproc.DisallowAsyncStridedMemCopy(), ], mutator_probs={}, ), task_name="test", ) return ctx # pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument # fmt: off @tvm.script.ir_module class Matmul: @T.prim_func def main(a: T.handle, b: T.handle, c: T.handle) -> None: T.func_attr({"global_symbol": "main"}) A = T.match_buffer(a, (1024, 1024), "float32") B = T.match_buffer(b, (1024, 1024), "float32") C = T.match_buffer(c, (1024, 1024), "float32") for i, j, k in T.grid(1024, 1024, 1024): with T.block("matmul"): vi, vj, vk = T.axis.remap("SSR", [i, j, k]) with T.init(): C[vi, vj] = 0.0 C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj] # fmt: on # pylint: 
enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument def test_postproc_disallow_async_strided_mem_copy_allows(): mod = Matmul sch = tir.Schedule(mod, debug_mask="all") matmul_block = sch.get_block("matmul") loops = sch.get_loops(matmul_block) cache_read = sch.cache_read(matmul_block, 0, "global.vtcm") sch.compute_at(cache_read, loops[1]) sch.annotate(loops[1], "software_pipeline_stage", [0, 1]) sch.annotate(loops[1], "software_pipeline_order", [0, 1]) sch.annotate(loops[1], "software_pipeline_async_stages", [0]) ctx = _create_context(sch.mod, target=_target()) sch.mod.show() assert ctx.space_generator.postprocs[0].apply(sch) def test_postproc_disallow_async_strided_mem_copy_disallows(): mod = Matmul sch = tir.Schedule(mod, debug_mask="all") matmul_block = sch.get_block("matmul") loops = sch.get_loops(matmul_block) # Make it a strided mem copy. cache_read = sch.cache_read(matmul_block, 1, "global.vtcm") sch.compute_at(cache_read, loops[1]) sch.annotate(loops[1], "software_pipeline_stage", [0, 1]) sch.annotate(loops[1], "software_pipeline_order", [0, 1]) sch.annotate(loops[1], "software_pipeline_async_stages", [0]) sch.mod.show() ctx = _create_context(sch.mod, target=_target()) assert not ctx.space_generator.postprocs[0].apply(sch) if __name__ == "__main__": test_postproc_disallow_async_strided_mem_copy_allows() test_postproc_disallow_async_strided_mem_copy_disallows()
3,743
32.72973
94
py
tvm
tvm-main/tests/python/unittest/test_tir_op_types.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-docstring import tvm import tvm.testing from tvm import tir def test_tir_op_tvm_tuple(): x = tir.Var("x", dtype="float32") y = tir.Var("y", dtype="float32") z = tir.Var("z", dtype="float32") expr = tir.tvm_tuple(x, y, z, 1, 2, 3) assert expr.op.name == "tir.tvm_tuple" def test_tir_op_tvm_struct_get(): x = tir.Var("x", dtype="handle") expr = tir.tvm_struct_get(x, 1, 2, dtype="int32") assert expr.op.name == "tir.tvm_struct_get" def test_tir_op_tvm_struct_set(): x = tir.Var("x", dtype="handle") expr = tir.tvm_struct_set(x, 1, 2, 3) assert expr.op.name == "tir.tvm_struct_set" def test_tir_op_address_of(): buffer = tir.decl_buffer((128), "float32") expr = tir.address_of(buffer[0]) assert expr.op.name == "tir.address_of" def test_tir_op_lookup_param(): expr = tir.lookup_param("p0") assert expr.op.name == "tir.lookup_param" def test_tir_op_reinterpret(): x = tir.Var("x", dtype="int32") expr = tir.reinterpret("float32", x) assert expr.op.name == "tir.reinterpret" def test_tir_op_isnullptr(): x = tir.Var("x", dtype="int32") expr = tir.isnullptr(x) assert expr.op.name == "tir.isnullptr" def test_tir_op_call_assume(): x = tir.Var("x", dtype="int32") expr = tir.assume(cond=x) assert 
expr.op.name == "tir.assume" def test_tir_op_call_undef(): expr = tir.undef() assert expr.op.name == "tir.undef" def test_tir_op_call_likely(): x = tir.Var("x", dtype="int32") expr = tir.likely(cond=x) assert expr.op.name == "tir.likely" def test_tir_op_tvm_thread_allreduce(): x = tir.Var("x", "int32") buffer = tir.decl_buffer((128), "float32") y = tir.Var("y", "handle") z = tir.Var("z", "int32") expr = tir.tvm_thread_allreduce(x, buffer[0], True, y, z) assert expr.op.name == "tir.tvm_thread_allreduce" def test_tir_op_type_annotation(): expr = tir.type_annotation("int32") assert expr.op.name == "tir.type_annotation" def test_tir_op_tvm_access_ptr(): buffer = tir.decl_buffer((128), "float32") expr = tir.tvm_access_ptr("float32", buffer.data, 0, 1, 2) assert expr.op.name == "tir.tvm_access_ptr" def test_tir_op_tvm_throw_last_error(): expr = tir.tvm_throw_last_error() assert expr.op.name == "tir.tvm_throw_last_error" def test_tir_op_tvm_load_matrix_sync(): buffer = tir.decl_buffer((16, 16), "float32") x = tir.Var("x", "handle") expr = tir.tvm_load_matrix_sync(buffer.data, 16, 16, 16, 0, x, 128, "row_major") assert expr.op.name == "tir.tvm_load_matrix_sync" def test_tir_op_tvm_store_matrix_sync(): buffer = tir.decl_buffer((16, 16), "float32") x = tir.Var("x", "handle") expr = tir.tvm_store_matrix_sync(buffer.data, 16, 16, 16, 0, x, 128, "row_major") assert expr.op.name == "tir.tvm_store_matrix_sync" def test_tir_op_tvm_mma_sync(): buffer_0 = tir.decl_buffer((16, 16), "float32") buffer_1 = tir.decl_buffer((16, 16), "float32") buffer_2 = tir.decl_buffer((16, 16), "float32") buffer_3 = tir.decl_buffer((16, 16), "float32") expr = tir.tvm_mma_sync(buffer_0.data, 0, buffer_1.data, 0, buffer_2.data, 0, buffer_3.data, 0) assert expr.op.name == "tir.tvm_mma_sync" def test_tir_op_tvm_bmma_sync(): buffer_0 = tir.decl_buffer((16, 16), "float32") buffer_1 = tir.decl_buffer((16, 16), "float32") buffer_2 = tir.decl_buffer((16, 16), "float32") buffer_3 = tir.decl_buffer((16, 16), 
"float32") expr = tir.tvm_bmma_sync(buffer_0.data, 0, buffer_1.data, 0, buffer_2.data, 0, buffer_3.data, 0) assert expr.op.name == "tir.tvm_bmma_sync" def test_tir_op_tvm_fill_fragment(): buffer = tir.decl_buffer((16, 16), "float32") expr = tir.tvm_fill_fragment(buffer.data, 16, 16, 16, 0, 0) assert expr.op.name == "tir.tvm_fill_fragment" def test_tir_op_ptx_mma(): buffer_a = tir.decl_buffer([32], "int4", scope="local") buffer_b = tir.decl_buffer([16], "uint4", scope="local") buffer_c = tir.decl_buffer([4], "int32", scope="local") expr = tir.ptx_mma( "int32", "m8n8k32", "row", "col", "int4", "uint4", "int32", buffer_a.data, 0, buffer_b.data, 0, buffer_c.data, 0, False, ) assert expr.op.name == "tir.ptx_mma" def test_tir_op_ptx_mma_sp(): buffer_a = tir.decl_buffer([32], "int4", scope="local") buffer_b = tir.decl_buffer([16], "uint4", scope="local") buffer_c = tir.decl_buffer([4], "int32", scope="local") buffer_d = tir.decl_buffer([1], "uint32", scope="local") expr = tir.ptx_mma_sp( "int32", "m8n8k32", "row", "col", "int4", "uint4", "int32", buffer_a.data, 0, buffer_b.data, 0, buffer_c.data, 0, buffer_d.data, 0, 0, False, ) assert expr.op.name == "tir.ptx_mma_sp" def test_tir_op_mma_store(): x = tir.Var("x", dtype="int32") y = tir.Var("y", dtype="int32") buffer_w = tir.decl_buffer([16, 8], dtype="int32", scope="warp", offset_factor=1) buffer = tir.decl_buffer( [16, 16], dtype="int32", scope="global", offset_factor=1, strides=[x, y] ) expr = tir.mma_store( "int32", 16, 16, buffer.access_ptr("w"), buffer_w.data, buffer_w.elem_offset, x, ) assert expr.op.name == "tir.mma_store" def test_tir_op_mma_fill(): buffer_w = tir.decl_buffer([16, 8], dtype="int32", scope="warp", offset_factor=1) expr = tir.mma_fill("int32", 8, buffer_w.data, buffer_w.elem_offset) assert expr.op.name == "tir.mma_fill" def test_op_ptx_ldmatrix(): buffer_shared = tir.decl_buffer([16, 16], "float16", scope="shared") buffer_local = tir.decl_buffer([8], "float16", scope="local") expr = 
tir.ptx_ldmatrix( "float16", False, 4, ".b16", buffer_local.data, 0, buffer_shared.data, 0 ) assert expr.op.name == "tir.ptx_ldmatrix" def test_op_ptx_cp_async(): buffer_shared = tir.decl_buffer([16, 16], "float16", scope="shared") buffer_local = tir.decl_buffer([8], "float16", scope="local") expr = tir.ptx_cp_async("float16", buffer_shared.data, 0, buffer_local.data, 0, 16) assert expr.op.name == "tir.ptx_cp_async" def test_op_ptx_commit_group(): expr = tir.ptx_commit_group() assert expr.op.name == "tir.ptx_commit_group" def test_op_ptx_wait_group(): expr = tir.ptx_wait_group(8) assert expr.op.name == "tir.ptx_wait_group" def test_tir_op_vectorlow(): buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1) vec = buffer.vload([0, 0], dtype="int8x16") expr = tir.vectorlow("int8x8", vec) assert expr.op.name == "tir.vectorlow" def test_tir_op_vectorhigh(): buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1) vec = buffer.vload([0, 0], dtype="int8x16") expr = tir.vectorhigh("int8x8", vec) assert expr.op.name == "tir.vectorhigh" def test_tir_op_vectorcombine(): buffer = tir.decl_buffer((4, 4), "int8", offset_factor=1) vec = buffer.vload([0, 0], dtype="int8x16") expr = tir.vectorcombine("int8x8", vec, vec) assert expr.op.name == "tir.vectorcombine" def test_tir_op_shift_left(): x = tir.Var("x", dtype="int32") y = tir.Var("x", dtype="int32") expr = tir.shift_left(x, y) assert expr.op.name == "tir.shift_left" def test_tir_op_shift_right(): x = tir.Var("x", dtype="int32") y = tir.Var("x", dtype="int32") expr = tir.shift_right(x, y) assert expr.op.name == "tir.shift_right" def test_tir_op_bitwise(): x = tir.Var("x", dtype="int32") y = tir.Var("y", dtype="int32") expr = tir.bitwise_and(x, y) assert expr.op.name == "tir.bitwise_and" expr = tir.bitwise_or(x, y) assert expr.op.name == "tir.bitwise_or" expr = tir.bitwise_not(x) assert expr.op.name == "tir.bitwise_not" expr = tir.bitwise_xor(x, y) assert expr.op.name == "tir.bitwise_xor" def 
test_tir_op_TVMBackendAllocWorkspace(): expr = tir.TVMBackendAllocWorkspace(0, 1, 2, 3, 4) assert expr.op.name == "tir.TVMBackendAllocWorkspace" def test_tir_op_TVMBackendFreeWorkspace(): buffer = tir.decl_buffer((128), "float32") expr = tir.TVMBackendFreeWorkspace(0, 1, buffer.data) assert expr.op.name == "tir.TVMBackendFreeWorkspace" if __name__ == "__main__": tvm.testing.main()
9,277
29.123377
100
py
tvm
tvm-main/tests/python/unittest/test_arith_detect_cse.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm.script import tir as T def test_detect_cs(): x = T.Var("x", dtype="int32") y = T.Var("y", dtype="int32") z = T.Var("z", dtype="int32") c = T.floor(x + y + 0.5) + x + z * (T.floor(x + y + 0.5)) m = tvm.arith.detect_common_subexpr(c, 2) assert c.a.a in m assert m[c.a.a] == 2 if __name__ == "__main__": tvm.testing.main()
1,180
33.735294
62
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_c_host.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import tvm import tvm.testing from tvm import te import numpy as np from tvm.contrib import utils def test_add(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") C = te.compute(A.shape, lambda *i: A(*i) + B(*i), name="C") s = te.create_schedule(C.op) def check_c(): mhost = tvm.build(s, [A, B, C], "c", name="test_fadd") temp = utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) fadd = m["test_fadd"] dev = tvm.cpu(0) # launch the kernel. 
n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_c() def test_add_pipeline(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A") B = te.placeholder((n,), name="B") AA = te.compute((n,), lambda *i: A(*i), name="A") BB = te.compute((n,), lambda *i: B(*i), name="B") T = te.compute(A.shape, lambda *i: AA(*i) + BB(*i), name="T") C = te.compute(A.shape, lambda *i: T(*i), name="C") s = te.create_schedule(C.op) xo, xi = s[C].split(C.op.axis[0], factor=4) xo1, xo2 = s[C].split(xo, factor=13) s[C].parallel(xo2) s[C].pragma(xo1, "parallel_launch_point") s[C].pragma(xo2, "parallel_stride_pattern") s[C].pragma(xo2, "parallel_barrier_when_finish") s[C].vectorize(xi) def check_c(): # Specifically allow offset to test codepath when offset is available Ab = tvm.tir.decl_buffer( A.shape, A.dtype, elem_offset=te.size_var("Aoffset"), offset_factor=8, name="A" ) binds = {A: Ab} # BUILD and invoke the kernel. f1 = tvm.lower(s, [A, B, C], name="test_fadd_pipeline") mhost = tvm.build(f1, target="c") temp = utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) fadd = m["test_fadd_pipeline"] dev = tvm.cpu(0) # launch the kernel. 
n = nn a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev) b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev) c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev) fadd(a, b, c) tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy()) check_c() def test_reinterpret(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A", dtype="int32") B = te.compute( A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.reinterpret", 2 + A(*i)), name="B" ) s = te.create_schedule(B.op) def check_c(): mhost = tvm.build(s, [A, B], "c", name="test_reinterpret") temp = utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) fadd = m["test_reinterpret"] dev = tvm.cpu(0) n = nn a = tvm.nd.array(np.random.randint(-(2**30), 2**30, size=n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fadd(a, b) tvm.testing.assert_allclose(b.numpy(), (2 + a.numpy()).view("float32")) check_c() def test_ceil(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A", dtype="float32") B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.ceil", A(*i)), name="B") s = te.create_schedule(B.op) def check_c(): mhost = tvm.build(s, [A, B], "c", name="test_ceil") temp = utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) fceil = m["test_ceil"] dev = tvm.cpu(0) n = nn a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fceil(a, b) tvm.testing.assert_allclose(b.numpy(), (np.ceil(a.numpy()).view("float32"))) check_c() def test_floor(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A", dtype="float32") B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.floor", A(*i)), name="B") s = te.create_schedule(B.op) def check_c(): mhost = tvm.build(s, [A, B], "c", name="test_floor") temp = 
utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) ffloor = m["test_floor"] dev = tvm.cpu(0) n = nn a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) ffloor(a, b) tvm.testing.assert_allclose(b.numpy(), (np.floor(a.numpy()).view("float32"))) check_c() def test_round(): nn = 1024 n = tvm.runtime.convert(nn) A = te.placeholder((n,), name="A", dtype="float32") B = te.compute(A.shape, lambda *i: tvm.tir.call_intrin("float32", "tir.round", A(*i)), name="B") s = te.create_schedule(B.op) def check_c(): mhost = tvm.build(s, [A, B], "c", name="test_round") temp = utils.tempdir() path_dso = temp.relpath("temp.so") mhost.export_library(path_dso) m = tvm.runtime.load_module(path_dso) fround = m["test_round"] dev = tvm.cpu(0) n = nn a = tvm.nd.array(np.random.rand(n).astype(A.dtype), dev) b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev) fround(a, b) tvm.testing.assert_allclose(b.numpy(), (np.round(a.numpy()).view("float32"))) check_c() def test_call_packed(): def fake_func(fname="fake.func"): ib = tvm.tir.ir_builder.create() A = ib.pointer("float32", name="A") fake_func1 = tvm.tir.call_packed(fname, A[0]) ib.emit(fake_func1) body = ib.get() return A, body def check_global_packed_func(): fname = "fake.func" A, body = fake_func(fname) func1 = tvm.tir.PrimFunc([A], body).with_attr("global_symbol", "func1") B, body = fake_func() func2 = tvm.tir.PrimFunc([B], body).with_attr("global_symbol", "func2") mod = tvm.IRModule({"fake_func1": func1, "fake_func2": func2}) fcode = tvm.build(mod, None, "c") src = fcode.get_source() # there are two locations calling the packed func assert src.count(fname) == 2 suffix = "_packed" packed_func_name = fname + suffix # func name will be standardized by GetUniqueName and not exists anymore assert src.find(packed_func_name) == -1 packed_func_real_name = "_".join(fname.split(".")) + suffix func_declaration = "static void* %s = 
NULL;" % packed_func_real_name # src only has 1 valid declaration assert src.count(func_declaration) == 1 check_global_packed_func() if __name__ == "__main__": test_add() test_add_pipeline() test_reinterpret() test_ceil() test_floor() test_round() test_call_packed()
8,199
33.453782
100
py
tvm
tvm-main/tests/python/unittest/test_target_codegen_metal.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import tvm import tvm.script import tvm.testing from tvm import te from tvm.script import tir as T @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_metal_inf_nan(): target = "metal" def check_inf_nan(dev, n, value, dtype): A = te.placeholder((n,), name="A", dtype=dtype) inf_value = tvm.tir.const(value, dtype=dtype) C = te.compute((n,), lambda i: inf_value, name="C") prim_func = te.create_prim_func([A, C]) sch = tvm.tir.Schedule(prim_func) (x,) = sch.get_loops(sch.get_block("C")) sch.bind(x, "threadIdx.x") fun = tvm.build(sch.mod, target=target) a = tvm.nd.empty((n,), A.dtype, dev) c = tvm.nd.empty((n,), A.dtype, dev) # Only need to test compiling here fun(a, c) dev = tvm.device(target, 0) check_inf_nan(dev, 1, -float("inf"), "float32") check_inf_nan(dev, 1, -float("inf"), "float16") check_inf_nan(dev, 1, float("inf"), "float32") check_inf_nan(dev, 1, float("inf"), "float16") check_inf_nan(dev, 1, float("nan"), "float32") check_inf_nan(dev, 1, float("nan"), "float16") @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_unaligned_vectorize(): @tvm.script.ir_module class IRModule: @T.prim_func def main(A: T.Buffer((2, 3), "float32"), B: T.Buffer((6,), 
"float32")): T.func_attr({"global_symbol": "main"}) for i0_1 in T.thread_binding(3, thread="threadIdx.x"): for i0_0 in T.vectorized(2): with T.block("block"): vi0 = T.axis.spatial(6, i0_0 * 3 + i0_1) B[vi0] = A[vi0 // 3, vi0 % 3] target = "metal" dev = tvm.metal() a = (np.arange(6).reshape(2, 3)).astype("float32") a_nd = tvm.nd.array(a, dev) b_nd = tvm.nd.empty((6,), "float32", dev) f = tvm.build(IRModule, target=target) f(a_nd, b_nd) np.testing.assert_allclose(b_nd.numpy(), a.reshape(6), atol=1e-5, rtol=1e-5) @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_metal_erf(): target = "metal" def check_erf(dev, n, dtype): A = te.placeholder((n,), name="A", dtype=dtype) C = te.compute(A.shape, lambda *i: te.erf(A(*i)), name="C") func = te.create_prim_func([A, C]) sch = tvm.tir.Schedule(func) (x,) = sch.get_loops(sch.get_block("C")) sch.bind(x, "threadIdx.x") fun = tvm.build(sch.mod, target=target) a = tvm.nd.empty((n,), A.dtype, dev) c = tvm.nd.empty((n,), A.dtype, dev) # Only need to test compiling here fun(a, c) dev = tvm.device(target, 0) check_erf(dev, 1, "float32") check_erf(dev, 1, "float16") @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_ramp(): target = "metal" @tvm.script.ir_module class IRModule: @T.prim_func def main(A: T.Buffer((1, 2), "int32")): T.func_attr({"global_symbol": "main"}) for i in T.thread_binding(1, thread="threadIdx.x"): with T.block("block"): tx = T.axis.spatial(1, i) r = T.ramp(tx, 3, 2) A[0, T.ramp(0, 1, 2)] = r f = tvm.build(IRModule, target=target) dev = tvm.metal() a_nd = tvm.nd.empty((1, 2), "int32", dev) f(a_nd) assert tuple(a_nd.numpy()[0, :]) == (0, 3) @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_select_vectorize(): @tvm.script.ir_module class IRModule: @T.prim_func def main(A: T.Buffer((6), "float32"), B: T.Buffer((6,), "float32")): T.func_attr({"global_symbol": "main"}) for i0_1 in T.thread_binding(3, thread="threadIdx.x"): for i0_0 in T.vectorized(2): with T.block("block"): vi0 = 
T.axis.spatial(6, i0_0 * 3 + i0_1) B[vi0] = T.Select((vi0 % 2) == 0, A[vi0], T.float32(0)) target = "metal" dev = tvm.metal() a = np.arange(6).astype("float32") a_nd = tvm.nd.array(a, dev) b_nd = tvm.nd.empty((6,), "float32", dev) f = tvm.build(IRModule, target=target) f(a_nd, b_nd) a.reshape(3, 2)[:, 1] = 0 np.testing.assert_allclose(b_nd.numpy(), a, atol=1e-5, rtol=1e-5) @tvm.testing.requires_gpu @tvm.testing.requires_metal def test_vectorized_uint8(): @T.prim_func def func(A: T.Buffer((16), "uint8"), B: T.Buffer((16), "float32")): for i in T.thread_binding(4, thread="threadIdx.x"): for j in T.vectorized(4): with T.block("block"): vi = T.axis.spatial(16, i * 4 + j) B[vi] = T.Cast("float32", A[vi]) dev = tvm.metal() a = np.arange(16).astype("uint8") a_nd = tvm.nd.array(a, dev) b_nd = tvm.nd.empty((16,), "float32", dev) f = tvm.build(func, target="metal") f(a_nd, b_nd) np.testing.assert_allclose(b_nd.numpy(), a.astype("float32"), atol=1e-5, rtol=1e-5) if __name__ == "__main__": tvm.testing.main()
5,866
32.718391
87
py
tvm
tvm-main/tests/python/unittest/test_micro_ms_tuning.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import numpy as np import pytest from types import MappingProxyType import pathlib import json import tvm import tvm.testing from tvm import relay from tvm.relay.backend import Executor from tvm.contrib import graph_executor, utils from tvm import meta_schedule as ms @tvm.testing.requires_micro def test_micro_tuning_with_meta_schedule(): from tests.micro.zephyr.test_ms_tuning import create_relay_module from tvm.contrib.micro.meta_schedule.local_builder_micro import get_local_builder_micro from tvm.contrib.micro.meta_schedule.rpc_runner_micro import get_rpc_runner_micro platform = "crt" target = tvm.target.target.micro(model="host") options = {} work_dir = utils.tempdir() mod, params, model_info = create_relay_module() input_name = model_info["in_tensor"] input_shape = model_info["in_shape"] input_dtype = model_info["in_dtype"] data_sample = np.random.rand(*input_shape).astype(input_dtype) runtime = relay.backend.Runtime("crt", {"system-lib": True}) executor = Executor("aot", {"link-params": True}) # This line is necessary for link-params to take effect during # task extraction and relay.build(...). 
mod = mod.with_attr("executor", executor) builder = get_local_builder_micro() with ms.Profiler() as profiler: with get_rpc_runner_micro( platform=platform, options=options, session_timeout_sec=120 ) as runner: db: ms.Database = ms.relay_integration.tune_relay( mod=mod, params=params, target=target, builder=builder, runner=runner, strategy="evolutionary", num_trials_per_iter=2, max_trials_per_task=10, max_trials_global=100, work_dir=str(work_dir), module_equality="ignore-ndarray", ) # Build model using meta_schedule logs ms_mod: tvm.runtime.Module = ms.relay_integration.compile_relay( database=db, mod=mod, target=target, params=params, pass_config=MappingProxyType( { "relay.backend.use_meta_schedule": True, "relay.backend.tir_converter": "default", "tir.disable_vectorize": True, } ), executor=executor, runtime=runtime, ) print(profiler.table()) project = tvm.micro.generate_project( str(tvm.micro.get_microtvm_template_projects(platform)), ms_mod, str(work_dir / "project"), options=options, ) project.build() project.flash() with tvm.micro.Session(project.transport()) as session: aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor()) aot_executor.get_input(0).copyfrom(data_sample) result = aot_executor.module.time_evaluator("run", session.device, number=3)() output = aot_executor.get_output(0).numpy() # Build reference model (without tuning) dev = tvm.cpu() target = tvm.target.target.micro(model="host") with tvm.transform.PassContext( opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["AlterOpLayout"] ): ref_mod = relay.build( mod, target=target, params=params, runtime=runtime, ) ref_mod.export_library(work_dir / "compiled_lib2.so") mod2: tvm.runtime.Module = tvm.runtime.load_module(work_dir / "compiled_lib2.so") graph_mod = graph_executor.GraphModule(mod2["default"](dev)) graph_mod.set_input(input_name, data_sample) graph_mod.run() ref_output = graph_mod.get_output(0).numpy() assert np.allclose(output, ref_output, 
rtol=1e-4, atol=2e-4), "FAILED" work_dir.remove() if __name__ == "__main__": tvm.testing.main()
4,740
35.469231
97
py
tvm
tvm-main/tests/python/unittest/test_meta_schedule_schedule_rule_auto_inline.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring import pytest import tvm import tvm.testing from tvm import meta_schedule as ms from tvm.ir.base import assert_structural_equal from tvm.meta_schedule.testing.space_generation import generate_design_space from tvm.script import tir as T from tvm.target import Target from tvm.tir import Schedule # fmt: off # pylint: disable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks @tvm.script.ir_module class Conv2DBiasBnReLU: @T.prim_func def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None: X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32") W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32") B = T.match_buffer(var_B, [512, 1, 1], dtype="float32") bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32") bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32") compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32") pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32") compute_1 = 
T.alloc_buffer([1, 512, 56, 56], dtype="float32") bias_add = T.alloc_buffer([1, 512, 56, 56], dtype="float32") bn_mul = T.alloc_buffer([1, 512, 56, 56], dtype="float32") bn_add = T.alloc_buffer([1, 512, 56, 56], dtype="float32") for i0, i1, i2, i3 in T.grid(1, 512, 58, 58): with T.block("pad_temp"): i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32") for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 512, 56, 56, 512, 3, 3): with T.block("compute"): nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]) with T.init(): compute_1[nn, ff, yy, xx] = T.float32(0) compute_1[nn, ff, yy, xx] = compute_1[nn, ff, yy, xx] + pad_temp[nn, rc, yy + ry, xx + rx] * W[ff, rc, ry, rx] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("bias_add"): i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3]) bias_add[i, j, k, l] = compute_1[i, j, k, l] + B[j, 0, 0] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("bn_mul"): i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3]) bn_mul[i, j, k, l] = bias_add[i, j, k, l] * bn_scale[j, 0, 0] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("bn_add"): i, j, k, l = T.axis.remap("SSSS", [i0, i1, i2, i3]) bn_add[i, j, k, l] = bn_mul[i, j, k, l] + bn_offset[j, 0, 0] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("compute_1"): i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3]) compute[i0_2, i1_2, i2_2, i3_2] = T.max(bn_add[i0_2, i1_2, i2_2, i3_2], T.float32(0)) @tvm.script.ir_module class Conv2DBiasBnReLUInlined: @T.prim_func def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None: X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32") W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32") B = 
T.match_buffer(var_B, [512, 1, 1], dtype="float32") bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32") bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32") compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32") pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32") compute_1 = T.alloc_buffer([1, 512, 56, 56], dtype="float32") for i0, i1, i2, i3 in T.grid(1, 512, 58, 58): with T.block("pad_temp"): i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32") for i0, i1, i2, i3, i4, i5, i6 in T.grid(1, 512, 56, 56, 512, 3, 3): with T.block("compute"): nn, ff, yy, xx, rc, ry, rx = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]) with T.init(): compute_1[nn, ff, yy, xx] = T.float32(0) compute_1[nn, ff, yy, xx] = compute_1[nn, ff, yy, xx] + pad_temp[nn, rc, yy + ry, xx + rx] * W[ff, rc, ry, rx] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("compute_1"): i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3]) compute[i0_2, i1_2, i2_2, i3_2] = T.max((compute_1[i0_2, i1_2, i2_2, i3_2] + B[i1_2, 0, 0]) * bn_scale[i1_2, 0, 0] + bn_offset[i1_2, 0, 0], T.float32(0)) @tvm.script.ir_module class MultiLevelTiledConv2D: @T.prim_func def main(var_X: T.handle, var_W: T.handle, var_B: T.handle, var_bn_scale: T.handle, var_bn_offset: T.handle, var_compute: T.handle) -> None: X = T.match_buffer(var_X, [1, 512, 56, 56], dtype="float32") W = T.match_buffer(var_W, [512, 512, 3, 3], dtype="float32") B = T.match_buffer(var_B, [512, 1, 1], dtype="float32") bn_scale = T.match_buffer(var_bn_scale, [512, 1, 1], dtype="float32") bn_offset = T.match_buffer(var_bn_offset, [512, 1, 1], dtype="float32") compute = T.match_buffer(var_compute, [1, 512, 56, 56], dtype="float32") pad_temp = T.alloc_buffer([1, 512, 58, 58], dtype="float32") compute_1 
= T.alloc_buffer([1, 512, 56, 56], dtype="float32") compute_local = T.alloc_buffer([1, 512, 56, 56], dtype="float32", scope="local") pad_temp_shared = T.alloc_buffer([1, 512, 58, 58], dtype="float32", scope="shared") W_shared = T.alloc_buffer([512, 512, 3, 3], dtype="float32", scope="shared") for i0, i1, i2, i3 in T.grid(1, 512, 58, 58): with T.block("pad_temp"): i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3]) pad_temp[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(i2_1 >= 1 and i2_1 < 57 and i3_1 >= 1 and i3_1 < 57, X[i0_1, i1_1, i2_1 - 1, i3_1 - 1], T.float32(0), dtype="float32") for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(0, 224, thread="blockIdx.x"): for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(0, 2, thread="vthread.x"): for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(0, 8, thread="threadIdx.x"): for i4_0, i5_0, i6_0 in T.grid(1, 3, 1): for ax0_ax1_ax2_ax3_fused_0 in T.serial(0, 40960, annotations={"meta_schedule.cooperative_fetch":1}): for ax0_ax1_ax2_ax3_fused_1 in T.vectorized(0, 3): with T.block("pad_temp_shared"): v0 = T.axis.spatial(1, 0) v1 = T.axis.spatial(512, (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) // 30 // 8 % 512) v2 = T.axis.spatial(58, i0_0_i1_0_i2_0_i3_0_fused % 14 // 2 * 8 + i5_0 + (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) // 30 % 8) v3 = T.axis.spatial(58, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + (ax0_ax1_ax2_ax3_fused_0 * 3 + ax0_ax1_ax2_ax3_fused_1) % 30) pad_temp_shared[v0, v1, v2, v3] = pad_temp[v0, v1, v2, v3] for ax0_ax1_ax2_ax3_fused_0 in T.serial(0, 12288, annotations={"meta_schedule.cooperative_fetch":1}): for ax0_ax1_ax2_ax3_fused_1 in T.vectorized(0, 4): with T.block("W_shared"): v0 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + (ax0_ax1_ax2_ax3_fused_0 * 4 + ax0_ax1_ax2_ax3_fused_1) // 1536) v1 = T.axis.spatial(512, (ax0_ax1_ax2_ax3_fused_0 * 4 + ax0_ax1_ax2_ax3_fused_1) // 3 % 512) v2 = T.axis.spatial(3, i5_0) v3 = T.axis.spatial(3, (ax0_ax1_ax2_ax3_fused_0 * 4 + 
ax0_ax1_ax2_ax3_fused_1) % 3) W_shared[v0, v1, v2, v3] = W[v0, v1, v2, v3] for i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(32, 1, 1, 1, 1, 1, 1, 16, 1, 3, 1, 8, 2, 28): with T.block("compute"): nn = T.axis.spatial(1, 0) ff = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 2 * 8 + i1_4) yy = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused // 2 % 7 * 8 + i0_1_i1_1_i2_1_i3_1_fused * 4 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 2 + i2_4) xx = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + i3_4) rc = T.axis.reduce(512, i4_1 * 16 + i4_2) ry, rx = T.axis.remap("RR", [i5_0, i6_2]) with T.init(): compute_local[nn, ff, yy, xx] = T.float32(0) compute_local[nn, ff, yy, xx] = compute_local[nn, ff, yy, xx] + pad_temp_shared[nn, rc, yy + ry, xx + rx] * W_shared[ff, rc, ry, rx] for ax0, ax1, ax2, ax3 in T.grid(1, 8, 2, 28): with T.block("compute_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 2 * 8 + ax1) v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 14 // 2 * 8 + i0_1_i1_1_i2_1_i3_1_fused * 4 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 2 + ax2) v3 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + ax3) compute_1[v0, v1, v2, v3] = compute_local[v0, v1, v2, v3] for i0, i1, i2, i3 in T.grid(1, 512, 56, 56): with T.block("compute_1"): i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3]) compute[i0_2, i1_2, i2_2, i3_2] = T.max((compute_1[i0_2, i1_2, i2_2, i3_2] + B[i1_2, 0, 0]) * bn_scale[i1_2, 0, 0] + bn_offset[i1_2, 0, 0], T.float32(0)) @tvm.script.ir_module class MultiLevelTiledConv2DAfterInline: @T.prim_func def main(X: T.Buffer((1, 512, 56, 56), "float32"), W: T.Buffer((512, 512, 3, 3), "float32"), B: T.Buffer((512, 1, 1), "float32"), bn_scale: T.Buffer((512, 1, 1), "float32"), bn_offset: T.Buffer((512, 1, 1), "float32"), compute: T.Buffer((1, 512, 56, 56), "float32")) -> None: compute_local = 
T.alloc_buffer([1, 512, 56, 56], dtype="float32", scope="local") for i0_0_i1_0_i2_0_i3_0_fused in T.thread_binding(224, thread="blockIdx.x"): for i0_1_i1_1_i2_1_i3_1_fused in T.thread_binding(2, thread="vthread.x"): for i0_2_i1_2_i2_2_i3_2_fused in T.thread_binding(8, thread="threadIdx.x"): for i4_0, i5_0, i6_0, i4_1, i5_1, i6_1, i0_3, i1_3, i2_3, i3_3, i4_2, i5_2, i6_2, i0_4, i1_4, i2_4, i3_4 in T.grid(1, 3, 1, 32, 1, 1, 1, 1, 1, 1, 16, 1, 3, 1, 8, 2, 28): with T.block("compute"): nn = T.axis.spatial(1, 0) ff = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 2 * 8 + i1_4) yy = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused // 2 % 7 * 8 + i0_1_i1_1_i2_1_i3_1_fused * 4 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 2 + i2_4) xx = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + i3_4) rc = T.axis.reduce(512, i4_1 * 16 + i4_2) ry, rx = T.axis.remap("RR", [i5_0, i6_2]) with T.init(): compute_local[nn, ff, yy, xx] = T.float32(0) compute_local[nn, ff, yy, xx] = compute_local[nn, ff, yy, xx] + T.if_then_else(yy + ry >= 1 and yy + ry < 57 and xx + rx >= 1 and xx + rx < 57, X[nn, rc, yy + ry - 1, xx + rx - 1], T.float32(0), dtype="float32") * W[ff, rc, ry, rx] for ax0, ax1, ax2, ax3 in T.grid(1, 8, 2, 28): with T.block("compute_local"): v0 = T.axis.spatial(1, ax0) v1 = T.axis.spatial(512, i0_0_i1_0_i2_0_i3_0_fused // 14 * 32 + i0_2_i1_2_i2_2_i3_2_fused // 2 * 8 + ax1) v2 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 14 // 2 * 8 + i0_1_i1_1_i2_1_i3_1_fused * 4 + i0_2_i1_2_i2_2_i3_2_fused % 2 * 2 + ax2) v3 = T.axis.spatial(56, i0_0_i1_0_i2_0_i3_0_fused % 2 * 28 + ax3) compute[v0, v1, v2, v3] = T.max((compute_local[v0, v1, v2, v3] + B[v1, 0, 0]) * bn_scale[v1, 0, 0] + bn_offset[v1, 0, 0], T.float32(0)) @tvm.script.ir_module class SoftmaxBeforeInline: @T.prim_func def main(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None: T_softmax_maxelem = T.alloc_buffer([256], dtype="float32") T_softmax_exp = 
T.alloc_buffer([256, 256], dtype="float32") T_softmax_expsum = T.alloc_buffer([256], dtype="float32") for i0, i1 in T.grid(256, 256): with T.block("T_softmax_maxelem"): i0_1, k = T.axis.remap("SR", [i0, i1]) with T.init(): T_softmax_maxelem[i0_1] = T.min_value("float32") T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k]) for i0, i1 in T.grid(256, 256): with T.block("T_softmax_exp"): i0_2, i1_1 = T.axis.remap("SS", [i0, i1]) T_softmax_exp[i0_2, i1_1] = T.exp(A[i0_2, i1_1] - T_softmax_maxelem[i0_2], dtype="float32") for i0_3, i1 in T.grid(256, 256): with T.block("T_softmax_expsum"): i0_4, k = T.axis.remap("SR", [i0_3, i1]) with T.init(): T_softmax_expsum[i0_4] = T.float32(0) T_softmax_expsum[i0_4] = T_softmax_expsum[i0_4] + T_softmax_exp[i0_4, k] for i0_5, i1 in T.grid(256, 256): with T.block("T_softmax_norm"): i0_6, i1_2 = T.axis.remap("SS", [i0_5, i1]) T_softmax_norm[i0_6, i1_2] = T_softmax_exp[i0_6, i1_2] / T_softmax_expsum[i0_6] @tvm.script.ir_module class SoftmaxAfterInline: @T.prim_func def main(A: T.Buffer((256, 256), "float32"), T_softmax_norm: T.Buffer((256, 256), "float32")) -> None: T_softmax_maxelem = T.alloc_buffer([256], dtype="float32") T_softmax_expsum = T.alloc_buffer([256], dtype="float32") for i0, i1 in T.grid(256, 256): with T.block("T_softmax_maxelem"): i0_1, k = T.axis.remap("SR", [i0, i1]) with T.init(): T_softmax_maxelem[i0_1] = T.min_value("float32") T_softmax_maxelem[i0_1] = T.max(T_softmax_maxelem[i0_1], A[i0_1, k]) for i0, i1 in T.grid(256, 256): with T.block("T_softmax_expsum"): i0_2, k = T.axis.remap("SR", [i0, i1]) with T.init(): T_softmax_expsum[i0_2] = T.float32(0) T_softmax_expsum[i0_2] = T_softmax_expsum[i0_2] + T.exp(A[i0_2, k] - T_softmax_maxelem[i0_2], dtype="float32") for i0_3, i1 in T.grid(256, 256): with T.block("T_softmax_norm"): i0_4, i1_1 = T.axis.remap("SS", [i0_3, i1]) T_softmax_norm[i0_4, i1_1] = T.exp(A[i0_4, i1_1] - T_softmax_maxelem[i0_4], dtype="float32") / T_softmax_expsum[i0_4] 
@tvm.script.ir_module class BeforePureSpatial: @T.prim_func def main( placeholder: T.Buffer((1, 384), "int64"), placeholder_1: T.Buffer((30522, 768), "float32"), placeholder_2: T.Buffer((1, 384, 768), "float32"), T_add: T.Buffer((1, 384, 768), "float32"), ) -> None: T.func_attr({"global_symbol": "main", "tir.noalias": True}) compile_engine_const = T.alloc_buffer([], dtype="int64") T_less = T.alloc_buffer([1, 384], dtype="bool") compile_engine_const_1 = T.alloc_buffer([], dtype="int64") T_add_1 = T.alloc_buffer([1, 384], dtype="int64") T_where = T.alloc_buffer([1, 384], dtype="int64") T_take = T.alloc_buffer([1, 384, 768], dtype="float32") with T.block("compile_engine_const"): vi = T.axis.spatial(1, 0) T.reads() T.writes(compile_engine_const[()]) compile_engine_const[()] = T.int64(0) for i0, i1 in T.grid(1, 384): with T.block("T_less"): ax0, ax1 = T.axis.remap("SS", [i0, i1]) T.reads(placeholder[ax0, ax1], compile_engine_const[()]) T.writes(T_less[ax0, ax1]) T_less[ax0, ax1] = placeholder[ax0, ax1] < compile_engine_const[()] with T.block("compile_engine_const_1"): vi = T.axis.spatial(1, 0) T.reads() T.writes(compile_engine_const_1[()]) compile_engine_const_1[()] = T.int64(30522) for i0, i1 in T.grid(1, 384): with T.block("T_add"): ax0, ax1 = T.axis.remap("SS", [i0, i1]) T.reads(placeholder[ax0, ax1], compile_engine_const_1[()]) T.writes(T_add_1[ax0, ax1]) T_add_1[ax0, ax1] = placeholder[ax0, ax1] + compile_engine_const_1[()] for i0, i1 in T.grid(1, 384): with T.block("T_where"): ax0, ax1 = T.axis.remap("SS", [i0, i1]) T.reads(T_less[ax0, ax1], T_add_1[ax0, ax1], placeholder[ax0, ax1]) T.writes(T_where[ax0, ax1]) T_where[ax0, ax1] = T.Select( T.cast(T_less[ax0, ax1], "int32") != 0, T_add_1[ax0, ax1], placeholder[ax0, ax1] ) for i0, i1, i2 in T.grid(1, 384, 768): with T.block("T_take"): ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads( placeholder_1[T.min(T.max(T.int64(0), T_where[ax0, ax1]), T.int64(30521)), ax2], T_where[ax0, ax1], ) T.writes(T_take[ax0, 
ax1, ax2]) T_take[ax0, ax1, ax2] = placeholder_1[ T.min(T.max(T.int64(0), T_where[ax0, ax1]), T.int64(30521)), ax2 ] for i0, i1, i2 in T.grid(1, 384, 768): with T.block("T_add_1"): ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads(T_take[ax0, ax1, ax2], placeholder_2[ax0, ax1, ax2]) T.writes(T_add[ax0, ax1, ax2]) T_add[ax0, ax1, ax2] = T_take[ax0, ax1, ax2] + placeholder_2[ax0, ax1, ax2] @tvm.script.ir_module class AfterPureSpatial: @T.prim_func def main(placeholder: T.Buffer((1, 384), "int64"), placeholder_1: T.Buffer((30522, 768), "float32"), placeholder_2: T.Buffer((1, 384, 768), "float32"), T_add: T.Buffer((1, 384, 768), "float32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") for i0, i1, i2 in T.grid(1, 384, 768): with T.block("T_add_1"): ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads(placeholder[ax0, ax1], placeholder_1[T.min(T.max(T.int64(0), placeholder[ax0, ax1]), T.int64(30521)) : T.min(T.max(T.int64(0), placeholder[ax0, ax1] + T.int64(30522)), T.int64(30521)) + T.int64(1), ax2], placeholder_2[ax0, ax1, ax2]) T.writes(T_add[ax0, ax1, ax2]) T_add[ax0, ax1, ax2] = placeholder_1[T.min(T.max(T.int64(0), T.Select(T.cast(placeholder[ax0, ax1] < T.int64(0), "int32") != 0, placeholder[ax0, ax1] + T.int64(30522), placeholder[ax0, ax1])), T.int64(30521)), ax2] + placeholder_2[ax0, ax1, ax2] @tvm.script.ir_module class ConstConsumer: @T.prim_func def main(T_full: T.Buffer((1, 12, 4096), "int64")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") for i0, i1, i2 in T.grid(1, 12, 4096): with T.block("T_full"): ax0, ax1, ax2 = T.axis.remap("SSS", [i0, i1, i2]) T.reads() T.writes(T_full[ax0, ax1, ax2]) T_full[ax0, ax1, ax2] = T.int64(0) @tvm.script.ir_module class Conv2dInt8: @T.prim_func def main(p0: T.Buffer((16, 14, 14, 256), "int8"), p1: T.Buffer((1024, 1, 1, 256), "int8"), p2: T.Buffer((1, 1, 1, 1024), 
"int32"), p3: T.Buffer((1, 1, 1, 1024), "int32"), p4: T.Buffer(1024, "int32"), p5: T.Buffer(1024, "int32"), p6: T.Buffer(1024, "int32"), p7: T.Buffer(1, "int32"), p8: T.Buffer((16, 14, 14, 1024), "int32"), compute: T.Buffer((16, 14, 14, 1024), "int32")) -> None: # function attr dict T.func_attr({"global_symbol": "main", "tir.noalias": True}) # body # with T.block("root") compile_engine_const = T.alloc_buffer([], dtype="int32") pad_temp = T.alloc_buffer([16, 14, 14, 256], dtype="int8") conv2d_nhwc = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") T_subtract = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") T_add = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") compute_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") T_add_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") compute_2 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") T_subtract_1 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") compute_3 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") T_add_2 = T.alloc_buffer([16, 14, 14, 1024], dtype="int32") with T.block("compile_engine_const"): vi = T.axis.spatial(1, 0) T.reads() T.writes(compile_engine_const[()]) compile_engine_const[()] = 59 for i0, i1, i2, i3 in T.grid(16, 14, 14, 256): with T.block("pad_temp"): i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(p0[i0_1, i1_1, i2_1, i3_1]) T.writes(pad_temp[i0_1, i1_1, i2_1, i3_1]) pad_temp[i0_1, i1_1, i2_1, i3_1] = p0[i0_1, i1_1, i2_1, i3_1] for i0, i1, i2, i3, i4, i5, i6 in T.grid(16, 14, 14, 1024, 1, 1, 256): with T.block("conv2d_nhwc"): nn, yy, xx, ff, ry, rx, rc = T.axis.remap("SSSSRRR", [i0, i1, i2, i3, i4, i5, i6]) T.reads(pad_temp[nn, yy + ry, xx + rx, rc], p1[ff, ry, rx, rc]) T.writes(conv2d_nhwc[nn, yy, xx, ff]) with T.init(): conv2d_nhwc[nn, yy, xx, ff] = 0 conv2d_nhwc[nn, yy, xx, ff] = conv2d_nhwc[nn, yy, xx, ff] + T.cast(pad_temp[nn, yy + ry, xx + rx, rc], "int32") * T.cast(p1[ff, ry, rx, rc], "int32") for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024): with 
T.block("T_subtract"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(conv2d_nhwc[ax0, ax1, ax2, ax3], p2[0, 0, 0, ax3]) T.writes(T_subtract[ax0, ax1, ax2, ax3]) T_subtract[ax0, ax1, ax2, ax3] = conv2d_nhwc[ax0, ax1, ax2, ax3] - p2[0, 0, 0, ax3] for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024): with T.block("T_add"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(T_subtract[ax0, ax1, ax2, ax3], p3[0, 0, 0, ax3]) T.writes(T_add[ax0, ax1, ax2, ax3]) T_add[ax0, ax1, ax2, ax3] = T_subtract[ax0, ax1, ax2, ax3] + p3[0, 0, 0, ax3] for i0, i1, i2, i3 in T.grid(16, 14, 14, 1024): with T.block("compute"): i0_2, i1_2, i2_2, i3_2 = T.axis.remap("SSSS", [i0, i1, i2, i3]) T.reads(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2]) T.writes(compute_1[i0_2, i1_2, i2_2, i3_2]) compute_1[i0_2, i1_2, i2_2, i3_2] = T.q_multiply_shift_per_axis(T_add[i0_2, i1_2, i2_2, i3_2], p4[i3_2], p5[i3_2], p6[i3_2], 31, False, True, dtype="int32") for i0_3, i1_3, i2_3, i3_3 in T.grid(16, 14, 14, 1024): with T.block("T_add_1"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_3, i1_3, i2_3, i3_3]) T.reads(compile_engine_const[()], compute_1[ax0, ax1, ax2, ax3]) T.writes(T_add_1[ax0, ax1, ax2, ax3]) T_add_1[ax0, ax1, ax2, ax3] = compile_engine_const[()] + compute_1[ax0, ax1, ax2, ax3] for i0_4, i1_4, i2_4, i3_4 in T.grid(16, 14, 14, 1024): with T.block("compute_1"): i0_5, i1_5, i2_5, i3_5 = T.axis.remap("SSSS", [i0_4, i1_4, i2_4, i3_4]) T.reads(T_add_1[i0_5, i1_5, i2_5, i3_5]) T.writes(compute_2[i0_5, i1_5, i2_5, i3_5]) compute_2[i0_5, i1_5, i2_5, i3_5] = T.max(T.min(T_add_1[i0_5, i1_5, i2_5, i3_5], 255), 0) for i0_6, i1_6, i2_6, i3_6 in T.grid(16, 14, 14, 1024): with T.block("T_subtract_1"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_6, i1_6, i2_6, i3_6]) T.reads(compute_2[ax0, ax1, ax2, ax3], p7[0]) T.writes(T_subtract_1[ax0, ax1, ax2, ax3]) T_subtract_1[ax0, ax1, ax2, ax3] = compute_2[ax0, ax1, ax2, ax3] - p7[0] for i0_7, i1_7, i2_7, i3_7 in 
T.grid(16, 14, 14, 1024): with T.block("compute_2"): i0_8, i1_8, i2_8, i3_8 = T.axis.remap("SSSS", [i0_7, i1_7, i2_7, i3_7]) T.reads(T_subtract_1[i0_8, i1_8, i2_8, i3_8]) T.writes(compute_3[i0_8, i1_8, i2_8, i3_8]) compute_3[i0_8, i1_8, i2_8, i3_8] = T.q_multiply_shift(T_subtract_1[i0_8, i1_8, i2_8, i3_8], 1408572815, 31, 1, dtype="int32") for i0_9, i1_9, i2_9, i3_9 in T.grid(16, 14, 14, 1024): with T.block("T_add_2"): ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0_9, i1_9, i2_9, i3_9]) T.reads(compute_3[ax0, ax1, ax2, ax3], p8[ax0, ax1, ax2, ax3]) T.writes(T_add_2[ax0, ax1, ax2, ax3]) T_add_2[ax0, ax1, ax2, ax3] = compute_3[ax0, ax1, ax2, ax3] + p8[ax0, ax1, ax2, ax3] for i0_10, i1_10, i2_10, i3_10 in T.grid(16, 14, 14, 1024): with T.block("compute_3"): i0_11, i1_11, i2_11, i3_11 = T.axis.remap("SSSS", [i0_10, i1_10, i2_10, i3_10]) T.reads(T_add_2[i0_11, i1_11, i2_11, i3_11]) T.writes(compute[i0_11, i1_11, i2_11, i3_11]) compute[i0_11, i1_11, i2_11, i3_11] = T.max(T.min(T_add_2[i0_11, i1_11, i2_11, i3_11], 255), 0) # pylint: enable=no-member,invalid-name,unused-variable,no-self-argument,line-too-long,chained-comparison,not-callable,too-many-nested-blocks # fmt: on def test_inline_consumer_chain(): mod = Conv2DBiasBnReLU target = Target("llvm") (space,) = generate_design_space( kind="llvm", mod=mod, target=target, types=ms.schedule_rule.AutoInline, ) tvm.ir.assert_structural_equal(lhs=space.mod, rhs=Conv2DBiasBnReLUInlined) def test_inline_into_cache(): mod = MultiLevelTiledConv2D target = Target("cuda", host="llvm") (space,) = generate_design_space( kind="cuda", mod=mod, target=target, types=ms.schedule_rule.AutoInline, ) tvm.ir.assert_structural_equal(lhs=space.mod, rhs=MultiLevelTiledConv2DAfterInline) def test_inline_into_multiple_consumers(): mod = SoftmaxBeforeInline target = Target("cuda", host="llvm") (space,) = generate_design_space( kind="cuda", mod=mod, target=target, types=ms.schedule_rule.AutoInline, ) tvm.ir.assert_structural_equal(lhs=space.mod, 
rhs=SoftmaxAfterInline) def test_inline_pure_spatial(): mod = BeforePureSpatial target = Target("llvm") (space,) = generate_design_space( kind="llvm", mod=mod, target=target, types=ms.schedule_rule.AutoInline, ) tvm.ir.assert_structural_equal(lhs=space.mod, rhs=AfterPureSpatial) def test_inline_constant_tensor(): mod = ConstConsumer target = Target("cuda", host="llvm") (space,) = generate_design_space( kind="cuda", mod=mod, target=target, types=ms.schedule_rule.AutoInline, ) tvm.ir.assert_structural_equal(lhs=space.mod, rhs=ConstConsumer) def test_conv2d_int8_inline_constant_scalars(): sch = Schedule(Conv2dInt8) conv2d = sch.get_block("conv2d_nhwc") sch.cache_write(conv2d, 0, "shared") with pytest.raises(tvm.tir.ScheduleError) as e: sch.reverse_compute_inline(sch.get_block("T_add_1")) err_msg = "The block is only allowed to read a single buffer region, but it reads 2 region(s)" assert err_msg in str(e) ms.schedule_rule.InlineConstantScalars().apply(sch, sch.get_block("compile_engine_const")) sch.reverse_compute_inline(sch.get_block("T_add_1")) def test_inline_constant_scalars_skip_output_block(): # If the constant scalar block is an output block, it should not be inlined @tvm.script.ir_module class Full: @T.prim_func def main(T_full: T.Buffer((), "float32")): with T.block("T_full"): vi = T.axis.spatial(1, 0) T.reads() T.writes(T_full[()]) T_full[()] = T.float32(1) sch = Schedule(Full) sch = ms.schedule_rule.InlineConstantScalars().apply(sch, sch.get_block("T_full"))[0] assert_structural_equal(sch.mod, Full) if __name__ == "__main__": tvm.testing.main()
31,230
57.158287
387
py
tvm
tvm-main/tests/python/unittest/test_runtime_trace.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for the ``tvm.tir.trace`` intrinsic: tracing values while a compiled
TE kernel executes, with both the default action and user-registered callbacks."""
import tvm
from tvm import te
import numpy as np


def test_trace_default_action():
    """trace() without an explicit callback uses the default action and the
    kernel still runs to completion."""
    n = 2
    x = te.placeholder((n, n, n), name="X", dtype="float32")
    y = te.compute(x.shape, lambda i, j, k: tvm.tir.trace([i, j, k, x[i][j][k]]))
    s = te.create_schedule(y.op)
    f = tvm.build(s, [x, y], target="llvm")
    xnd = tvm.nd.array(np.ones((n, n, n), dtype=x.dtype))
    ynd = tvm.nd.array(np.zeros((n, n, n), dtype=y.dtype))
    f(xnd, ynd)


def test_trace_expr_assign():
    """A trace with a callback that returns nothing acts as a pass-through:
    the traced value is assigned unchanged."""

    @tvm.register_func("tvm.tir.trace_callback2")
    def trace_buffer(x):
        return

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n, n, n), name="X", dtype=dtype)
        y = te.compute(
            x.shape, lambda i, j, k: tvm.tir.trace([x[i][j][k]], "tvm.tir.trace_callback2")
        )
        z = te.compute(
            x.shape, lambda i, j, k: tvm.tir.trace([y[i][j][k]], "tvm.tir.trace_callback2")
        )
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")
        xnd = tvm.nd.array(np.ones((n, n, n), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n, n, n), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n, n, n), dtype=z.dtype))
        f(xnd, ynd, znd)
        assert np.array_equal(xnd.numpy(), np.ones((n, n, n)))
        assert np.array_equal(ynd.numpy(), np.ones((n, n, n)))
        assert np.array_equal(znd.numpy(), np.ones((n, n, n)))

    for t in ["float64", "float32", "int64", "int32"]:
        check_assign(t)


def test_trace_expr_sum_generated():
    """Two traced operands can be combined in an expression; the trace is
    transparent and the sum is computed normally."""

    @tvm.register_func("tvm.tir.trace_callback3")
    def trace_buffer(x):
        return

    def check_expr_sum(dtype):
        n = 4
        a = te.placeholder((n, n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n, n), name="b", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j, k: tvm.tir.trace([a[i][j][k]], "tvm.tir.trace_callback3")
            + tvm.tir.trace([b[i][j][k]], "tvm.tir.trace_callback3"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, c])
        # np.ones already returns an ndarray; the former np.array(np.ones(...))
        # wrapper was a redundant copy.
        xnd = tvm.nd.array(np.ones((n, n, n), dtype=a.dtype))
        ynd = tvm.nd.array(np.ones((n, n, n), dtype=b.dtype))
        znd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype))
        f(xnd, ynd, znd)
        assert np.array_equal(znd.numpy(), xnd.numpy() + ynd.numpy())

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum(t)


def test_trace_expr_sum_args():
    """Traces may carry extra arguments (here loop indices) alongside the
    buffer value; the expression result is unaffected."""

    @tvm.register_func("tvm.tir.trace_silent")
    def silent(*args):
        return

    def check_expr_sum(dtype):
        n = 4
        a = te.placeholder((n, n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n, n), name="b", dtype=dtype)
        e = te.placeholder((n, n, n), name="e", dtype=dtype)
        d = te.placeholder((n, n, n), name="d", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j, k: tvm.tir.trace([i, j, k, a[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, b[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, d[i][j][k]], "tvm.tir.trace_silent")
            + tvm.tir.trace([i, j, k, e[i][j][k]], "tvm.tir.trace_silent"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, d, e, c])
        a_nd = tvm.nd.array(np.ones((n, n, n), dtype=a.dtype))
        b_nd = tvm.nd.array(np.ones((n, n, n), dtype=b.dtype))
        d_nd = tvm.nd.array(np.ones((n, n, n), dtype=d.dtype))
        e_nd = tvm.nd.array(np.ones((n, n, n), dtype=e.dtype))
        c_nd = tvm.nd.array(np.zeros((n, n, n), dtype=c.dtype))
        f(a_nd, b_nd, d_nd, e_nd, c_nd)
        assert np.array_equal(
            c_nd.numpy(), a_nd.numpy() + b_nd.numpy() + d_nd.numpy() + e_nd.numpy()
        )

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum(t)


def test_trace_expr_sum_custom():
    """Same pass-through behavior on 2-D inputs with non-trivial contents."""

    @tvm.register_func("tvm.tir.trace_callback4")
    def trace_buffer(x):
        return

    def check_expr_sum_custom(dtype):
        n = 4
        a = te.placeholder((n, n), name="a", dtype=dtype)
        b = te.placeholder((n, n), name="b", dtype=dtype)
        c = te.compute(
            a.shape,
            lambda i, j: tvm.tir.trace([a[i][j]], "tvm.tir.trace_callback4")
            + tvm.tir.trace([b[i][j]], "tvm.tir.trace_callback4"),
        )
        s = te.create_schedule(c.op)
        f = tvm.build(s, [a, b, c])
        npa = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=a.dtype)
        npb = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=a.dtype)
        xnd = tvm.nd.array(npa)
        ynd = tvm.nd.array(npb)
        znd = tvm.nd.array(np.zeros((n, n), dtype=c.dtype))
        f(xnd, ynd, znd)
        assert np.array_equal(znd.numpy(), npa + npb)

    for t in ["float64", "float32", "int64", "int32"]:
        check_expr_sum_custom(t)


def test_trace_can_change_traced_value_int():
    """A callback returning a value replaces the traced integer value.
    The two callbacks need distinct Python names: defining both as
    ``trace_buffer`` redefined the first one in the same scope (pylint E0102)."""

    @tvm.register_func("tvm.tir.trace_change_int_first")
    def trace_buffer_first(x):
        return 13

    @tvm.register_func("tvm.tir.trace_change_int_second")
    def trace_buffer_second(x):
        return 14

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n,), name="X", dtype=dtype)
        y = te.compute(x.shape, lambda i: tvm.tir.trace([x[i]], "tvm.tir.trace_change_int_first"))
        z = te.compute(x.shape, lambda i: tvm.tir.trace([y[i]], "tvm.tir.trace_change_int_second"))
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")
        xnd = tvm.nd.array(np.ones((n,), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n,), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n,), dtype=z.dtype))
        f(xnd, ynd, znd)
        check_array_first = np.array([13, 13, 13, 13])
        check_array_second = np.array([14, 14, 14, 14])
        assert np.array_equal(ynd.numpy(), check_array_first)
        assert np.array_equal(znd.numpy(), check_array_second)

    for t in ["int64", "int32"]:
        check_assign(t)


def test_trace_can_change_traced_value_float():
    """Same as the int variant, but with float-returning callbacks (the two
    callbacks are likewise given distinct names to avoid redefinition)."""

    @tvm.register_func("tvm.tir.trace_change_float_first")
    def trace_buffer_first(x):
        return 13.0

    @tvm.register_func("tvm.tir.trace_change_float_second")
    def trace_buffer_second(x):
        return 14.0

    def check_assign(dtype):
        n = 4
        x = te.placeholder((n,), name="X", dtype=dtype)
        y = te.compute(x.shape, lambda i: tvm.tir.trace([x[i]], "tvm.tir.trace_change_float_first"))
        z = te.compute(
            x.shape, lambda i: tvm.tir.trace([y[i]], "tvm.tir.trace_change_float_second")
        )
        s = te.create_schedule(z.op)
        f = tvm.build(s, [x, y, z], "llvm")
        xnd = tvm.nd.array(np.ones((n,), dtype=x.dtype))
        ynd = tvm.nd.array(np.zeros((n,), dtype=y.dtype))
        znd = tvm.nd.array(np.zeros((n,), dtype=z.dtype))
        f(xnd, ynd, znd)
        check_array_first = np.array([13.0, 13.0, 13.0, 13.0])
        check_array_second = np.array([14.0, 14.0, 14.0, 14.0])
        assert np.array_equal(ynd.numpy(), check_array_first)
        assert np.array_equal(znd.numpy(), check_array_second)

    for t in ["float64", "float32"]:
        check_assign(t)


if __name__ == "__main__":
    test_trace_expr_assign()
    test_trace_expr_sum_generated()
    test_trace_expr_sum_custom()
    test_trace_expr_sum_args()
    test_trace_default_action()
    test_trace_can_change_traced_value_int()
    test_trace_can_change_traced_value_float()
8,335
36.54955
100
py
tvm
tvm-main/tests/python/unittest/test_tir_block_dependence_info.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import gc
import sys

import pytest

import tvm
import tvm.testing
from tvm import tir
from tvm.ir import IRModule
from tvm.script import tir as T
from tvm.tir import PrimFunc, BlockDependenceInfo
from tvm.tir.stmt_functor import post_order_visit
from tvm.tir.block_scope import DepKind

# pylint: disable=no-member,invalid-name,unused-variable


@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("D"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0


@T.prim_func
def war_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0


@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = T.float32(0)
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]


# pylint: enable=no-member,invalid-name,unused-variable


def get_blocks(func: PrimFunc):
    """Collect every ``tir.Block`` in ``func``, keyed by its name hint."""
    blocks = {}

    def update_blocks(node):
        if isinstance(node, tvm.tir.Block):
            blocks[node.name_hint] = node

    post_order_visit(func.body, update_blocks)
    return blocks


def _verify_dependence(dependence_info, src_block, dst_block, kind):
    """Assert that ``dependence_info`` records a ``kind`` dependence from
    ``src_block`` to ``dst_block``, visible from both the src and dst sides
    of the enclosing block scope."""
    src_sref = dependence_info.get_sref(src_block)
    dst_sref = dependence_info.get_sref(dst_block)
    scope = dependence_info.get_block_scope(src_sref.parent)

    def _find_dependence(deps):
        # Linear scan: the dependence lists of a single scope are small.
        for dep in deps:
            if dep.src == src_sref and dep.dst == dst_sref and dep.kind == kind:
                return dep
        return None

    # The dependence must be discoverable when querying by source block...
    deps_by_src = scope.get_deps_by_src(src_sref)
    dependence = _find_dependence(deps_by_src)
    assert (
        dependence
    ), f"Expected a dependency with src block {src_block.name_hint} and dst block {dst_block.name_hint} of kind {kind.name}"

    # ... and also when querying by destination block.
    deps_by_dst = scope.get_deps_by_dst(dst_sref)
    dependence = _find_dependence(deps_by_dst)
    assert (
        dependence
    ), f"Expected a dependency with src block {src_block.name_hint} and dst block {dst_block.name_hint} of kind {kind.name}"


def test_RAW_dependences():
    func = elementwise
    dependence_info = BlockDependenceInfo(func)
    blocks = get_blocks(func)
    _verify_dependence(dependence_info, blocks["B"], blocks["C"], DepKind.RAW)


def test_WAR_dependences():
    func = war_dependency
    dependence_info = BlockDependenceInfo(func)
    blocks = get_blocks(func)
    _verify_dependence(dependence_info, blocks["C"], blocks["B"], DepKind.WAR)


def test_RAW_and_WAW_dependences():
    func = matmul
    dependence_info = BlockDependenceInfo(func)
    blocks = get_blocks(func)
    _verify_dependence(dependence_info, blocks["init"], blocks["update"], DepKind.RAW)
    _verify_dependence(dependence_info, blocks["init"], blocks["update"], DepKind.WAW)


if __name__ == "__main__":
    tvm.testing.main()
5,166
32.771242
124
py