repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/unittest/test_ir_container.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
import numpy as np
def test_array():
    """Length, negative indexing, and slicing of a runtime Array."""
    arr = tvm.runtime.convert([1, 2, 3])
    assert len(arr) == 3
    assert arr[-1].value == 3
    window = arr[-3:-1]
    assert [window[0].value, window[1].value] == [1, 2]
def test_array_save_load_json():
    """An Array survives a save_json/load_json round trip."""
    original = tvm.runtime.convert([1, 2, 3])
    restored = tvm.ir.load_json(tvm.ir.save_json(original))
    assert restored[1].value == 2
def test_dir_array():
    """dir() on a runtime Array yields a non-empty attribute list."""
    assert dir(tvm.runtime.convert([1, 2, 3]))
def test_getattr_array():
    """getattr/hasattr behave sensibly on a runtime Array."""
    arr = tvm.runtime.convert([1, 2, 3])
    assert arr.type_key == "Array"
    assert not hasattr(arr, "test_key")
def test_map():
    """Membership, length, iteration, and views of a Var-keyed Map."""
    var_a = te.var("a")
    var_b = te.var("b")
    mapping = tvm.runtime.convert({var_a: 2, var_b: 3})
    assert var_a in mapping
    assert len(mapping) == 2
    as_dict = dict(mapping.items())
    assert var_a in as_dict
    assert var_b in as_dict
    # A structurally different expression must not be found as a key.
    assert var_a + 1 not in mapping
    assert set(iter(mapping)) == {var_a, var_b}
    assert set(mapping.keys()) == {var_a, var_b}
    assert set(mapping.values()) == {2, 3}
def test_str_map():
    """Membership and item access on a string-keyed Map."""
    mapping = tvm.runtime.convert({"a": 2, "b": 3})
    assert "a" in mapping
    assert len(mapping) == 2
    as_dict = dict(mapping.items())
    assert mapping["a"].value == 2
    assert "a" in as_dict
    assert "b" in as_dict
def test_map_save_load_json():
    """A Var-keyed Map survives a save_json/load_json round trip."""
    var_a = te.var("a")
    var_b = te.var("b")
    mapping = tvm.runtime.convert({var_a: 2, var_b: 3})
    restored = tvm.ir.load_json(tvm.ir.save_json(mapping))
    assert len(restored) == 2
    # Compare by variable name, since loaded Vars are fresh objects.
    by_name = {key.name: val.value for key, val in restored.items()}
    assert by_name == {"a": 2, "b": 3}
def test_dir_map():
    """dir() on a runtime Map yields a non-empty attribute list."""
    var_a = te.var("a")
    var_b = te.var("b")
    assert dir(tvm.runtime.convert({var_a: 2, var_b: 3}))
def test_getattr_map():
    """getattr/hasattr behave sensibly on a runtime Map."""
    var_a = te.var("a")
    var_b = te.var("b")
    mapping = tvm.runtime.convert({var_a: 2, var_b: 3})
    assert mapping.type_key == "Map"
    assert not hasattr(mapping, "test_key")
def test_in_container():
    """`in` works with both plain strings and StringImm against an Array."""
    arr = tvm.runtime.convert(["a", "b", "c"])
    assert "a" in arr
    assert tvm.tir.StringImm("a") in arr
    assert "d" not in arr
def test_ndarray_container():
    """NDArrays stored in an Array keep their identity and type."""
    nd = tvm.nd.array([1, 2, 3])
    arr = tvm.runtime.convert([nd, nd])
    # Both slots alias the same underlying NDArray object.
    for idx in (0, 1):
        assert arr[idx].same_as(nd)
    assert isinstance(arr[0], tvm.nd.NDArray)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 3,098 | 25.487179 | 62 | py |
tvm | tvm-main/tests/python/unittest/test_target_codegen_aarch64.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm.script import tir as TIR
import re
import os
import ctypes
import pytest
from tvm.target.codegen import llvm_version_major
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_mul(dtype):
    """Elementwise multiply should vectorize to SVE mul on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] * rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and mul instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    mul_ops = re.findall(
        r"mul\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(mul_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_add(dtype):
    """Elementwise add should vectorize to SVE add on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] + rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and add instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    add_ops = re.findall(
        r"add\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(add_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_sub(dtype):
    """Elementwise subtract should vectorize to SVE sub on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] - rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and sub instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    sub_ops = re.findall(
        r"sub\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(sub_ops) > 1
@pytest.mark.skipif(
llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
"dtype",
["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_muladd(dtype):
target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
def check_correct_assembly(type):
m = te.var("m")
A = te.placeholder(m, dtype=type, name="A")
B = te.placeholder(m, dtype=type, name="B")
C = te.placeholder(m, dtype=type, name="C")
D = te.compute((m), lambda i: A[i] * B[i] + C[i], name="D")
s = te.create_schedule([D.op])
f = tvm.build(s, [A, B, C, D], target)
# Verify we see SVE load instructions and either mad or mla instructions using z registers
assembly = f.get_source("asm")
loads = re.findall("ld1[whdb] { z", assembly)
matches = re.findall(
r"mad|mla\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
)
assert len(loads) > 1
assert len(matches) > 1
check_correct_assembly(type=dtype)
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_max(dtype):
    """Elementwise max should lower to SVE cmgt+sel or a max instruction."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"

    def check_correct_assembly(dtype_str):
        m = te.var("m")
        A = te.placeholder(m, dtype=dtype_str, name="A")
        B = te.placeholder(m, dtype=dtype_str, name="B")
        C = te.compute((m), lambda i: tvm.te.max(A[i], B[i]))
        s = te.create_schedule([C.op])
        f = tvm.build(s, [A, B, C], target)
        # Verify we see SVE load instructions and cmgt + sel instructions or a max instruction,
        # all using z registers.
        assembly = f.get_source("asm")
        loads = re.findall("ld1[whdb] { z", assembly)
        compare = re.findall(
            r"cmgt\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
        )
        select = re.findall(r"sel\tz[0-9].[shdb], p[0-9], z[0-9].[shdb], z[0-9].[shdb]", assembly)
        # Renamed from `max`: shadowing the builtin made the rest of the
        # function unable to use max() and confused readers.
        max_matches = re.findall(
            r"max\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
        )
        assert len(loads) > 1
        assert (len(compare) > 1 and len(select) == len(compare)) or len(max_matches) > 1

    check_correct_assembly(dtype_str=dtype)
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_min(dtype):
    """Elementwise min should lower to SVE cmgt+sel or a min instruction."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"

    def check_correct_assembly(dtype_str):
        m = te.var("m")
        A = te.placeholder(m, dtype=dtype_str, name="A")
        B = te.placeholder(m, dtype=dtype_str, name="B")
        C = te.compute((m), lambda i: tvm.te.min(A[i], B[i]))
        s = te.create_schedule([C.op])
        f = tvm.build(s, [A, B, C], target)
        # Verify we see SVE load instructions and cmgt + sel instructions or a min instruction,
        # all using z registers.
        assembly = f.get_source("asm")
        loads = re.findall("ld1[whdb] { z", assembly)
        compare = re.findall(
            r"cmgt\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
        )
        select = re.findall(r"sel\tz[0-9].[shdb], p[0-9], z[0-9].[shdb], z[0-9].[shdb]", assembly)
        # Renamed from `min`: shadowing the builtin made the rest of the
        # function unable to use min() and confused readers.
        min_matches = re.findall(
            r"min\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", assembly
        )
        assert len(loads) > 1
        assert (len(compare) > 1 and len(select) == len(compare)) or len(min_matches) > 1

    check_correct_assembly(dtype_str=dtype)
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_div(dtype):
    """Elementwise division should vectorize to SVE div on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: tvm.te.div(lhs[i], rhs[i]))
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and div instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    div_ops = re.findall(
        r"div\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(div_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_mod(dtype):
    """Integer floormod should lower to SVE mls (divide, multiply-subtract)."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: tvm.te.floormod(lhs[i], rhs[i]), name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and at least one mls instruction on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    mls_ops = re.findall(
        r"mls\tz[0-9].[shdb],( p[0-9]/[m],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(mls_ops) > 0
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_eq(dtype):
    """Elementwise equality should lower to SVE cmpeq/cmeq on predicates."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] == rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and cmpeq/cmeq compares on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    eq_ops = re.findall(
        r"cm(p)?eq\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(eq_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype",
    ["float", "float16", "uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"],
)
def test_neq(dtype):
    """Elementwise inequality should lower to SVE cmpgt/cmgt/cmpne/cmne."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] != rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and gt/ne compares on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    ne_ops = re.findall(
        r"cm(p)?(gt|ne)\tp[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(ne_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_or(dtype):
    """Bitwise OR should vectorize to SVE orr on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] | rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and orr instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    orr_ops = re.findall(
        r"orr\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(orr_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_and(dtype):
    """Bitwise AND should vectorize to SVE and on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    lhs = te.placeholder(length, dtype=dtype, name="A")
    rhs = te.placeholder(length, dtype=dtype, name="B")
    out = te.compute((length), lambda i: lhs[i] & rhs[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [lhs, rhs, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and `and` instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    and_ops = re.findall(
        r"and\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(and_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.parametrize(
    "dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_not(dtype):
    """Bitwise NOT should vectorize to SVE eor on AArch64."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    src = te.placeholder(length, dtype=dtype, name="A")
    out = te.compute((length), lambda i: ~src[i], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [src, out], target)
    asm = built.get_source("asm")
    # Expect SVE predicated loads and eor instructions on z registers.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    eor_ops = re.findall(
        r"eor\tz[0-9].[shdb],( p[0-9]/[zm],)? z[0-9].[shdb], z[0-9].[shdb]", asm
    )
    assert len(sve_loads) > 1
    assert len(eor_ops) > 1
@pytest.mark.skipif(
    llvm_version_major() < 15, reason="Test requires an LLVM version of at least 15 to target SVE"
)
@pytest.mark.xfail(
    reason="Awaiting llvm support for gathered loads",
    strict=True,
)
@pytest.mark.parametrize(
    "dtype", ["uint8", "uint16", "uint32", "uint64", "int8", "int16", "int32", "int64"]
)
def test_memcpy(dtype):
    """Indexed copy A[B[i]] — expected to fail until LLVM emits SVE gathers."""
    target = "llvm -mtriple=aarch64-linux-gnu -mattr=+sve"
    length = te.var("m")
    src = te.placeholder(length, dtype=dtype, name="A")
    index = te.placeholder(length, dtype="int32", name="B")
    out = te.compute((length), lambda i: src[index[i]], name="C")
    sched = te.create_schedule([out.op])
    built = tvm.build(sched, [src, index, out], target)
    asm = built.get_source("asm")
    # Expect gather-style SVE loads in the assembly.
    sve_loads = re.findall("ld1[whdb] { z", asm)
    assert len(sve_loads) > 0
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 16,454 | 33.209979 | 117 | py |
tvm | tvm-main/tests/python/unittest/test_tir_schedule_state_cached_flags.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import sys
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.state import CachedFlags
from tvm.tir.stmt_functor import post_order_visit
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# fmt: off
# Two-stage elementwise pipeline: B = A * 2, C = B + 1 over 128x128 buffers.
@T.prim_func
def elementwise(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0


# 128x128 matmul with separate init and update blocks sharing the i, j loops.
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j in T.grid(128, 128):
        with T.block("init"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = 0.0
        for k in range(0, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
# Fixture with child blocks nested inside an opaque block "B" (explicit
# reads/writes over whole buffers), branching on a data-dependent condition.
@T.prim_func
def block_in_opaque_block(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    B = T.match_buffer(b, (128, 128), "float32")
    for i in range(128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            T.reads([A[0:128, 0:128]])
            T.writes([B[0:128, 0:128]])
            B[vi, 0] = A[vi, 0]
            if A[vi, 0] == 0.0:
                with T.block("C"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("D"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 3.0
            else:
                with T.block("E"):
                    T.reads([A[0:128, 0:128]])
                    T.writes([B[0:128, 0:128]])
                    for j in range(128):
                        with T.block("F"):
                            vj = T.axis.S(128, j)
                            B[vi, vj] = A[vi, vj] * 2.0


# Fixture where the consumer of B ("C") appears before its producer ("B"):
# a write-after-read ordering that breaks the stage-pipeline property.
@T.prim_func
def write_after_read(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128))
    B = T.match_buffer(b, (128, 128))
    C = T.match_buffer(c, (128, 128))
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
    for i, j in T.grid(128, 128):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
# Fixture where block "C" at iteration i reads B[i - 1], creating a
# loop-carried dependency between the two blocks under the same loop.
@T.prim_func
def loop_carried_dependency(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128,))
    B = T.match_buffer(b, (128,))
    C = T.match_buffer(c, (128,))
    for i in range(0, 128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            B[vi] = A[vi] * 2.0
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = T.if_then_else(vi >= 1, B[vi - 1] + 1.0, 0.0, dtype="float32")


# Two producers together cover all of A (halves [0, 64) and [64, 128))
# before a single consumer reads the whole buffer.
@T.prim_func
def concatenate_multi_producer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128,))
    B = T.match_buffer(b, (128,))
    for i in range(0, 64):
        with T.block("A_0"):
            vi = T.axis.S(64, i)
            A[vi] = vi + 1
    for i in range(0, 64):
        with T.block("A_1"):
            vi = T.axis.S(64, i + 64)
            A[vi] = vi + 2
    for i in range(0, 128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            B[vi] = A[vi] * 2.0


# Same shape as above, but the first producer stops at 63 so element A[63]
# is never written: the consumer's read region is NOT covered.
@T.prim_func
def concatenate_multi_producer_uncovered(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128,))
    B = T.match_buffer(b, (128,))
    for i in range(0, 63):
        with T.block("A_0"):
            vi = T.axis.S(63, i)
            A[vi] = vi + 1
    for i in range(0, 64):
        with T.block("A_1"):
            vi = T.axis.S(64, i + 64)
            A[vi] = vi + 2
    for i in range(0, 128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            B[vi] = A[vi] * 2.0
# Producer "B" and consumer "C" share the same loop i, so their lowest
# common ancestor is that loop rather than the root block.
@T.prim_func
def lca_at_loop(a: T.handle, b: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128,))
    B = T.match_buffer(b, (128,))
    C = T.match_buffer(c, (128,))
    for i in range(0, 128):
        with T.block("B"):
            vi = T.axis.S(128, i)
            B[vi] = A[vi] * 2.0
        with T.block("C"):
            vi = T.axis.S(128, i)
            C[vi] = B[vi] + 1.0


# Two producers and two consumers, each operating on one half of the buffers.
@T.prim_func
def multi_producer_consumer(a: T.handle, b: T.handle) -> None:
    A = T.match_buffer(a, (128,))
    B = T.match_buffer(b, (128,))
    for i in range(0, 64):
        with T.block("A_0"):
            vi = T.axis.S(64, i)
            A[vi] = vi + 1
    for i in range(0, 64):
        with T.block("A_1"):
            vi = T.axis.S(64, i + 64)
            A[vi] = vi + 2
    for i in range(0, 64):
        with T.block("B_0"):
            vi = T.axis.S(64, i)
            B[vi] = A[vi] + 2.0
    for i in range(0, 64):
        with T.block("B_1"):
            vi = T.axis.S(64, i + 64)
            B[vi] = A[vi] + 3.0


# Like `elementwise`, but the producer's bindings are non-trivial affine
# expressions of four loop variables.
@T.prim_func
def elementwise_affine_producer(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j, k, l in T.grid(16, 2, 32, 16):
        with T.block("B"):
            vi = T.axis.S(128, i * 8 + j * 4 + k // 8)
            vj = T.axis.S(128, k % 8 * 16 + l)
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Producer "B" declares 4x4 tile read/write regions and fully writes each
# tile via its inner "B_sub" block, so the consumer's reads are covered.
@T.prim_func
def elementwise_subblock(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(32, 32):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([A[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
            T.writes([B[vi * 4 : vi * 4 + 4, vj * 4 : vj * 4 + 4]])
            for ii, jj in T.grid(4, 4):
                with T.block("B_sub"):
                    vi_i, vj_i = T.axis.remap("SS", [ii, jj])
                    B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0


# Same structure but each tile only writes a 2x2 corner of its 4x4 region,
# leaving parts of B unwritten: the consumer's reads are NOT covered.
@T.prim_func
def elementwise_subblock_uncovered(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, (128, 128), "float32")
    C = T.match_buffer(c, (128, 128), "float32")
    B = T.alloc_buffer((128, 128), "float32")
    for i, j in T.grid(32, 32):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            T.reads([A[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
            T.writes([B[vi * 4 : vi * 4 + 2, vj * 4 : vj * 4 + 2]])
            for ii, jj in T.grid(2, 2):
                with T.block("B_sub"):
                    vi_i, vj_i = T.axis.remap("SS", [ii, jj])
                    B[vi * 4 + vi_i, vj * 4 + vj_i] = A[vi * 4 + vi_i, vj * 4 + vj_i] * 2.0
    for i, j in T.grid(128, 128):
        with T.block("C"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 1.0
# Producer and consumer both sit under a threadIdx.x binding; the consumer
# reads B with transposed indices (B[vj, vi]), i.e. across threads, through
# a "shared"-scope buffer.
@T.prim_func
def bound_to_thread(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    C = T.match_buffer(c, [128, 128])
    B = T.alloc_buffer([128, 128], scope="shared")
    for i in T.thread_binding(0, 128, thread="threadIdx.x"):
        for j in T.serial(0, 128):
            with T.block("B"):
                vi, vj = T.axis.remap("SS", [i, j])
                B[vi, vj] = A[vi, vj] * 2.0
        for j in T.serial(0, 128):
            with T.block("C"):
                vi, vj = T.axis.remap("SS", [i, j])
                C[vj, vi] = B[vj, vi] + 1.0


# Same pattern with the 128 threads split over two equally-ranked thread
# axes (threadIdx.x outer, threadIdx.y inner).
@T.prim_func
def equal_ranked_threads(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    C = T.match_buffer(c, [128, 128])
    B = T.alloc_buffer([128, 128], scope="shared")
    for i_o in T.thread_binding(0, 16, thread="threadIdx.x"):
        for i_i in T.thread_binding(0, 8, thread="threadIdx.y"):
            for j in T.serial(0, 128):
                with T.block("B"):
                    vi = T.axis.S(128, i_o * 8 + i_i)
                    vj = T.axis.S(128, j)
                    B[vi, vj] = A[vi, vj] * 2.0
            for j in T.serial(0, 128):
                with T.block("C"):
                    vi = T.axis.S(128, i_o * 8 + i_i)
                    vj = T.axis.S(128, j)
                    C[vj, vi] = B[vj, vi] + 1.0
# Warp-scope buffer B indexed by (warp_id, lane_id); producer and consumer
# use the same thread bindings, so each lane reads what it wrote.
@T.prim_func
def warp_memory(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    C = T.match_buffer(c, [128, 128])
    B = T.alloc_buffer([128, 4, 32], scope="warp")
    for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
        for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
            for j in T.serial(0, 128):
                with T.block("B"):
                    warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
                    B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
            for j in T.serial(0, 128):
                with T.block("C"):
                    warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
                    C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0


# Negative case: the consumer sits under an extra threadIdx.y binding
# (i_o_prime), so it reads warp memory written under a different binding.
@T.prim_func
def warp_memory_negative(a: T.handle, c: T.handle) -> None:
    A = T.match_buffer(a, [128, 128])
    C = T.match_buffer(c, [128, 128])
    B = T.alloc_buffer([128, 4, 32], scope="warp")
    for i_o in T.thread_binding(0, 4, thread="threadIdx.y"):
        for i_i in T.thread_binding(0, 32, thread="threadIdx.x"):
            for j in T.serial(0, 128):
                with T.block("B"):
                    warp_id, lane_id, vj = T.axis.remap("SSS", [i_o, i_i, j])
                    B[vj, warp_id, lane_id] = A[warp_id * 32 + lane_id, vj] * 2.0
            for i_o_prime in T.thread_binding(0, 4, thread="threadIdx.y"):
                for j in T.serial(0, 128):
                    with T.block("C"):
                        _warp_id, warp_id, lane_id, vj = T.axis.remap(
                            "SSSS", [i_o, i_i, i_o_prime, j]
                        )
                        C[warp_id * 32 + lane_id, vj] = B[vj, warp_id, lane_id] + 1.0
# 3x3 max-pool style kernel whose cache stage writes a 10x10 halo per 8x8
# tile, guarded by a T.where predicate at the image borders.
@T.prim_func
def non_perfect_tiling_cache(a: T.handle, b: T.handle) -> None:
    X = T.match_buffer(a, [224, 224], dtype="float32")
    Y = T.match_buffer(b, [224, 224], dtype="float32")
    cache = T.alloc_buffer([224, 224], dtype="float32")
    for hh_0, ww_0 in T.grid(28, 28):
        for ax0 in T.serial(0, 10):
            for ax1 in T.serial(0, 10):
                with T.block("cache"):
                    h = T.axis.spatial(224, hh_0 * 8 - 1 + ax0)
                    w = T.axis.spatial(224, ww_0 * 8 - 1 + ax1)
                    # Predicate keeps the halo read inside the image bounds.
                    T.where(
                        1 <= hh_0 * 8 + ax0
                        and hh_0 * 8 + ax0 < 225
                        and 1 <= ww_0 * 8 + ax1
                        and ww_0 * 8 + ax1 < 225
                    )
                    cache[h, w] = X[h, w]
        for hh_1, ww_1, khh, kww in T.grid(8, 8, 3, 3):
            with T.block("compute"):
                h = T.axis.spatial(224, hh_0 * 8 + hh_1)
                w = T.axis.spatial(224, ww_0 * 8 + ww_1)
                kh, kw = T.axis.remap("RR", [khh, kww])
                with T.init():
                    Y[h, w] = 0.0
                Y[h, w] = T.max(
                    Y[h, w],
                    T.if_then_else(
                        T.likely(1 <= h + kh, dtype="bool")
                        and T.likely(h + kh < 225, dtype="bool")
                        and T.likely(1 <= w + kw, dtype="bool")
                        and T.likely(w + kw < 225, dtype="bool"),
                        cache[h + kh - 1, w + kw - 1],
                        0.0,
                        dtype="float32",
                    ),
                )


# Producer writes A[0:120] while the consumer reads A[8:128]: the last
# eight elements of the consumer's read region are never produced.
@T.prim_func
def uncovered_producer_region(A: T.Buffer((128,), "float32"), B: T.Buffer((128,), "float32")):
    for i in range(120):
        with T.block("producer"):
            vi = T.axis.S((0, 120), i)
            A[vi] = 1.0
    for i in range(120):
        with T.block("consumer"):
            vi = T.axis.S((8, 128), i + 8)
            B[vi] = A[vi]
# Meta-schedule-generated 127x127 fp16 matmul + ReLU with padding to 128,
# tensorized via wmma blocks. NOTE(review): loop nesting below is
# reconstructed from a whitespace-flattened dump — verify against the
# original test_tir_schedule_state_cached_flags.py before relying on it.
@T.prim_func
def matmul_relu_padding(A: T.Buffer((127, 127), "float16"), B: T.Buffer((127, 127), "float16"), compute: T.Buffer((127, 127), "float32")) -> None:
    # function attr dict
    T.func_attr({"global_symbol": "main", "tir.noalias": True})
    # body
    # with T.block("root")
    C = T.alloc_buffer([127, 127], dtype="float32")
    A_reindex = T.alloc_buffer([128, 128], dtype="float16")
    B_reindex = T.alloc_buffer([128, 128], dtype="float16")
    C_reindex_shared = T.alloc_buffer([128, 128], dtype="float32", scope="shared")
    C_reindex_shared_wmma_accumulator = T.alloc_buffer([128, 128], dtype="float32", scope="wmma.accumulator")
    # Pad A into a 128x128 buffer (zeros outside the 127x127 region).
    for ax0, ax1, ax2 in T.grid(128, 1, 128):
        with T.block("A_reindex"):
            v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
            T.reads(A[v0, v2])
            T.writes(A_reindex[v0, v2])
            A_reindex[v0, v2] = T.if_then_else(v0 < 127 and v2 < 127, A[v0, v2], T.float16(0), dtype="float16")
    # Pad (and transpose-index) B likewise.
    for ax0, ax1, ax2 in T.grid(1, 128, 128):
        with T.block("B_reindex"):
            v0, v1, v2 = T.axis.remap("SSS", [ax0, ax1, ax2])
            T.reads(B[v2, v1])
            T.writes(B_reindex[v2, v1])
            B_reindex[v2, v1] = T.if_then_else(v2 < 127 and v1 < 127, B[v2, v1], T.float16(0), dtype="float16")
    for ax0_0_0_ax1_0_0_fused in T.thread_binding(2, thread="blockIdx.y"):
        for ax0_0_1_ax1_0_1_fused in T.thread_binding(1, thread="blockIdx.x"):
            for ax0_0_2_ax1_0_2_fused in T.thread_binding(16, thread="threadIdx.y"):
                for ax2_0_0, ax2_0_1, ax0_0_3, ax1_0_3, ax2_0_2, ax0_0_4, ax1_0_4 in T.grid(2, 2, 1, 2, 2, 1, 1):
                    # Outer tensorized block: one 16x16x16 wmma tile per iteration.
                    with T.block("C_o"):
                        v0_o = T.axis.spatial(8, ax0_0_2_ax1_0_2_fused // 2 + ax0_0_3 + ax0_0_4)
                        v1_o = T.axis.spatial(8, ax1_0_4 + ax0_0_0_ax1_0_0_fused * 4 + ax0_0_2_ax1_0_2_fused % 2 * 2 + ax1_0_3)
                        v2_o = T.axis.reduce(8, ax2_0_0 * 4 + ax2_0_1 * 2 + ax2_0_2)
                        T.reads(A_reindex[v0_o * 16 : v0_o * 16 + 16, v2_o * 16 : v2_o * 16 + 16], B_reindex[v2_o * 16 : v2_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                        T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 : v0_o * 16 + 16, v1_o * 16 : v1_o * 16 + 16])
                        T.block_attr({"meta_schedule.auto_tensorize":"wmma_sync_16x16x16_f16f16f32", "meta_schedule.auto_tensorize_init":"wmma_fill_16x16x16_f32", "warp_execution":1})
                        with T.init():
                            for ax0_1, ax1_1 in T.grid(16, 16):
                                with T.block("C_init"):
                                    v0_i_init, v1_i_init = T.axis.remap("SS", [ax0_1, ax1_1])
                                    T.reads()
                                    T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init])
                                    C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i_init, v1_o * 16 + v1_i_init] = T.float32(0)
                        for ax0_1, ax1_1, ax2_1 in T.grid(16, 16, 16):
                            with T.block("C"):
                                v0_i, v1_i, v2_i = T.axis.remap("SSR", [ax0_1, ax1_1, ax2_1])
                                T.reads(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i], A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i])
                                T.writes(C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i])
                                T.block_attr({"meta_schedule.tiling_structure":"SSSRRSRS"})
                                C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] = C_reindex_shared_wmma_accumulator[v0_o * 16 + v0_i, v1_o * 16 + v1_i] + T.cast(A_reindex[v0_o * 16 + v0_i, v2_o * 16 + v2_i], "float32") * T.cast(B_reindex[v2_o * 16 + v2_i, v1_o * 16 + v1_i], "float32")
                # Copy accumulator tile out to the shared-scope staging buffer.
                for ax0, ax1 in T.grid(16, 32):
                    with T.block("C_reindex_shared_wmma.accumulator"):
                        v0 = T.axis.spatial(128, ax0_0_2_ax1_0_2_fused // 2 * 16 + ax0)
                        v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax0_0_2_ax1_0_2_fused % 2 * 32 + ax1)
                        T.reads(C_reindex_shared_wmma_accumulator[v0, v1])
                        T.writes(C_reindex_shared[v0, v1])
                        C_reindex_shared[v0, v1] = C_reindex_shared_wmma_accumulator[v0, v1]
            # Write the unpadded 127x127 region back from shared memory into C.
            for ax0, ax1 in T.grid(128, 64):
                with T.block("C_reindex_shared"):
                    v0 = T.axis.spatial(128, ax0)
                    v1 = T.axis.spatial(128, ax0_0_0_ax1_0_0_fused * 64 + ax1)
                    T.where(ax0 < 127 and ax0_0_0_ax1_0_0_fused * 64 + ax1 < 127)
                    T.reads(C_reindex_shared[v0, v1])
                    T.writes(C[v0, v1])
                    T.block_attr({"meta_schedule.cooperative_fetch":3})
                    C[v0, v1] = C_reindex_shared[v0, v1]
    # Final ReLU over the valid region.
    for i0, i1 in T.grid(127, 127):
        with T.block("compute"):
            i0_1, i1_1 = T.axis.remap("SS", [i0, i1])
            T.reads(C[i0_1, i1_1])
            T.writes(compute[i0_1, i1_1])
            compute[i0_1, i1_1] = T.max(C[i0_1, i1_1], T.float32(0))
@T.prim_func
def splitted_square_sum_with_predicate(
    A: T.Buffer((1, 7, 7, 512), "float32"), B: T.Buffer((1, 1, 1, 512), "float32")
) -> None:
    # Sums A over its two 7x7 spatial axes into B, one value per channel.
    # The 49-element reduction loop has been fused and split into a single
    # 256-wide chunk, so a predicate masks off the padded tail iterations.
    for i0_i1_i2_i3_0_fused, ax0, ax1, ax2, ax3 in T.grid(2, 1, 1, 1, 256):
        for ax4_ax5_fused_0, ax4_ax5_fused_1 in T.grid(1, 256):
            with T.block("B"):
                # Only the first 49 of the 256 split iterations are live.
                T.where(ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1 < 49)
                ax0_1, ax1_1, ax2_1 = T.axis.remap("SSS", [ax0, ax1, ax2])
                # Channel axis is split 2 x 256 across the outer fused loop.
                ax3_1 = T.axis.spatial(512, i0_i1_i2_i3_0_fused * 256 + ax3)
                # Recover the two 7-wide reduction axes from the fused index.
                rv0 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) // 7)
                rv1 = T.axis.reduce(7, (ax4_ax5_fused_0 * 256 + ax4_ax5_fused_1) % 7)
                T.reads(A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1])
                T.writes(B[ax0_1, ax1_1, ax2_1, ax3_1])
                with T.init():
                    B[ax0_1, ax1_1, ax2_1, ax3_1] = T.float32(0)
                B[ax0_1, ax1_1, ax2_1, ax3_1] += A[ax0_1, ax1_1 * 7 + rv0, ax2_1 * 7 + rv1, ax3_1]
# pylint: enable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
# fmt: on
def _get_block(s: tir.ScheduleState, name_hint: str) -> tir.StmtSRef:
    """Find the tir.Block named ``name_hint`` in ``s.mod["main"]`` and return its sref."""
    matches = []

    def _collect(node):
        # Record every block whose name matches; post-order means the last
        # entry corresponds to the original "last assignment wins" behavior.
        if isinstance(node, tvm.tir.Block) and node.name_hint == name_hint:
            matches.append(node)

    post_order_visit(s.mod["main"].body, _collect)
    assert matches and isinstance(matches[-1], tvm.tir.Block)
    return s.get_sref(matches[-1])
def test_elementwise():
    """Every block of `elementwise` has fully-positive cached flags."""
    s = tir.ScheduleState(elementwise, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("B", "C", "root"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_matmul():
    """Every block of `matmul` has fully-positive cached flags."""
    s = tir.ScheduleState(matmul, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("init", "update", "root"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_block_in_opaque_block():
    """Blocks nested in opaque blocks still get fully-positive cached flags."""
    s = tir.ScheduleState(block_in_opaque_block, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("B", "C", "E", "F", "root"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_write_after_read():
    """Write-after-read breaks the stage pipeline at the root block only."""
    s = tir.ScheduleState(write_after_read, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    expected = {
        "B": all_true,
        "C": all_true,
        "root": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=False),
    }
    # pylint: disable=protected-access
    for block_name, flags in expected.items():
        assert s._get_cached_flags(_get_block(s, block_name)) == flags
    # pylint: enable=protected-access
def test_loop_carried_dependency():
    """A loop-carried dependency leaves C's read region uncovered."""
    s = tir.ScheduleState(loop_carried_dependency, debug_mask="all")
    expected = {
        "B": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True),
        "C": CachedFlags(affine_binding=True, region_cover=False, stage_pipeline=True),
        "root": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=False),
    }
    # pylint: disable=protected-access
    for block_name, flags in expected.items():
        assert s._get_cached_flags(_get_block(s, block_name)) == flags
    # pylint: enable=protected-access
def test_concatenate_multi_producer_covered():  # pylint: disable=invalid-name
    """Concatenation with fully-covering producers keeps all flags positive."""
    s = tir.ScheduleState(concatenate_multi_producer, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("A_0", "A_1", "B", "root"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_concatenate_multi_producer_uncovered():  # pylint: disable=invalid-name
    """Uncovered producers clear B's region-cover flag and root's pipeline flag."""
    s = tir.ScheduleState(concatenate_multi_producer_uncovered, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    expected = {
        "A_0": all_true,
        "A_1": all_true,
        "B": CachedFlags(affine_binding=True, region_cover=False, stage_pipeline=True),
        "root": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=False),
    }
    # pylint: disable=protected-access
    for block_name, flags in expected.items():
        assert s._get_cached_flags(_get_block(s, block_name)) == flags
    # pylint: enable=protected-access
def test_lca_at_loop():
    """LCA at a loop node does not disturb any cached flag."""
    s = tir.ScheduleState(lca_at_loop, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("B", "C", "root"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_multi_producer_consumer():
    """Multiple producers and consumers all keep fully-positive cached flags."""
    s = tir.ScheduleState(multi_producer_consumer, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("A_0", "A_1", "B_0", "B_1"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_elementwise_affine_producer():
    """Affine producer bindings keep all cached flags positive."""
    s = tir.ScheduleState(elementwise_affine_producer, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("root", "B", "C"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_subblock():
    """A covered sub-block hierarchy keeps all cached flags positive."""
    s = tir.ScheduleState(elementwise_subblock, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("root", "B", "B_sub", "C"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_subblock_uncovered():
    """An uncovered sub-block clears C's region cover and root's pipeline flag."""
    s = tir.ScheduleState(elementwise_subblock_uncovered, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    expected = {
        "root": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=False),
        "B": all_true,
        "B_sub": all_true,
        "C": CachedFlags(affine_binding=True, region_cover=False, stage_pipeline=True),
    }
    # pylint: disable=protected-access
    for block_name, flags in expected.items():
        assert s._get_cached_flags(_get_block(s, block_name)) == flags
    # pylint: enable=protected-access
def test_thread_binding():
    """Thread-bound loops keep all cached flags positive."""
    s = tir.ScheduleState(bound_to_thread, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("root", "B", "C"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_equal_ranked_threads():
    """Equal-ranked thread bindings keep all cached flags positive."""
    s = tir.ScheduleState(equal_ranked_threads, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("root", "B", "C"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_warp_memory():
    """Correct warp-memory usage keeps all cached flags positive."""
    s = tir.ScheduleState(warp_memory, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("root", "B", "C"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_warp_memory_negative():
    """Incorrect warp-memory usage clears C's region cover and root's pipeline flag."""
    s = tir.ScheduleState(warp_memory_negative, debug_mask="all")
    expected = {
        "root": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=False),
        "B": CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True),
        "C": CachedFlags(affine_binding=True, region_cover=False, stage_pipeline=True),
    }
    # pylint: disable=protected-access
    for block_name, flags in expected.items():
        assert s._get_cached_flags(_get_block(s, block_name)) == flags
    # pylint: enable=protected-access
def test_non_perfect_tiling_cache():
    """Non-perfect tiling with a cache stage keeps all cached flags positive."""
    s = tir.ScheduleState(non_perfect_tiling_cache, debug_mask="all")
    all_true = CachedFlags(affine_binding=True, region_cover=True, stage_pipeline=True)
    # pylint: disable=protected-access
    for block_name in ("cache", "compute"):
        assert s._get_cached_flags(_get_block(s, block_name)) == all_true
    # pylint: enable=protected-access
def test_uncovered_producer_region():
    """The consumer's read region is not fully covered by its producer."""
    s = tir.ScheduleState(uncovered_producer_region, debug_mask="all")
    # pylint: disable=protected-access
    flags = s._get_cached_flags(_get_block(s, "consumer"))
    # pylint: enable=protected-access
    assert flags == CachedFlags(
        affine_binding=True,
        region_cover=False,
        stage_pipeline=True,
    )
def test_matmul_relu_padding():
    """The padded shared-memory reindex block keeps all cached flags positive."""
    s = tir.ScheduleState(matmul_relu_padding, debug_mask="all")
    # pylint: disable=protected-access
    flags = s._get_cached_flags(_get_block(s, "C_reindex_shared"))
    # pylint: enable=protected-access
    assert flags == CachedFlags(
        affine_binding=True,
        region_cover=True,
        stage_pipeline=True,
    )
def test_splitted_square_sum_with_predicate():
    """The predicated, split reduction block keeps all cached flags positive."""
    s = tir.ScheduleState(splitted_square_sum_with_predicate, debug_mask="all")
    # pylint: disable=protected-access
    flags = s._get_cached_flags(_get_block(s, "B"))
    # pylint: enable=protected-access
    assert flags == CachedFlags(
        affine_binding=True,
        region_cover=True,
        stage_pipeline=True,
    )
if __name__ == "__main__":
    # Discover and run all tests in this file through TVM's pytest wrapper.
    tvm.testing.main()
| 33,789 | 36.544444 | 307 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_tune_tir.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring,no-member,invalid-name,unused-variable
import logging
import tempfile
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule as ms
from tvm.meta_schedule.testing.custom_builder_runner import run_module_via_rpc
from tvm.meta_schedule.testing.local_rpc import LocalRPC
from tvm.script import tir as T
from tvm.target import Target
from tvm.tir.schedule import BlockRV, Schedule
# Enable debug-level logging for meta-schedule so tuning runs are inspectable.
logging.basicConfig()
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
@T.prim_func
def matmul(a: T.handle, b: T.handle, c: T.handle) -> None:
    # C[i, j] = sum_k A[i, k] * B[j, k]  (note: B is indexed transposed).
    A = T.match_buffer(a, [128, 128])
    B = T.match_buffer(b, [128, 128])
    C = T.match_buffer(c, [128, 128])
    for i, j, k in T.grid(128, 128, 128):
        with T.block("update"):
            vi, vj, vk = T.axis.remap("SSR", [i, j, k])
            with T.init():
                C[vi, vj] = 0.0
            C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vj, vk]
@T.prim_func
def two_step(a: T.handle, c: T.handle) -> None:
    # Two elementwise stages staged through an intermediate buffer:
    # B = A * 2, then C = B + 3.
    A = T.match_buffer(a, (1024, 1024), "float32")
    B = T.alloc_buffer((1024, 1024), "float32")
    C = T.match_buffer(c, (1024, 1024), "float32")
    for i, j in T.grid(1024, 1024):
        with T.block("A"):
            vi, vj = T.axis.remap("SS", [i, j])
            B[vi, vj] = A[vi, vj] * 2.0
    for i, j in T.grid(1024, 1024):
        with T.block("B"):
            vi, vj = T.axis.remap("SS", [i, j])
            C[vi, vj] = B[vi, vj] + 3.0
@pytest.mark.skip("Integration test")
@tvm.testing.requires_llvm
def test_tune_matmul_cpu():
    """Tune `matmul` on a 16-core LLVM target and display the best schedule."""
    target = Target("llvm --num-cores=16")
    with tempfile.TemporaryDirectory() as work_dir:
        database = ms.tir_integration.tune_tir(
            mod=matmul,
            target=target,
            work_dir=work_dir,
            max_trials_global=32,
            num_trials_per_iter=16,
        )
        sch = ms.tir_integration.compile_tir(database, matmul, target)
        if sch is not None:
            sch.mod.show()
            sch.trace.show()
        else:
            print("No valid schedule found!")
@pytest.mark.skip("Integration test")
@tvm.testing.requires_cuda
def test_tune_matmul_cuda():
    """Tune `matmul` on an RTX 3070 CUDA target and display the best schedule."""
    target = Target("nvidia/geforce-rtx-3070")
    with tempfile.TemporaryDirectory() as work_dir:
        database = ms.tir_integration.tune_tir(
            mod=matmul,
            target=target,
            work_dir=work_dir,
            max_trials_global=32,
            num_trials_per_iter=16,
        )
        sch = ms.tir_integration.compile_tir(database, matmul, target)
        if sch is not None:
            sch.mod.show()
            sch.trace.show()
        else:
            print("No valid schedule found!")
@pytest.mark.skip("Integration test")
def test_tune_run_module_via_rpc():
    """Build `matmul`, execute it through a local RPC session, and verify output.

    The TIR kernel computes C[i, j] = sum_k A[i, k] * B[j, k], i.e. A @ B^T,
    which is used as the reference below.
    """
    target = tvm.target.Target("llvm")
    rt_mod = tvm.build(matmul, target)
    # construct the input
    input_data = {}
    input_shape = (128, 128)
    input_dtype = "float32"
    a_np = np.random.uniform(size=input_shape).astype(input_dtype)
    b_np = np.random.uniform(size=input_shape).astype(input_dtype)
    # Vectorized reference result; replaces a ~2M-iteration pure-Python
    # triple loop that computed the same A @ B^T element by element.
    c_np = a_np @ b_np.T
    input_data["a"] = a_np
    input_data["b"] = b_np
    input_data["c"] = np.zeros(input_shape).astype(input_dtype)
    with LocalRPC() as rpc:
        rpc_config = ms.runner.RPCConfig(
            tracker_host=rpc.tracker_host,
            tracker_port=rpc.tracker_port,
            tracker_key=rpc.tracker_key,
            session_priority=1,
            session_timeout_sec=100,
        )

        def f_timer(rt_mod, dev, input_data):
            # Continuation executed on the RPC remote: run once, return output.
            rt_mod(input_data["a"], input_data["b"], input_data["c"])
            return input_data["c"]

        result = run_module_via_rpc(
            rpc_config=rpc_config,
            lib=rt_mod,
            dev_type=target.kind.name,
            args=input_data,
            continuation=f_timer,
        )
        tvm.testing.assert_allclose(result.numpy(), c_np, rtol=1e-3)
@pytest.mark.skip("Integration test")
def test_tune_block_cpu():
    # Custom schedule rule: inline every non-root block it is applied to.
    @ms.derived_object
    class RemoveBlock(ms.schedule_rule.PyScheduleRule):
        def _initialize_with_tune_context(self, context: ms.TuneContext) -> None:
            # No per-context state required.
            pass
        def apply(self, sch: Schedule, block: BlockRV):
            # Leave the root block untouched; inline everything else.
            if sch.get(block).name_hint == "root":
                return [sch]
            sch = sch.copy()
            sch.compute_inline(block)
            return [sch]
        def clone(self) -> "RemoveBlock":
            return RemoveBlock()
    with tempfile.TemporaryDirectory() as work_dir:
        target = Target("llvm --num-cores=16")
        database = ms.tir_integration.tune_tir(
            mod=two_step,
            target=target,
            work_dir=work_dir,
            max_trials_global=32,
            num_trials_per_iter=16,
            # Restrict the search space to blocks named "A" and apply only
            # the RemoveBlock rule, with no postprocessors or mutators.
            space=ms.space_generator.PostOrderApply(
                f_block_filter=lambda block: block.name_hint == "A",
                sch_rules=[RemoveBlock()],
                postprocs=[],
                mutator_probs={},
            ),
        )
        sch = ms.tir_integration.compile_tir(database, two_step, target)
        assert sch is not None
        sch.mod.show()
        sch.trace.show()
# Use a plain string literal: the original compared against a triple-quoted
# """__main__""", which is equivalent but non-idiomatic and confusing.
if __name__ == "__main__":
    test_tune_matmul_cpu()
    test_tune_matmul_cuda()
    test_tune_run_module_via_rpc()
    test_tune_block_cpu()
| 6,338 | 32.188482 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_auto_scheduler_search_policy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import random
import multiprocessing
import numpy as np
import tempfile
import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
matmul_auto_scheduler_test,
zero_rank_compute_auto_scheduler_test,
zero_rank_reduce_auto_scheduler_test,
)
import multiprocessing
class CustomMeasureCallback(auto_scheduler.measure.PythonBasedMeasureCallback):
    """A simple Python-based callback for testing."""

    def callback(self, policy, inputs, results):
        """Sanity-check the types the tuner hands to measure callbacks."""
        assert isinstance(policy, auto_scheduler.search_policy.SearchPolicy)
        for measure_input, measure_result in zip(inputs, results):
            assert isinstance(measure_input, auto_scheduler.MeasureInput)
            assert isinstance(measure_result, auto_scheduler.MeasureResult)
def search_common(
    task=None,
    target="llvm",
    search_policy="sketch",
    runner="local",
    num_measure_trials=100,
    cost_model=None,
    init_search_callbacks=None,
):
    """Run a short tuning session and check the best schedule for correctness.

    Parameters
    ----------
    task : auto_scheduler.SearchTask, optional
        Task to tune; defaults to a 64x64x64 matmul on `target`.
    target : str or Target
        Target used when constructing the default task.
    search_policy : str
        Policy name, either "empty" or "sketch".
    runner : str or auto_scheduler runner
        Measurement runner passed to TuningOptions.
    num_measure_trials : int
        Total number of measurement trials.
    cost_model : auto_scheduler cost model, optional
        Defaults to a fresh RandomModel per call.  (Previously the default was
        `auto_scheduler.RandomModel()` in the signature, which created a single
        instance at import time shared by every call -- the classic
        evaluated-once default-argument pitfall.)
    init_search_callbacks : list, optional
        Extra search callbacks; a PreloadMeasuredStates callback is appended.
    """
    if cost_model is None:
        cost_model = auto_scheduler.RandomModel()
    if task is None:
        task = auto_scheduler.SearchTask(
            func=matmul_auto_scheduler_test, args=(64, 64, 64), target=target
        )
    target = task.target
    print("Test search policy '%s' for '%s'" % (search_policy, target))
    with tempfile.NamedTemporaryFile() as fp:
        log_file = fp.name
        init_search_callbacks = init_search_callbacks or []
        init_search_callbacks.append(auto_scheduler.PreloadMeasuredStates(log_file))
        if search_policy == "empty":
            search_policy = auto_scheduler.EmptyPolicy(task)
        elif search_policy == "sketch":
            search_policy = auto_scheduler.SketchPolicy(
                task, program_cost_model=cost_model, init_search_callbacks=init_search_callbacks
            )
        else:
            raise ValueError("Invalid policy: " + search_policy)
        # Tune
        tuning_options = auto_scheduler.TuningOptions(
            num_measure_trials=num_measure_trials,
            num_measures_per_round=2,
            early_stopping=1,
            runner=runner,
            measure_callbacks=[auto_scheduler.RecordToFile(log_file), CustomMeasureCallback()],
        )
        task.tune(tuning_options=tuning_options, search_policy=search_policy)
        # Compile with the best schedule
        sch, args = task.apply_best(log_file)
        mod = tvm.build(sch, args, target)
        # Compile with naive schedule for correctness check
        sch, args = task.compute_dag.apply_steps_from_state(task.compute_dag.init_state)
        mod_ref = tvm.build(sch, args, "llvm")
        ctx = tvm.device(str(target), 0)
        np_arrays = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype) for x in args]
        # Run the tuned module on the tuning target...
        tvm_arrays = [tvm.nd.array(x, ctx) for x in np_arrays]
        mod(*tvm_arrays)
        actual = [x.numpy() for x in tvm_arrays]
        # ...and the naive module on CPU, then compare elementwise.
        tvm_arrays = [tvm.nd.array(x) for x in np_arrays]
        mod_ref(*tvm_arrays)
        expected = [x.numpy() for x in tvm_arrays]
        for x, y in zip(actual, expected):
            tvm.testing.assert_allclose(x, y, rtol=1e-5)
@tvm.testing.requires_llvm
def test_workload_registry_empty_policy():
    """The empty policy works with function objects and registered workload names."""
    search_common(search_policy="empty", num_measure_trials=2)
    size = 64
    target = "llvm"
    for registered_name in (
        "matmul_auto_scheduler_test",
        "matmul_auto_scheduler_test_rename_1",
    ):
        search_common(
            task=auto_scheduler.SearchTask(
                func=registered_name, args=(size, size, size), target=target
            ),
            num_measure_trials=2,
            search_policy="empty",
        )
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic():
    """Smoke-test the sketch policy with all-default settings."""
    search_common()
def sketch_search_policy_basic_spawn():
    """Child-process entry point: verify we were spawned, then run the basic test."""
    start_method = multiprocessing.get_start_method(False)
    assert start_method == "spawn"
    test_sketch_search_policy_basic()
@tvm.testing.requires_llvm
def test_sketch_search_policy_basic_spawn():
    """Run the basic sketch-policy test inside a freshly spawned process."""
    spawn_ctx = multiprocessing.get_context("spawn")
    worker = spawn_ctx.Process(target=sketch_search_policy_basic_spawn)
    worker.start()
    worker.join()
@tvm.testing.requires_llvm
def test_sketch_search_policy_xgbmodel():
    """Sketch policy driven by the XGBoost cost model."""
    xgb_model = auto_scheduler.XGBModel()
    search_common(cost_model=xgb_model)
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_rpc_runner():
    """Sketch policy on CUDA, measuring through a local RPC runner."""
    rpc_measure_ctx = auto_scheduler.LocalRPCMeasureContext()
    search_common(target="cuda", runner=rpc_measure_ctx.runner)
@tvm.testing.requires_cuda
def test_sketch_search_policy_cuda_xgbmodel_rpc_runner():
    """Sketch policy on CUDA with the XGBoost cost model and a local RPC runner."""
    rpc_measure_ctx = auto_scheduler.LocalRPCMeasureContext()
    xgb_model = auto_scheduler.XGBModel()
    search_common(target="cuda", runner=rpc_measure_ctx.runner, cost_model=xgb_model)
@tvm.testing.requires_llvm
@tvm.testing.requires_cuda
def test_sketch_search_policy_zero_rank():
    """Zero-rank compute and reduce workloads are searchable on CPU and GPU."""
    measure_ctx = auto_scheduler.LocalRPCMeasureContext()
    for target in ["llvm", "cuda"]:
        # Same order as before: the compute workload first, then the reduce one.
        for workload in (
            zero_rank_compute_auto_scheduler_test,
            zero_rank_reduce_auto_scheduler_test,
        ):
            task = auto_scheduler.SearchTask(func=workload, args=(10,), target=target)
            search_common(task, runner=measure_ctx.runner)
@tvm.testing.requires_llvm
def test_sketch_search_policy_custom_sketch():
    # Apply the custom sketch rule everywhere and skip the built-in rules.
    def meet_condition_func(search_policy, state, stage_id):
        return auto_scheduler.PreloadCustomSketchRule.APPLY_AND_SKIP_REST
    # Produce two candidate states: the input state unchanged, and one with
    # the first iterator of stage C split by factor 8.
    def apply_func(search_policy, state, stage_id):
        ret = []
        state = auto_scheduler.loop_state.State(state, search_policy.search_task.compute_dag)
        C = state.stage_ops[2]
        ret.append([state.state_object, -1])
        s1 = state.copy()
        i, _, _ = s1[C].iters
        s1.split(C, i, [8])
        ret.append([s1.state_object, -1])
        return ret
    search_common(
        cost_model=auto_scheduler.XGBModel(),
        init_search_callbacks=[
            auto_scheduler.PreloadCustomSketchRule(meet_condition_func, apply_func)
        ],
    )
if __name__ == "__main__":
    # Run every test in this file when executed as a script.
    for _test in (
        test_workload_registry_empty_policy,
        test_sketch_search_policy_basic,
        test_sketch_search_policy_basic_spawn,
        test_sketch_search_policy_xgbmodel,
        test_sketch_search_policy_cuda_rpc_runner,
        test_sketch_search_policy_cuda_xgbmodel_rpc_runner,
        test_sketch_search_policy_zero_rank,
        test_sketch_search_policy_custom_sketch,
    ):
        _test()
| 7,301 | 32.040724 | 100 | py |
tvm | tvm-main/tests/python/unittest/test_ir_attrs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import pytest
import tvm.ir._ffi_api
def test_make_attrs():
    """Attrs nodes reject unknown keys and bad values, and keep field defaults."""
    with pytest.raises(AttributeError):
        tvm.ir.make_node("attrs.TestAttrs", unknown_key=1, name="xx")
    with pytest.raises(AttributeError):
        tvm.ir.make_node("attrs.TestAttrs", axis=100, name="xx")
    attrs = tvm.ir.make_node("attrs.TestAttrs", name="xx", padding=(3, 4))
    assert attrs.name == "xx"
    assert (attrs.padding[0].value, attrs.padding[1].value) == (3, 4)
    # `axis` was not supplied, so it takes its declared default.
    assert attrs.axis == 10
def test_dict_attrs():
    """DictAttrs exposes mapping behavior and survives a JSON round-trip."""
    dattr = tvm.ir.make_node("DictAttrs", x=1, y=10, name="xyz", padding=(0, 0))
    assert dattr.x.value == 1
    # Round-trip through JSON and run the remaining checks on the *reloaded*
    # object.  (A typo previously stored the result in an unused variable
    # `datrr`, so the round-tripped attrs were never actually verified.)
    dattr = tvm.ir.load_json(tvm.ir.save_json(dattr))
    assert dattr.x.value == 1
    assert dattr.name == "xyz"
    assert isinstance(dattr, tvm.ir.DictAttrs)
    assert "name" in dattr
    assert dattr["x"].value == 1
    assert len(dattr) == 4
    assert len([x for x in dattr.keys()]) == 4
    assert len(dattr.items()) == 4
def test_attrs_equal():
    """Structural equality of DictAttrs ignores key order but not values."""
    attrs_a = tvm.ir.make_node("DictAttrs", x=1, y=[10, 20])
    attrs_b = tvm.ir.make_node("DictAttrs", y=[10, 20], x=1)
    attrs_c = tvm.ir.make_node("DictAttrs", x=1, y=None)
    assert tvm.ir.structural_equal(attrs_a, attrs_b)
    assert not tvm.ir.structural_equal(attrs_a, attrs_c)
    # Attrs never compare equal to plain containers or scalars.
    assert not tvm.ir.structural_equal({"x": 1}, tvm.runtime.convert(1))
    assert not tvm.ir.structural_equal([1, 2], tvm.runtime.convert(1))
if __name__ == "__main__":
    # Run every test in this file when executed as a script.
    for _test in (test_make_attrs, test_dict_attrs, test_attrs_equal):
        _test()
| 2,259 | 34.873016 | 80 | py |
tvm | tvm-main/tests/python/relay/test_analysis_extract_fused_functions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
from tvm.relay.testing.synthetic import get_workload
def get_conv_net():
    r"""Build the net for a case described in fuse_ops.cc:

             conv2d
            /  |   \
           /   |    \
          op   op   op
           \   |    /
            \  |   /
          elemwise add
               |

    The docstring is now a raw string: the original used bare backslashes in
    the ASCII art, which Python treats as escape sequences (`\ ` is invalid and
    warns on modern interpreters, and a trailing backslash silently joined two
    lines, garbling the diagram).
    """
    dshape = (1, 1, 5, 1)
    x = relay.var("x", shape=dshape)
    y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    x2 = relay.nn.conv2d(y, relay.var("w3"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    x3 = relay.nn.conv2d(y, relay.var("w4"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    z = relay.add(x1, x2)
    z = relay.add(x3, z)
    return tvm.IRModule.from_expr(z)
def get_conv2d():
    """Build a module containing a single NHWC/HWIO conv2d."""
    data = relay.var("x", shape=(1, 56, 56, 64))
    kernel = relay.var("weight1", shape=(3, 3, 64, 32))
    conv = relay.nn.conv2d(
        data,
        kernel,
        channels=32,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    return tvm.IRModule.from_expr(conv)
def test_extract_identity():
    """A single fused function is extracted and matches the primitive main."""
    mod = get_conv2d()
    items = relay.analysis.extract_fused_functions(mod)
    assert len(items) == 1
    mod["main"] = mod["main"].with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    # The result of structural_equal was previously discarded, so the
    # comparison never actually ran as a check; assert it explicitly.
    assert tvm.ir.structural_equal(list(items.values())[0], mod["main"])
def test_extract_conv_net():
    """Two fused functions are expected: a conv2d, and a conv2d feeding an add."""
    mod = get_conv_net()
    items = relay.analysis.extract_fused_functions(mod)
    functions = list(items.values())
    assert len(functions) == 2
    x = functions[0]
    y = functions[1]
    def is_conv(func):
        # True when the function body is a direct nn.conv2d call.
        conv2d = relay.op.op.get("nn.conv2d")
        call_node = func.body
        return call_node.op == conv2d
    def is_conv_add(func):
        # True when the body is an add whose first argument is a conv2d.
        add = relay.op.op.get("add")
        call_node = func.body
        maybe_conv_module = tvm.IRModule.from_expr(call_node.args[0])
        return call_node.op == add and is_conv(maybe_conv_module["main"])
    # Function traversal order isn't obvious, so checking both orders is more consistent
    assert (is_conv(x) and is_conv_add(y)) or (is_conv_add(x) and is_conv(y))
def test_extract_resnet():
    """The synthetic workload yields exactly seven fused functions."""
    mod, _params = get_workload()
    fused = relay.analysis.extract_fused_functions(mod)
    assert len(fused) == 7
if __name__ == "__main__":
    # Run every test in this file when executed as a script.
    for _test in (test_extract_identity, test_extract_conv_net, test_extract_resnet):
        _test()
| 3,345 | 30.566038 | 92 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
# We use llvm target for testing functionality. `llvm` points to an older Intel
# generation machine, that legalizes to a simple lowering. Therefore, the
# legalization is overwritten such that it can be skipped and we use the
# QNNCanonicalizeOps lowering for the testing.
def legalize_qnn_conv2d(attrs, inputs, types):
    """No-op legalization override: returning None skips the target-specific
    lowering so the QNNCanonicalizeOps path is exercised instead."""
    return None
def get_ref_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups,
    channels=None,
):
    """Build the reference function: a plain int32 conv2d over zero-point-shifted
    inputs, used to check qnn.conv2d results.  Returns a relay.Function.
    Scalar zero points are promoted to int32 constants; Expr zero points are
    reshaped to broadcast along the output-channel axis where needed.
    """
    if isinstance(input_zero_point, (int, float)):
        input_zero_point = relay.const(input_zero_point, "int32")
    if isinstance(kernel_zero_point, (int, float)):
        kernel_zero_point = relay.const(kernel_zero_point, "int32")
    else:
        # Kernel zero point expression requires manual broadcasting for some layouts.
        if kernel_layout == "OIHW":
            kernel_zero_point = relay.reshape(kernel_zero_point, [-1, 1, 1, 1])
        elif kernel_layout == "HWOI":
            kernel_zero_point = relay.reshape(kernel_zero_point, [1, 1, -1, 1])
    # Widen to int32 before subtracting zero points to avoid overflow.
    casted_data = relay.op.cast(data, "int32")
    casted_kernel = relay.op.cast(kernel, "int32")
    shifted_data = relay.op.subtract(casted_data, input_zero_point)
    shifted_kernel = relay.op.subtract(casted_kernel, kernel_zero_point)
    func = relay.op.nn.conv2d(
        shifted_data,
        shifted_kernel,
        padding=padding,
        strides=strides,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
        out_dtype=out_dtype,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    func = relay.Function(relay.analysis.free_vars(func), func)
    return func
def get_qnn_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    channels,
    groups,
):
    """Wrap data/kernel in a qnn.conv2d and return the result as an IRModule.

    Python scalars passed as zero points are promoted to int32 relay
    constants; relay expressions are forwarded untouched.
    """
    izp = input_zero_point
    if isinstance(izp, (int, float)):
        izp = relay.const(izp, "int32")
    kzp = kernel_zero_point
    if isinstance(kzp, (int, float)):
        kzp = relay.const(kzp, "int32")
    conv = relay.qnn.op.conv2d(
        data,
        kernel,
        input_zero_point=izp,
        kernel_zero_point=kzp,
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=kernel_size,
        strides=strides,
        dilation=dilation,
        padding=padding,
        out_dtype=out_dtype,
        groups=groups,
        channels=channels,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    wrapped = relay.Function(relay.analysis.free_vars(conv), conv)
    return tvm.IRModule.from_expr(wrapped)
def get_funcs(
    data_shape,
    data_dtype,
    kernel_shape,
    kernel_dtype,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups=1,
    channels=None,
):
    """Create matching (reference, qnn) IRModules over fresh data/kernel vars."""
    data = relay.var("data", shape=data_shape, dtype=data_dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
    # Both builders take the same keyword set (order differs), so share it.
    conv_args = dict(
        input_zero_point=input_zero_point,
        kernel_zero_point=kernel_zero_point,
        input_scale=input_scale,
        kernel_scale=kernel_scale,
        kernel_size=kernel_size,
        padding=padding,
        strides=strides,
        dilation=dilation,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
        out_dtype=out_dtype,
        groups=groups,
        channels=channels,
    )
    ref_func = get_ref_func(data, kernel, **conv_args)
    ref_func = tvm.IRModule.from_expr(run_infer_type(ref_func))
    qnn_func = get_qnn_func(data, kernel, **conv_args)
    return (ref_func, qnn_func)
def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype):
    """Run the reference and QNN modules on identical random inputs and
    check that their int32 outputs match exactly."""

    def get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype):
        # Random golden data/weight covering the full value range of the dtype.
        # np.random.randint's `high` bound is EXCLUSIVE, so use 128/256 here;
        # the previous 127/255 silently excluded the top value of the range.
        # Keeping inputs multiple of 4 because of a bug in Average Pool2d
        # https://discuss.tvm.apache.org/t/pool2d-gives-bad-output-for-integer-inputs/3377
        low, high = (0, 256) if data_dtype == "uint8" else (-128, 128)
        golden_data = np.random.randint(low=low, high=high, size=data_shape).astype(data_dtype)
        low, high = (0, 256) if kernel_dtype == "uint8" else (-128, 128)
        golden_weight = np.random.randint(low=low, high=high, size=kernel_shape).astype(
            kernel_dtype
        )
        return (golden_data, golden_weight)

    def get_output(func, golden_inputs):
        # Build for llvm, bind the kernel as a parameter, and execute once.
        with tvm.transform.PassContext(opt_level=2):
            golden_data, golden_weight = golden_inputs
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(func, "llvm", params=params)
            mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            res = mod.get_output(0).numpy()
            return res

    golden_inputs = get_inputs(data_shape, data_dtype, kernel_shape, kernel_dtype)
    golden_output = get_output(ref_func, golden_inputs)
    qnn_output = get_output(qnn_func, golden_inputs)
    # Both paths compute in int32, so results must be bit-exact.
    np.testing.assert_equal(qnn_output, golden_output)
def test_no_zero_point():
    """With both zero points at 0, QNN conv2d must equal the plain int32 conv."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Same geometry for both element types; only the dtype differs.
        for dtype in ("uint8", "int8"):
            in_shape = (2, 1, 2, 4)
            w_shape = (3, 1, 2, 2)
            ref_func, qnn_func = get_funcs(
                data_shape=in_shape,
                data_dtype=dtype,
                kernel_shape=w_shape,
                kernel_dtype=dtype,
                input_zero_point=0,
                kernel_zero_point=0,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, in_shape, dtype, w_shape, dtype)
def test_kernel_zero_point():
    """Non-zero kernel zero point only: QNN lowering must match the
    zero-point-subtracted reference conv for uint8 and int8 inputs."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (2, 4, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=0,
            kernel_zero_point=1,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # int8 input (smaller channel count and a different zero point)
        data_shape = (2, 1, 2, 4)
        data_dtype = "int8"
        kernel_shape = (3, 1, 2, 2)
        kernel_dtype = "int8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=0,
            kernel_zero_point=5,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_input_zero_point():
    """Non-zero input zero point only: QNN result must match the reference."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Identical configuration for both element types.
        for dtype in ("uint8", "int8"):
            in_shape = (2, 4, 2, 4)
            w_shape = (3, 4, 2, 2)
            ref_func, qnn_func = get_funcs(
                data_shape=in_shape,
                data_dtype=dtype,
                kernel_shape=w_shape,
                kernel_dtype=dtype,
                input_zero_point=5,
                kernel_zero_point=0,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, in_shape, dtype, w_shape, dtype)
def test_both_zero_point():
    """Non-zero input AND kernel zero points: QNN result must match the reference."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Identical configuration for both element types.
        for dtype in ("uint8", "int8"):
            in_shape = (2, 4, 2, 4)
            w_shape = (3, 4, 2, 2)
            ref_func, qnn_func = get_funcs(
                data_shape=in_shape,
                data_dtype=dtype,
                kernel_shape=w_shape,
                kernel_dtype=dtype,
                input_zero_point=5,
                kernel_zero_point=3,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, in_shape, dtype, w_shape, dtype)
def test_dynamic_zero_point():
    """Zero points given as relay expressions (not Python scalars): a scalar
    expression for the input and a per-channel vector for the kernel."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input with non static zero points.
        data_shape = (2, 4, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "uint8"
        # 2 * 2 expressed as a relay expression, so it is not a constant scalar.
        input_zero_point = relay.op.multiply(
            relay.const(2, dtype="int32"), relay.const(2, dtype="int32")
        )
        # One zero point per output channel (3 channels).
        kernel_zero_point = relay.const(np.random.randint(10, size=[3]), "int32")
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # int8 input; reuses the zero-point expressions built above.
        data_shape = (2, 4, 2, 4)
        data_dtype = "int8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "int8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_layout():
    """Non-default layouts: NHWC/HWIO for a normal conv and NHWC/HWOI for a
    depthwise conv (groups == channels)."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (2, 2, 4, 4)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 4, 3)  # HWIO
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # NHWC and HWOI layout. Used in depthwise conv.
        data_shape = (2, 2, 4, 3)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 3, 1)  # HWOI
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            groups=3,
            data_layout="NHWC",
            kernel_layout="HWOI",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_padding():
    """Padding with zero points: symmetric padding in NCHW and NHWC layouts,
    plus a 4-tuple asymmetric padding case."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (1, 4, 2, 2)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=5,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # Try different layout
        data_shape = (2, 2, 4, 4)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 4, 3)  # HWIO
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # Try asymmetric padding
        data_shape = (2, 2, 4, 4)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (2, 2, 4, 3)  # HWIO
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(1, 1, 2, 2),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_dilation():
    """Dilated conv with and without zero points must match the reference."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # First pair: non-zero kernel point (falls back to simpler lowering);
        # second pair: both zero points at zero.
        for izp, kzp in ((5, 3), (0, 0)):
            in_shape = (2, 4, 4, 4)
            in_dtype = "uint8"
            w_shape = (3, 4, 2, 2)
            w_dtype = "uint8"
            ref_func, qnn_func = get_funcs(
                data_shape=in_shape,
                data_dtype=in_dtype,
                kernel_shape=w_shape,
                kernel_dtype=w_dtype,
                input_zero_point=izp,
                kernel_zero_point=kzp,
                input_scale=1.0,
                kernel_scale=1.0,
                kernel_size=(2, 2),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(2, 2),
                data_layout="NCHW",
                kernel_layout="OIHW",
                out_dtype="int32",
            )
            verify(ref_func, qnn_func, in_shape, in_dtype, w_shape, w_dtype)
def test_const_folding():
    """With a constant kernel, FoldConstant must fold away the per-channel
    zero-point reshape so no `reshape` op survives in the lowered function."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (2, 4, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 2, 2)
        kernel_dtype = "uint8"
        golden_weight = np.random.randint(low=0, high=255, size=kernel_shape).astype(kernel_dtype)
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        # Kernel is a constant (not a var), so it is foldable.
        kernel = relay.const(golden_weight)
        qnn_func = get_qnn_func(
            data,
            kernel,
            input_zero_point=8,
            kernel_zero_point=3,
            kernel_size=(2, 2),
            input_scale=1.0,
            kernel_scale=1.0,
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
            channels=kernel_shape[0],
            groups=1,
        )
        folded_mod = transform.FoldConstant()(qnn_func)
        folded_func = folded_mod["main"]
        assert "reshape" not in folded_func.astext()
def test_kernel_size_1x1():
    """1x1 kernel: the lowering must not introduce avg_pool2d, and the
    result must still match the reference conv."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (2, 4, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 1, 1)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        assert "avg_pool2d" not in qnn_func.astext()
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_kernel_size_1x1_strides_2():
    """1x1 kernel with stride 2: same avg_pool2d-free lowering check as the
    stride-1 case, plus numeric equality with the reference."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (2, 4, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 4, 1, 1)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(2, 2),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        assert "avg_pool2d" not in qnn_func.astext()
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_tflite_large_irregular():
    """TFLite-style large irregular shape (1001 output channels): with data
    and weights equal to their zero points, the output must be all zeros."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (1, 1024, 1, 1)
        data_dtype = "uint8"
        kernel_shape = (1001, 1024, 1, 1)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(1, 1),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        # Inputs identical to the zero points => (x - zp) is 0 everywhere.
        golden_data = np.full(data_shape, 127).astype("uint8")
        golden_weight = np.full(kernel_shape, 127).astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(qnn_func, "llvm", params=params)
            mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            qnn_output = mod.get_output(0).numpy()
        golden_output = np.full((1, 1001, 1, 1), 0).astype("uint8")
        np.testing.assert_equal(qnn_output, golden_output)
def test_tflite_output_multiplier_greater_than_one():
    """Hand-computed golden outputs for a small uint8 conv with both zero
    points at 128 (TFLite-style symmetric offset)."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (2, 1, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 1, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_scale=1.0,
            kernel_scale=1.0,
            input_zero_point=128,
            kernel_zero_point=128,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(2, 2),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        # Values are offset by the 128 zero point, so the effective signal is
        # the small signed pattern below.
        golden_data = 128 + np.array((1, 1, 1, 1, 2, 2, 2, 2, 1, 2, 3, 4, 1, 2, 3, 4)).reshape(
            data_shape
        ).astype("uint8")
        golden_weight = 128 + np.array((1, 2, 3, 4, -1, 1, -1, 1, -1, -1, 1, 1)).reshape(
            kernel_shape
        )
        golden_weight = golden_weight.astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(qnn_func, "llvm", params=params)
            mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            qnn_output = mod.get_output(0).numpy()
        # Expected values computed by hand for the pattern above.
        golden_output = np.array((17, 17, 0, 0, 2, 2, 16, 36, 2, 2, 0, 0)).reshape(2, 3, 1, 2)
        np.testing.assert_equal(qnn_output, golden_output)
def test_tflite_anistropic_strides():
    """Anisotropic strides (1, 3) against hand-computed golden outputs.

    NOTE(review): "anistropic" is a typo for "anisotropic", but the name is
    kept because the __main__ runner calls it by this name.
    """
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input
        data_shape = (1, 1, 3, 6)
        data_dtype = "uint8"
        kernel_shape = (1, 1, 2, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=127,
            kernel_zero_point=127,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 3),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        # Data/weights straddle the 127 zero point.
        golden_data = np.array(
            (
                133,
                131,
                129,
                125,
                123,
                121,
                135,
                133,
                131,
                123,
                121,
                119,
                137,
                135,
                133,
                121,
                119,
                117,
            )
        ).reshape(data_shape)
        golden_data = golden_data.astype("uint8")
        golden_weight = np.array((129, 131, 133, 135)).reshape(kernel_shape)
        golden_weight = golden_weight.astype("uint8")
        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            graph, lib, params = relay.build(qnn_func, "llvm", params=params)
            mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
            mod.set_input("data", golden_data)
            mod.set_input(**params)
            mod.run()
            qnn_output = mod.get_output(0).numpy()
        # Expected values computed by hand for the pattern above.
        golden_output = np.array((124, -92, 164, -132)).reshape(1, 1, 2, 2)
        np.testing.assert_equal(qnn_output, golden_output)
def test_broadcast_layout():
    """Broadcast adds around a NHWC qnn.conv2d must compile (build-only
    smoke test for an AVX-512 llvm target; no numerical check)."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # Test broadcast support for NHWC layout.
        data_shape = (1, 229, 229, 3)  # NHWC
        data_dtype = "uint8"
        kernel_shape = (7, 7, 3, 64)  # HWIO
        kernel_dtype = "int8"
        _, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=8,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(7, 7),
            padding=(1, 1),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        func = qnn_func["main"].body
        bias = relay.var("bias", shape=(64,), dtype="int32")
        bias2 = relay.var("bias2", shape=(1, 225, 225, 1), dtype="int32")
        # Check broadcast support on both lhs and rhs
        func = relay.add(func, bias2)
        func = relay.add(bias2, func)
        func = relay.add(bias, func)
        func = relay.add(func, bias)
        func = relay.Function(relay.analysis.free_vars(func), func)
        mod = tvm.IRModule.from_expr(func)
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_depthwise_depth_multiplier():
    """Depthwise conv (groups == input channels) with depth multipliers 1 and
    2, in both NCHW/OIHW and NHWC/HWOI layouts, using expression zero points."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        # uint8 input, NCHW and OIHW
        # Depthwise multiplier = 1
        data_shape = (2, 4, 16, 16)
        data_dtype = "uint8"
        kernel_shape = (4, 1, 3, 3)
        kernel_dtype = "uint8"
        # Non-constant scalar input zero point and per-channel kernel zero
        # points; reused by every case below.
        input_zero_point = relay.op.multiply(
            relay.const(2, dtype="int32"), relay.const(2, dtype="int32")
        )
        kernel_zero_point = relay.const(np.random.randint(10, size=[4]), "int32")
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(3, 3),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
            groups=4,
            channels=4,
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # Depthwise multiplier = 2
        data_shape = (10, 4, 16, 16)
        data_dtype = "uint8"
        kernel_shape = (4, 2, 3, 3)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(3, 3),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
            groups=4,
            channels=8,
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # uint8 input, NHWC and HWOI
        # Depthwise multiplier = 1
        data_shape = (2, 16, 16, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 3, 4, 1)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(3, 3),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWOI",
            out_dtype="int32",
            groups=4,
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
        # Depthwise multiplier = 2
        data_shape = (2, 16, 16, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 3, 4, 2)
        kernel_dtype = "uint8"
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=input_zero_point,
            kernel_zero_point=kernel_zero_point,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(3, 3),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWOI",
            out_dtype="int32",
            groups=4,
            channels=8,
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_per_channel_kernel_scale():
    """Smoke test: a qnn.conv2d with a per-channel kernel scale vector must
    construct and convert to an IRModule (no execution or assertions)."""
    with TempOpAttr("qnn.conv2d", "FTVMQnnLegalize", legalize_qnn_conv2d):
        data_shape = (2, 1, 2, 4)
        data_dtype = "uint8"
        kernel_shape = (3, 1, 2, 2)
        kernel_dtype = "uint8"
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
        # One scale per output channel (3 channels).
        kernel_scales = [2, 2, 2]
        kernel_scales = relay.const(np.array(kernel_scales).astype("float32"))
        func = relay.qnn.op.conv2d(
            data,
            kernel,
            input_zero_point=relay.const(0, "int32"),
            kernel_zero_point=relay.const(0, "int32"),
            input_scale=relay.const(2.0, "float32"),
            kernel_scale=kernel_scales,
            kernel_size=(2, 2),
            channels=kernel_shape[0],
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
            out_dtype="int32",
        )
        mod = relay.Function(relay.analysis.free_vars(func), func)
        mod = tvm.IRModule.from_expr(mod)
if __name__ == "__main__":
    # Run every test in this file when invoked as a script.
    test_no_zero_point()
    test_input_zero_point()
    test_kernel_zero_point()
    test_both_zero_point()
    # Fix: test_dynamic_zero_point was defined but missing from this list.
    test_dynamic_zero_point()
    test_layout()
    test_padding()
    test_dilation()
    test_const_folding()
    test_kernel_size_1x1()
    test_kernel_size_1x1_strides_2()
    test_tflite_large_irregular()
    test_broadcast_layout()
    test_tflite_output_multiplier_greater_than_one()
    test_tflite_anistropic_strides()
    test_depthwise_depth_multiplier()
    test_per_channel_kernel_scale()
# file stats (dataset concatenation artifact): | 36,415 | 31.630824 | 98 | py |
# ==== tvm-main/tests/python/relay/test_ir_well_formed.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import well_formed
from tvm.relay.prelude import Prelude
def test_let():
    """A Var may be bound at most once for the IR to be well formed."""
    var = relay.Var("x")
    assert well_formed(var)
    ten = relay.Constant(tvm.nd.array(10))
    binding = relay.Let(var, ten, var)
    assert well_formed(binding)
    # Rebinding the same Var in an enclosing Let is ill-formed.
    assert not well_formed(relay.Let(var, ten, binding))
    ident = relay.Function([var], var, None)
    assert well_formed(ident)
    # Distinct fresh Vars bound to the same function are fine.
    assert well_formed(relay.Let(relay.Var("y"), ident, relay.Let(relay.Var("z"), ident, ten)))
def test_tuple():
    """Tuple fields must not rebind the same Var."""
    var = relay.Var("x")
    assert well_formed(var)
    const = relay.Constant(tvm.nd.array(10))
    bound = relay.Let(var, const, var)
    assert well_formed(bound)
    assert well_formed(relay.Tuple([const, const]))
    # Both fields bind `var`, so the tuple as a whole is ill-formed.
    assert not well_formed(relay.Tuple([bound, relay.Let(var, const, var)]))
def test_tuple_get_item():
    # Projection from a free (unbound) tuple var is still well formed.
    t = relay.Var("t")
    assert well_formed(relay.TupleGetItem(t, 2))
def test_adt():
    """Match clauses must not rebind the same pattern Var across clauses."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, none, some = p.mod.get_type("Option")
    x = relay.Var("x")
    some_case = relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), x)
    default_case = relay.Clause(relay.PatternVar(x), x)
    # Single clause binding x once: well formed.
    m0 = relay.Match(none(), [default_case])
    # Two clauses both binding the same Var x: ill formed.
    m1 = relay.Match(none(), [some_case, default_case])
    assert well_formed(m0)
    assert not well_formed(m1)
if __name__ == "__main__":
    # Run all well-formedness tests when invoked as a script.
    test_let()
    test_tuple()
    test_tuple_get_item()
    test_adt()
# file stats (dataset concatenation artifact): | 2,208 | 30.557143 | 86 | py |
# ==== tvm-main/tests/python/relay/test_op_level2.py ====
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 operator test cases.
"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, relay, te
from tvm.contrib import utils, cudnn
from tvm.ir.module import IRModule
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.topi.cuda.conv3d_winograd import _infer_tile_size
# Parametrizes tests over the graph and VM executors.
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_conv1d_infer_type():
    """Type inference for nn.conv1d: inferred weight shape, mixed-precision
    out_dtype, and the NWC data layout."""
    # symbolic in batch dimension
    n, c, w = te.var("n"), 10, 224
    x = relay.var("x", relay.ty.TensorType((n, c, w), "float32"))
    # NOTE(review): `w` is rebound here from the width constant (224) to the
    # weight var; the width is hard-coded as 224 in the asserts below.
    w = relay.var("w")
    y = relay.nn.conv1d(x, w, kernel_size=3, padding=(1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224), "float32")
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3), "float32")
    # infer by shape of w, mixed precision
    n, c, w = te.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
    y = relay.nn.conv1d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")
    # infer shape in case of different dtypes for input and weight.
    n, c, w = te.var("n"), 10, 224
    x = relay.var("x", relay.TensorType((n, c, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3), "int8"))
    y = relay.nn.conv1d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222), "int32")
    # Infer with NWC
    n, c, w = 4, 32, 224
    x = relay.var("x", relay.TensorType((n, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv1d(
        x, wt, kernel_size=3, padding=(1, 1), channels=16, data_layout="NWC", out_dtype="int32"
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, w, 16), "int32")
@tvm.testing.uses_gpu
def test_conv1d_run():
    """Execute nn.conv1d on all enabled targets and compare against the
    topi NCW python reference (normal, mixed-precision, and dilated cases)."""

    def run_test_conv1d(
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1),
        fref=None,
        dilation=1,
        except_targets=None,
        **attrs,
    ):
        # fref is accepted for signature parity but unused here.
        if except_targets is None:
            except_targets = []
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv1d(x, w, padding=padding, dilation=dilation, **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        # Reference computed in out_dtype precision, stride fixed at 1.
        ref_res = tvm.topi.testing.conv1d_ncw_python(
            data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, dilation
        )
        for target, dev in tvm.testing.enabled_targets():
            if target in except_targets:
                continue
            dev = tvm.device(target, 0)
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                data, kernel
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)

    # normal conv1d
    dshape = (1, 3, 224)
    kshape = (10, 3, 3)
    run_test_conv1d(
        "float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3
    )
    # mixed precision
    run_test_conv1d("int8", "int32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=3)
    # dilated conv2d
    dshape = (1, 3, 18)
    kshape = (10, 3, 3)
    run_test_conv1d(
        "float32",
        "float32",
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=10,
        kernel_size=3,
        dilation=3,
    )
@tvm.testing.uses_gpu
def test_conv2d_infer_type():
    """Type inference for nn.conv2d: symbolic batch dimension, weight-shape
    inference, mixed input/weight dtypes, packed NCHW4n4c layout, and NHWC."""
    # symbolic in batch dimension
    n, c, h, w = te.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224, 224), "float32")
    # The weight shape is inferred from channels/kernel_size.
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3), "float32")
    # infer by shape of w, mixed precision
    n, c, h, w = te.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")
    # infer shape in case of different dtypes for input and weight.
    n, c, h, w = te.size_var("n"), 10, 224, 224
    x = relay.var("x", relay.TensorType((n, c, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8"))
    y = relay.nn.conv2d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222), "int32")
    # Infer with a different layout (packed 4n4c data / 4o4i kernel).
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n // 4, c // 4, h, w, 4, 4), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(
        x,
        wt,
        kernel_size=(3, 3),
        padding=(1, 1),
        channels=16,
        data_layout="NCHW4n4c",
        kernel_layout="OIHW4o4i",
        out_dtype="int32",
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 4, 4), "int32")
    assert yy.args[1].checked_type == relay.TensorType((4, 8, 3, 3, 4, 4), "int8")
    # Infer with NHWC
    n, c, h, w = 4, 32, 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv2d(
        x,
        wt,
        kernel_size=(3, 3),
        padding=(1, 1),
        channels=16,
        data_layout="NHWC",
        out_dtype="int32",
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, h, w, 16), "int32")
class TestConv2D:
    """Parametrized nn.conv2d execution tests (NCHW layout).

    Each entry of ``config`` describes one scenario: grouped convolution,
    normal convolution, mixed precision (int8 -> int32) and dilated kernels.
    Results are compared against the topi NCHW python reference.
    """
    # One dict per test case; keys are used as pytest test ids below.
    config = {
        "group1": dict(
            dtype="float32",
            out_dtype="float32",
            scale=1,
            dshape=(1, 32, 18, 18),
            kshape=(32, 4, 3, 3),
            padding=(1, 1),
            channels=32,
            groups=8,
            kernel_size=(3, 3),
            dilation=(1, 1),
        ),
        "group2": dict(
            dtype="float32",
            out_dtype="float32",
            scale=1,
            dshape=(1, 32, 18, 18),
            kshape=(64, 1, 3, 3),
            padding=(1, 1),
            channels=64,
            groups=32,
            kernel_size=(3, 3),
            dilation=(1, 1),
        ),
        "normal": dict(
            dtype="float32",
            out_dtype="float32",
            scale=1,
            dshape=(1, 3, 224, 224),
            kshape=(10, 3, 3, 3),
            padding=(1, 1),
            channels=10,
            groups=1,
            kernel_size=(3, 3),
            dilation=(1, 1),
        ),
        "mixed_precision_int8_int32_case1": dict(
            dtype="int8",
            out_dtype="int32",
            scale=1,
            dshape=(1, 3, 224, 224),
            kshape=(10, 3, 3, 3),
            padding=(1, 1),
            channels=10,
            groups=1,
            kernel_size=(3, 3),
            dilation=(1, 1),
        ),
        "mixed_precision_int8_int32_case2": dict(
            dtype="int8",
            out_dtype="int32",
            scale=1,
            dshape=(1, 3, 224, 224),
            kshape=(10, 3, 1, 3),
            padding=(0, 1),
            channels=10,
            groups=1,
            kernel_size=(1, 3),
            dilation=(1, 1),
        ),
        "dilated": dict(
            dtype="float32",
            out_dtype="float32",
            scale=1,
            dshape=(1, 3, 18, 18),
            kshape=(10, 3, 3, 3),
            padding=(1, 1),
            channels=10,
            groups=1,
            kernel_size=(3, 3),
            dilation=(3, 3),
        ),
    }
    # TODO(Lunderberg): Make a cleaner utility for this type of
    # parametrization.  It would be much nicer to have the fixture
    # name come from the dictionaries themselves, rather than needing
    # to be re-packed into tuples.
    (
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding,
        channels,
        groups,
        kernel_size,
        dilation,
    ) = tvm.testing.parameters(
        *[
            [
                d[p]
                for p in [
                    "dtype",
                    "out_dtype",
                    "scale",
                    "dshape",
                    "kshape",
                    "padding",
                    "channels",
                    "groups",
                    "kernel_size",
                    "dilation",
                ]
            ]
            for d in config.values()
        ],
        ids=config.keys(),
    )
    def test_run(
        self,
        target,
        dev,
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding,
        groups,
        dilation,
        channels,
        kernel_size,
    ):
        """Build, run and numerically check one conv2d configuration."""
        target = tvm.target.Target(target)
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        y = relay.nn.conv2d(
            x,
            w,
            padding=padding,
            dilation=dilation,
            groups=groups,
            channels=channels,
            kernel_size=kernel_size,
        )
        func = relay.Function([x, w], y)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        # The reference has no dilation parameter, so dilate the kernel
        # explicitly before calling it.
        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        ref_res = tvm.topi.testing.conv2d_nchw_python(
            data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups
        )
        op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
            data, kernel
        )
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4)
def test_compile_depthwise_conv2d_arm_cpu():
    """Compile (not run) a 512-group depthwise conv2d for the arm_cpu target,
    using a canned autotvm tuning record written to a temporary log file so
    the spatial-pack schedule is selected."""
    dtype = "float32"
    out_dtype = "float32"
    scale = 1
    dshape = (1, 512, 32, 32)
    kshape = (512, 1, 3, 3)
    padding = (1, 1)
    channels = 512
    groups = 512
    kernel_size = (3, 3)
    dilation = (1, 1)
    x = relay.var("x", shape=dshape, dtype=dtype)
    w = relay.var("w", shape=kshape, dtype=dtype)
    y = relay.nn.conv2d(
        x,
        w,
        padding=padding,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
    )
    func = relay.Function([x, w], y)
    mod = tvm.IRModule()
    mod["main"] = func
    # A pre-recorded autotvm log entry for depthwise_conv2d_nchw_spatial_pack
    # matching the workload above (JSON format of the autotvm log).
    test_schedule = '{"i": ["llvm -device=arm_cpu", "depthwise_conv2d_nchw_spatial_pack.arm_cpu", \
[["TENSOR", [1, 512, 32, 32], "float32"], \
["TENSOR", [512, 1, 3, 3], "float32"], \
[1, 1], [1, 1], [1, 1], "float32"], {}, \
["depthwise_conv2d_nchw_spatial_pack.arm_cpu", [1, 512, 32, 32, "float32"], \
[512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \
{"i": 743640, "t": "", "c": null, \
"e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \
["tile_ow", "sp", [1, 8]], \
["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \
["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \
["ann_reduce", "an", ["unroll", "none"]], \
["ann_spatial", "an", ["unroll", "unroll", "vec"]], \
["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \
["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \
0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}'
    temp = utils.tempdir()
    with open(temp.relpath("temp.log"), "w") as log_file:
        log_file.write(test_schedule)
    # Apply the recorded schedule during compilation; success == no exception.
    with autotvm.apply_history_best(temp.relpath("temp.log")):
        with tvm.transform.PassContext(opt_level=3):
            print("Compiling...")
            graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu")
@tvm.testing.uses_gpu
def test_conv2d_winograd():
    """Steer the CUDA strategy toward winograd conv2d via a fallback autotvm
    context, then check numeric results against the direct python reference."""
    class WinogradFallback(autotvm.FallbackContext):
        def _query_inside(self, target, workload):
            # Fabricate a config for each workload; winograd templates get a
            # lower cost (0.1) so the op strategy prefers them.
            key = (target, workload)
            if key in self.memory:
                return self.memory[key]
            cfg = autotvm.task.space.FallbackConfigEntity()
            cfg.is_fallback = False
            cfg.cost = 0.1 if "winograd" in workload[0] else 1
            cfg["tile_b"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_rc"] = autotvm.task.space.SplitEntity([-1, 1])
            cfg["auto_unroll_max_step"] = autotvm.task.space.OtherOptionEntity(1500)
            cfg["unroll_explicit"] = autotvm.task.space.OtherOptionEntity(1)
            self.memory[key] = cfg
            return cfg
    def run_test_conv2d_cuda(
        dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
    ):
        # Build, compile with the winograd fallback, and execute on CUDA only.
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        func = relay.Function([x, w], y)
        mod = tvm.IRModule()
        mod["main"] = func
        mod = relay.transform.InferType()(mod)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        ref_res = tvm.topi.testing.conv2d_nchw_python(
            data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups
        )
        with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
            for target, dev in tvm.testing.enabled_targets():
                if target != "cuda":
                    continue
                dev = tvm.device(target, 0)
                # Bind the kernel as a parameter so the winograd weight
                # transform can be pre-computed at build time.
                params = {"w": tvm.nd.array(kernel)}
                graph, lib, params = relay.build_module.build(mod, target=target, params=params)
                module = tvm.contrib.graph_executor.create(graph, lib, dev)
                module.set_input("x", tvm.nd.array(data))
                module.set_input(**params)
                module.run()
                op_res1 = module.get_output(0)
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3)
    # normal winograd: stride 1, padding 1, kernel 3x3
    dshape = (1, 80, 73, 73)
    kshape = (192, 80, 3, 3)
    run_test_conv2d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=192, kernel_size=(3, 3)
    )
    # extended winograd: stride 1, padding N, kernel 3x3
    run_test_conv2d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(0, 0), channels=192, kernel_size=(3, 3)
    )
    run_test_conv2d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(3, 3)
    )
    # extended winograd: stride 1, padding N, kernel NxN
    kshape = (192, 80, 7, 7)
    run_test_conv2d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(2, 2), channels=192, kernel_size=(7, 7)
    )
@tvm.testing.uses_gpu
def test_conv3d_infer_type():
    """Type inference for nn.conv3d: symbolic batch, weight-shape inference,
    mixed dtypes, NDHWC layout, and grouped convolution."""
    # symbolic in batch dimension
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv3d(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224, 224, 224), "float32")
    # The weight shape is inferred from channels/kernel_size.
    assert yy.args[1].checked_type == relay.TensorType((2, 10, 3, 3, 3), "float32")
    # infer by shape of w, mixed precision
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
    y = relay.nn.conv3d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222, 222), "int32")
    # infer shape in case of different dtypes for input and weight.
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8"))
    y = relay.nn.conv3d(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 222, 222, 222), "int32")
    # Infer with NDHWC
    n, c, d, h, w = 4, 32, 224, 224, 224
    x = relay.var("x", relay.TensorType((n, d, h, w, c), "int8"))
    wt = relay.var("w")
    y = relay.nn.conv3d(
        x,
        wt,
        kernel_size=(3, 3, 3),
        padding=(1, 1, 1),
        channels=16,
        data_layout="NDHWC",
        out_dtype="int32",
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, d, h, w, 16), "int32")
    # Infer with groups
    x = relay.var("x", relay.TensorType((1, 16, 224, 224, 224), "float32"))
    w = relay.var("w", relay.TensorType((4, 4, 1, 1, 1), "float32"))
    y = relay.nn.conv3d(x, w, groups=4, kernel_size=(1, 1, 1), channels=4)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1, 4, 224, 224, 224), "float32")
@tvm.testing.uses_gpu
def test_conv3d_run():
    """Execute nn.conv3d (NCDHW layout) on all enabled targets and compare
    against the topi NCDHW python reference."""
    def run_test_conv3d(
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1, 1),
        fref=None,
        groups=1,
        dilation=(1, 1, 1),
        except_targets=None,
        **attrs,
    ):
        if except_targets is None:
            except_targets = []
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        # Dilate the kernel explicitly; the reference has no dilation arg.
        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
        if fref is None:
            ref_res = tvm.topi.testing.conv3d_ncdhw_python(
                data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups
            )
        else:
            # Caller-supplied reference function takes (data, dilated_kernel).
            ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
        for target, dev in tvm.testing.enabled_targets():
            if target in except_targets:
                continue
            dev = tvm.device(target, 0)
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                data, kernel
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    # normal conv3d
    dshape = (1, 3, 5, 224, 224)
    kshape = (10, 3, 3, 3, 3)
    run_test_conv3d(
        "float32",
        "float32",
        1,
        dshape,
        kshape,
        padding=(1, 1, 1),
        channels=10,
        kernel_size=(3, 3, 3),
    )
@tvm.testing.uses_gpu
def test_conv3d_ndhwc_run():
    """Execute nn.conv3d with NDHWC data / DHWIO kernel layouts and compare
    against the topi NDHWC python reference (CUDA is excluded)."""
    def run_test_conv3d(
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1, 1),
        fref=None,
        groups=1,
        dilation=(1, 1, 1),
        except_targets=None,
        **attrs,
    ):
        if except_targets is None:
            except_targets = []
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", dtype=dtype)
        y = relay.nn.conv3d(
            x,
            w,
            padding=padding,
            dilation=dilation,
            groups=groups,
            data_layout="NDHWC",
            kernel_layout="DHWIO",
            **attrs,
        )
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        # Dilate the kernel explicitly; the reference has no dilation arg.
        dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation)
        if fref is None:
            ref_res = tvm.topi.testing.conv3d_ndhwc_python(
                data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding
            )
        else:
            # Caller-supplied reference function takes (data, dilated_kernel).
            ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype))
        for target, dev in tvm.testing.enabled_targets():
            if target in except_targets:
                continue
            dev = tvm.device(target, 0)
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                data, kernel
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    # normal conv3d
    dshape = (1, 5, 224, 224, 6)
    kshape = (3, 3, 3, 6, 10)
    run_test_conv3d(
        "float32",
        "float32",
        1,
        dshape,
        kshape,
        padding=(1, 1, 1),
        channels=10,
        kernel_size=(3, 3, 3),
        except_targets=["cuda"],
    )
@tvm.testing.uses_gpu
def test_conv3d_winograd():
    """Steer the CUDA strategy toward winograd conv3d via a fallback autotvm
    context and check numeric results against the direct python reference,
    including kernels without a depth transform (depth-1 kernels)."""
    class WinogradFallback(autotvm.FallbackContext):
        def _query_inside(self, target, workload):
            # Fabricate a config per workload; winograd templates get a lower
            # cost (0.1) so the op strategy prefers them.
            key = (target, workload)
            if key in self.memory:
                return self.memory[key]
            cfg = autotvm.task.space.FallbackConfigEntity()
            cfg.is_fallback = False
            cfg.cost = 0.1 if "winograd" in workload[0] else 1
            cfg["tile_b"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_y"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_x"] = autotvm.task.space.SplitEntity([-1, 1, 1, 1])
            cfg["tile_rc"] = autotvm.task.space.SplitEntity([-1, 1])
            cfg["auto_unroll_max_step"] = autotvm.task.space.OtherOptionEntity(0)
            cfg["unroll_explicit"] = autotvm.task.space.OtherOptionEntity(1)
            self.memory[key] = cfg
            return cfg
    def run_test_conv3d_cuda(
        dtype,
        out_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1, 1),
        groups=1,
        dilation=(1, 1, 1),
        prepack=False,
        **attrs,
    ):
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        if prepack:
            # Use the explicitly pre-transformed weight path.
            tile_size = _infer_tile_size(np.zeros(shape=dshape), np.zeros(shape=kshape))
            w_packed = relay.nn.contrib_conv3d_winograd_weight_transform(w, tile_size)
            y = relay.nn.contrib_conv3d_winograd_without_weight_transform(
                x,
                w_packed,
                tile_size,
                padding=padding,
                dilation=dilation,
                groups=groups,
                channels=kshape[0],
                **attrs,
            )
        else:
            y = relay.nn.conv3d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        func = relay.Function([x, w], y)
        mod = tvm.IRModule()
        mod["main"] = func
        mod = relay.transform.InferType()(mod)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        ref_res = tvm.topi.testing.conv3d_ncdhw_python(
            data.astype(out_dtype), kernel.astype(out_dtype), 1, padding, groups=groups
        )
        with WinogradFallback(), tvm.transform.PassContext(opt_level=3):
            for target, dev in tvm.testing.enabled_targets():
                if target != "cuda":
                    continue
                dev = tvm.device(target, 0)
                # Bind the kernel so the weight transform is done at build time.
                params = {"w": tvm.nd.array(kernel)}
                graph, lib, params = relay.build_module.build(mod, target=target, params=params)
                module = tvm.contrib.graph_executor.create(graph, lib, dev)
                module.set_input("x", tvm.nd.array(data))
                module.set_input(**params)
                module.run()
                op_res1 = module.get_output(0)
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-3, atol=1e-3)
    # normal winograd: stride 1, padding 1, kernel 3x3x3
    dshape = (1, 32, 16, 16, 16)
    kshape = (64, 32, 3, 3, 3)
    run_test_conv3d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(1, 1, 1), kernel_size=(3, 3, 3)
    )
    # Without depth transform using 1x3x3 kernel.
    kshape = (64, 32, 1, 3, 3)
    run_test_conv3d_cuda(
        "float32", "float32", 1, dshape, kshape, padding=(0, 1, 1), kernel_size=(1, 3, 3)
    )
    # extended winograd: stride 1, padding N, kernel NxNxN
    dshape = (1, 61, 20, 20, 20)
    kshape = (120, 61, 5, 5, 5)
    run_test_conv3d_cuda(
        "float32",
        "float32",
        1,
        dshape,
        kshape,
        padding=(2, 2, 2),
        channels=120,
        kernel_size=(5, 5, 5),
    )
    # Without depth transform
    kshape = (120, 61, 1, 5, 5)
    run_test_conv3d_cuda(
        "float32",
        "float32",
        1,
        dshape,
        kshape,
        padding=(0, 2, 2),
        channels=120,
        kernel_size=(1, 5, 5),
    )
@tvm.testing.uses_gpu
def test_conv3d_transpose_infer_type():
    """Type inference for nn.conv3d_transpose: symbolic batch, weight-shape
    inference and mixed input/weight dtypes (IODHW kernel layout)."""
    # symbolic in batch dimension
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32"))
    w = relay.var("w")
    y = relay.nn.conv3d_transpose(x, w, kernel_size=(3, 3, 3), padding=(1, 1, 1), channels=2)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 2, 224, 224, 224), "float32")
    # Transposed conv kernel is (in_channels, out_channels, d, h, w).
    assert yy.args[1].checked_type == relay.TensorType((10, 2, 3, 3, 3), "float32")
    # infer by shape of w, mixed precision
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    w = relay.var("w", relay.TensorType((10, 12, 3, 3, 3), "int8"))
    y = relay.nn.conv3d_transpose(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 12, 226, 226, 226), "int32")
    # infer shape in case of different dtypes for input and weight.
    n, c, d, h, w = te.size_var("n"), 10, 224, 224, 224
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8"))
    w = relay.var("w", relay.TensorType((10, 12, 3, 3, 3), "int8"))
    y = relay.nn.conv3d_transpose(x, w, out_dtype="int32")
    assert 'out_dtype="int32"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 12, 226, 226, 226), "int32")
@tvm.testing.uses_gpu
def test_conv3d_transpose_ncdhw_run():
    """Run nn.conv3d_transpose (NCDHW layout) on every enabled target and
    compare against the topi python reference."""
    dtype = "float32"
    data_shape = (1, 3, 24, 24, 24)
    kernel_shape = (3, 4, 2, 2, 2)
    inp = relay.var("x", shape=data_shape)
    wgt = relay.var("w")
    out = relay.nn.conv3d_transpose(
        inp, wgt, channels=4, kernel_size=(2, 2, 2), strides=(1, 1, 1), padding=(1, 1, 1)
    )
    func = relay.Function([inp, wgt], out)
    data = np.random.uniform(size=data_shape).astype(dtype)
    kernel = np.random.uniform(size=kernel_shape).astype(dtype)
    expected = tvm.topi.testing.conv3d_transpose_ncdhw_python(data, kernel, 1, 1, 0)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_infer_type():
    """Type inference for nn.conv2d_transpose: symbolic batch with inferred
    weight, and shape inference from an explicit weight in NHWC layout."""
    # symbolic in batch dimension
    n, c, h, w = te.size_var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    w = relay.var("w", relay.IncompleteType())
    y = relay.nn.conv2d_transpose(x, w, kernel_size=(3, 3), padding=(1, 1), channels=15)
    assert "channels=15" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 15, 10, 12), "float32")
    # Transposed conv kernel is (in_channels, out_channels, h, w).
    assert yy.args[1].checked_type == relay.TensorType((10, 15, 3, 3), "float32")
    # infer by shape of w, mixed precision
    n, h, w, c = te.size_var("n"), 10, 10, 12
    x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
    w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32"))
    y = relay.nn.conv2d_transpose(x, w, output_padding=(1, 1), channels=11, data_layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 15, 15, 11), "float32")
@tvm.testing.uses_gpu
def test_conv2d_transpose_nchw_run():
    """Run nn.conv2d_transpose (NCHW data) with both OIHW and IOHW kernel
    layouts and compare against the python reference, including the cuDNN
    path for IOHW when cuDNN is available."""
    k_layouts = {"OIHW": (10, 3, 3, 3), "IOHW": (3, 10, 3, 3)}
    output_padding = (1, 1)
    for k_layout, kshape in k_layouts.items():
        dshape = (1, 3, 18, 18)
        x = relay.var("x", shape=dshape)
        w = relay.var("w")
        y = relay.nn.conv2d_transpose(
            x,
            w,
            channels=10,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=(1, 1),
            output_padding=output_padding,
            kernel_layout=k_layout,
            data_layout="NCHW",
        )
        func = relay.Function([x, w], y)
        dtype = "float32"
        data = np.random.uniform(size=dshape).astype(dtype)
        kernel = np.random.uniform(size=kshape).astype(dtype)
        if k_layout != "IOHW":
            # Kernel is OIHW; the python reference expects IOHW, so transpose.
            kernel_iohw = np.transpose(kernel, [1, 0, 2, 3])
        else:
            kernel_iohw = kernel
        ref_res = tvm.topi.testing.conv2d_transpose_nchw_python(
            data, kernel_iohw, 2, 1, output_padding
        )
        enabled_targets = tvm.testing.enabled_targets()
        # cuDNN only supports the IOHW kernel layout for conv2d_transpose.
        if cudnn.exists() and k_layout == "IOHW":
            enabled_targets.append(("cuda -libs=cudnn", tvm.cuda(0)))
        for target, dev in enabled_targets:
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                data, kernel
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nhwc_run():
    """Run nn.conv2d_transpose with NHWC data / HWOI kernel layouts and
    compare against the topi python reference on every enabled target."""
    dtype = "float32"
    data_shape = (1, 18, 18, 3)
    kernel_shape = (3, 3, 10, 3)
    inp = relay.var("x", shape=data_shape)
    wgt = relay.var("w")
    out = relay.nn.conv2d_transpose(
        inp,
        wgt,
        channels=10,
        kernel_size=(3, 3),
        strides=(2, 2),
        padding=(1, 1),
        output_padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    func = relay.Function([inp, wgt], out)
    data = np.random.uniform(size=data_shape).astype(dtype)
    kernel = np.random.uniform(size=kernel_shape).astype(dtype)
    # The reference consumes the HWOI kernel directly.
    expected = tvm.topi.testing.conv2d_transpose_nhwc_python(
        data, kernel, "HWOI", 2, 1, output_padding=(1, 1)
    )
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nhwc_cudnn():
    """Run nn.conv2d_transpose through the cuDNN library path (NHWC data,
    IHWO kernel) and compare with the python reference; skipped silently
    when cuDNN is not built in."""
    if not cudnn.exists():
        return
    dshape_nhwc = (1, 18, 18, 3)
    kshape_ihwo = (3, 3, 3, 10)
    x = relay.var("x", shape=dshape_nhwc)
    w = relay.var("w", shape=kshape_ihwo)
    y = relay.nn.conv2d_transpose(
        x,
        w,
        channels=10,
        kernel_size=(3, 3),
        strides=(2, 2),
        padding=(1, 1),
        output_padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="IHWO",
    )
    func = relay.Function([x, w], y)
    dtype = "float32"
    data = np.random.uniform(size=dshape_nhwc).astype(dtype)
    kernel = np.random.uniform(size=kshape_ihwo).astype(dtype)
    # The reference expects HWOI, so permute the IHWO kernel accordingly.
    ref_res = tvm.topi.testing.conv2d_transpose_nhwc_python(
        data, np.transpose(kernel, [1, 2, 3, 0]), "HWOI", 2, 1, output_padding=(1, 1)
    )
    target = "cuda -libs=cudnn"
    dev = tvm.cuda(0)
    op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data, kernel)
    tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_conv1d_transpose_ncw_run():
    """Run nn.conv1d_transpose (NCW layout) on every enabled target and
    compare against the topi python reference."""
    dtype = "float32"
    data_shape = (1, 3, 18)
    kernel_shape = (3, 10, 3)
    oshape = (1, 10, 36)  # expected output shape (kept for documentation)
    inp = relay.var("x", shape=data_shape)
    wgt = relay.var("w")
    out = relay.nn.conv1d_transpose(
        inp, wgt, channels=10, kernel_size=(3,), strides=(2,), padding=(1,), output_padding=(1,)
    )
    func = relay.Function([inp, wgt], out)
    data = np.random.uniform(size=data_shape).astype(dtype)
    kernel = np.random.uniform(size=kernel_shape).astype(dtype)
    expected = tvm.topi.testing.conv1d_transpose_ncw_python(data, kernel, 2, 1, output_padding=(1,))
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data, kernel)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling_infer_type():
    """Type inference for nn.upsampling (NCHW, bilinear).

    Checks both a fully symbolic input shape — where output H/W are
    round(h * 2) / round(w * 2) cast to int32 — and a static shape.
    """
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    scale = tvm.tir.const(2.0, "float64")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
    # Bug fix: this line used to be a bare (unasserted) expression and also
    # misspelled the method as "BINLINEAR", so it checked nothing.
    assert 'method="bilinear"' in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType(
        (
            n,
            c,
            tvm.tir.Cast("int32", te.round(h * scale)),
            tvm.tir.Cast("int32", te.round(w * scale)),
        ),
        "float32",
    )
    # Static spatial dims: 100x200 upsampled by 2 -> 200x400.
    n, c = te.size_var("n"), te.size_var("c")
    x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32"))
    y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32")
@tvm.testing.uses_gpu
def test_upsampling3d_infer_type():
    """Type inference for nn.upsampling3d (NCDHW, trilinear): symbolic and
    static spatial dimensions scaled by 2."""
    n, c, d, h, w = (
        te.size_var("n"),
        te.size_var("c"),
        te.size_var("d"),
        te.size_var("h"),
        te.size_var("w"),
    )
    scale = tvm.tir.const(2.0, "float64")
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
    y = relay.nn.upsampling3d(
        x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear"
    )
    yy = run_infer_type(y)
    # Symbolic dims: output D/H/W are round(dim * 2) cast to int32.
    assert yy.checked_type == relay.TensorType(
        (
            n,
            c,
            tvm.tir.Cast("int32", te.round(d * scale)),
            tvm.tir.Cast("int32", te.round(h * scale)),
            tvm.tir.Cast("int32", te.round(w * scale)),
        ),
        "float32",
    )
    n, c = te.size_var("n"), te.size_var("c")
    x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32"))
    y = relay.nn.upsampling3d(
        x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear"
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32")
def _test_global_pool2d(opfunc, reffunc):
    """Shared helper: check type inference (NHWC and NCHW layouts) and the
    numeric output of a global 2-D pooling op.

    opfunc: relay global pool op (e.g. relay.nn.global_max_pool2d).
    reffunc: numpy reduction used as the reference (e.g. np.max).
    """
    n, c, h, w = te.size_var("n"), te.size_var("c"), 224, 224
    x = relay.var("x", relay.TensorType((n, h, w, c), "float32"))
    y = opfunc(x, layout="NHWC")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32")
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = opfunc(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, 1, 1), "float32")
    # test execution
    dtype = "float32"
    dshape = (1, 1024, 7, 7)
    x = relay.var("x", shape=dshape)
    y = opfunc(x)
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    # Global pooling over NCHW equals a keepdims reduction over H and W.
    ref_res = reffunc(data, axis=(2, 3), keepdims=True)
    for target, dev in tvm.testing.enabled_targets():
        op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_pool2d():
    """2-D pooling tests: max/avg pooling with strides, padding and dilation
    against the generic poolnd python reference, integer/float16 avg pooling,
    and global pooling (via _test_global_pool2d)."""
    def _test_pool2d(opfunc, pool_type, pool_size=2, strides=2, dilation=1, padding=0):
        # Type inference on a symbolic-batch input.
        n, c, h, w = te.size_var("n"), 10, 224, 224
        x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
        y = opfunc(x, pool_size=(1, 1))
        assert "pool_size=" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, 10, 224, 224), "float32")
        # test execution
        dtype = "float32"
        dshape = (1, 3, 28, 28)
        x = relay.var("x", shape=dshape)
        y = opfunc(x, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding)
        func = relay.Function([x], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = tvm.topi.testing.poolnd_python(
            data,
            [pool_size, pool_size],
            [strides, strides],
            [dilation, dilation],
            [padding, padding],
            [padding, padding],
            pool_type,
            count_include_pad=False,
            ceil_mode=False,
        )
        for target, dev in tvm.testing.enabled_targets():
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    def _test_pool2d_int(opfunc, reffunc, dtype):
        # Integer (and float16) pooling path, checked with int32/int64 shapes.
        n, c, h, w = te.size_var("n"), 10, 224, 224
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        y = opfunc(x, pool_size=(1, 1))
        assert "pool_size=" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, 10, 224, 224), dtype)
        # test execution
        dshape = (1, 3, 28, 28)
        for shape_dtype in ["int32", "int64"]:
            x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
            y = opfunc(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
            func = relay.Function([x], y)
            data = np.random.randint(low=-128, high=128, size=dshape)
            # 2x2/stride-2 pooling == reducing over the reshaped 2x2 tiles.
            ref_res = reffunc(data.reshape(1, 3, 14, 2, 14, 2), axis=(3, 5)).astype(dtype)
            for target, dev in tvm.testing.enabled_targets():
                op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    data
                )
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_pool2d(relay.nn.max_pool2d, "max")
    _test_pool2d(relay.nn.max_pool2d, "max", pool_size=2, strides=2, padding=0)
    _test_pool2d(relay.nn.max_pool2d, "max", pool_size=2, strides=2, padding=0, dilation=2)
    _test_pool2d(relay.nn.avg_pool2d, "avg")
    _test_pool2d(relay.nn.avg_pool2d, "avg", pool_size=2, strides=2, padding=0)
    _test_pool2d(relay.nn.avg_pool2d, "avg", pool_size=2, strides=2, padding=0, dilation=2)
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, "int64")
    _test_pool2d_int(relay.nn.avg_pool2d, np.mean, "float16")
    _test_global_pool2d(relay.nn.global_max_pool2d, np.max)
    _test_global_pool2d(relay.nn.global_avg_pool2d, np.mean)
def _test_global_pool1d(opfunc, reffunc):
    """Shared helper: check type inference (NWC and NCW layouts) and the
    numeric output of a global 1-D pooling op.

    opfunc: relay global pool op (e.g. relay.nn.global_max_pool1d).
    reffunc: numpy reduction used as the reference (e.g. np.max).
    """
    # Type inference with an explicit NWC layout.
    batch, chan, width = te.size_var("n"), te.size_var("c"), 224
    inp = relay.var("x", relay.TensorType((batch, width, chan), "float32"))
    inferred = run_infer_type(opfunc(inp, layout="NWC"))
    assert inferred.checked_type == relay.TensorType((batch, 1, chan), "float32")
    # Type inference with the default NCW layout and fully symbolic shape.
    batch, chan, width = te.size_var("n"), te.size_var("c"), te.size_var("w")
    inp = relay.var("x", relay.TensorType((batch, chan, width), "float32"))
    inferred = run_infer_type(opfunc(inp))
    assert inferred.checked_type == relay.TensorType((batch, chan, 1), "float32")
    # Numeric check: global pooling over NCW equals a keepdims reduction on W.
    dtype = "float32"
    dshape = (1, 1024, 7)
    inp = relay.var("x", shape=dshape)
    func = relay.Function([inp], opfunc(inp))
    data = np.random.uniform(size=dshape).astype(dtype)
    expected = reffunc(data, axis=(2,), keepdims=True)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_pool1d():
    """1-D pooling tests: max/avg pooling with strides, padding, dilation and
    int32/int64 shape dtypes against the poolnd python reference, plus global
    pooling (via _test_global_pool1d)."""
    def _test_pool1d(
        opfunc, pool_type, pool_size=2, strides=2, dilation=1, padding=0, dtype="float32"
    ):
        # Type inference on a symbolic-batch input.
        n, c, w = te.var("n"), 10, 224
        x = relay.var("x", relay.TensorType((n, c, w), "float32"))
        y = opfunc(x, pool_size=(1,))
        assert "pool_size=" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, 10, 224), "float32")
        # test execution
        dshape = (1, 3, 32)
        for shape_dtype in ["int32", "int64"]:
            x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
            # NOTE: recomputed from opfunc; shadows the pool_type parameter.
            pool_type = "max" if "max" in str(opfunc) else "avg"
            y = opfunc(x, pool_size=pool_size, strides=strides, dilation=dilation, padding=padding)
            func = relay.Function([x], y)
            data = np.random.uniform(size=dshape).astype(dtype)
            ref_res = tvm.topi.testing.poolnd_python(
                data,
                [pool_size],
                [strides],
                [dilation],
                [padding],
                [padding],
                pool_type,
                count_include_pad=False,
                ceil_mode=False,
            )
            for target, dev in tvm.testing.enabled_targets():
                op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    data
                )
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_pool1d(relay.nn.max_pool1d, "max")
    _test_pool1d(relay.nn.max_pool1d, "max", dtype="int32")
    _test_pool1d(relay.nn.max_pool1d, "max", pool_size=2, strides=2, padding=0)
    _test_pool1d(relay.nn.max_pool1d, "max", pool_size=2, strides=2, padding=0, dilation=2)
    _test_pool1d(relay.nn.avg_pool1d, "avg")
    _test_pool1d(relay.nn.avg_pool1d, "avg", dtype="int64")
    _test_pool1d(relay.nn.avg_pool1d, "avg", pool_size=2, strides=2, padding=0)
    _test_pool1d(relay.nn.avg_pool1d, "avg", pool_size=2, strides=2, padding=0, dilation=2)
    _test_global_pool1d(relay.nn.global_max_pool1d, np.max)
    _test_global_pool1d(relay.nn.global_avg_pool1d, np.mean)
@tvm.testing.uses_gpu
def test_pool3d():
    """Type inference and numerical check for 3D pooling ops.

    Padding is given as a 6-element sequence (front/top/left, back/bottom/right),
    split into padding[:3] and padding[3:] when building the reference.
    """
    def _test_pool3d(
        opfunc,
        pool_type,
        pool_size=2,
        strides=2,
        dilation=1,
        padding=[0, 0, 0, 0, 0, 0],
        dtype="float32",
    ):
        # Shape inference with a symbolic batch dimension n.
        n, c, d, h, w = te.size_var("n"), 10, 5, 224, 224
        x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32"))
        y = opfunc(x, pool_size=(1, 1, 1))
        assert "pool_size=" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, 10, 5, 224, 224), "float32")
        # test execution
        # NOTE(review): this overrides the dtype parameter, so the
        # dtype="int32" call sites below still run in float32 -- confirm intent.
        dtype = "float32"
        dshape = (1, 3, 32, 32, 32)
        for shape_dtype in ["int32", "int64"]:
            x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in dshape], dtype=dtype)
            # NOTE(review): shadows the pool_type parameter (re-derived from op name).
            pool_type = "max" if "max" in str(opfunc) else "avg"
            y = opfunc(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
            )
            func = relay.Function([x], y)
            data = np.random.uniform(size=dshape).astype(dtype)
            ref_res = tvm.topi.testing.poolnd_python(
                data,
                [pool_size, pool_size, pool_size],
                [strides, strides, strides],
                [dilation, dilation, dilation],
                padding[:3],
                padding[3:],
                pool_type,
                count_include_pad=False,
                ceil_mode=False,
            )
            for target, dev in tvm.testing.enabled_targets():
                op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    data
                )
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_pool3d(relay.nn.max_pool3d, "max")
    _test_pool3d(relay.nn.max_pool3d, "max", dtype="int32")
    _test_pool3d(relay.nn.max_pool3d, "max", padding=(2, 0, 0, 2, 0, 0))
    _test_pool3d(relay.nn.max_pool3d, "max", padding=(0, 3, 0, 0, 3, 0))
    _test_pool3d(relay.nn.max_pool3d, "max", padding=(0, 0, 4, 0, 0, 4))
    _test_pool3d(relay.nn.max_pool3d, "max", pool_size=2, strides=2)
    _test_pool3d(relay.nn.max_pool3d, "max", pool_size=2, strides=2, dilation=2)
    _test_pool3d(relay.nn.avg_pool3d, "avg")
    _test_pool3d(relay.nn.avg_pool3d, "avg", dtype="int32")
    _test_pool3d(relay.nn.avg_pool3d, "avg", padding=(2, 0, 0, 2, 0, 0))
    _test_pool3d(relay.nn.avg_pool3d, "avg", padding=(0, 3, 0, 0, 3, 0))
    _test_pool3d(relay.nn.avg_pool3d, "avg", padding=(0, 0, 4, 0, 0, 4))
    _test_pool3d(relay.nn.avg_pool3d, "avg", pool_size=2, strides=2)
    _test_pool3d(relay.nn.avg_pool3d, "avg", pool_size=2, strides=2, dilation=2)
@tvm.testing.uses_gpu
def test_avg_pool2d_no_count_pad():
    """avg_pool2d with count_include_pad=False: padded positions must not be
    counted in the averaging denominator.

    The reference zero-pads the input and divides each window's sum by the
    number of genuinely in-bounds elements of that window.
    """
    kh, kw = (4, 4)
    sh, sw = (2, 2)
    ph, pw = (2, 2)
    n = 1
    (ic, ih, iw) = (3, 28, 28)
    (oc, oh, ow) = (3, 15, 15)
    dshape = (n, ic, ih, iw)
    x = relay.var("x", shape=dshape)
    y = relay.nn.avg_pool2d(
        # Fix: strides was (sw, sw); the height stride should be sh. Behavior is
        # unchanged here because sh == sw == 2, but the intent is now correct.
        x, pool_size=(kh, kw), strides=(sh, sw), padding=(ph, pw), count_include_pad=False
    )
    func = relay.Function([x], y)
    dtype = "float32"
    # Inputs are strictly positive (low=0.001), so "> 0" below identifies
    # exactly the non-padded positions of the zero-padded buffer.
    a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
    pad_np = np.zeros(shape=(n, ic, ih + 2 * ph, iw + 2 * pw)).astype(dtype)
    no_zero = (range(n), range(ic), (range(ph, ih + ph)), (range(pw, iw + pw)))
    pad_np[np.ix_(*no_zero)] = a_np
    b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype)
    for i in range(oh):
        for j in range(ow):
            # Count of valid (non-pad) elements per window; clamp to 1 to avoid
            # division by zero on all-pad windows.
            pad_count = np.sum(
                pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] > 0, axis=(2, 3)
            )
            b_np[:, :, i, j] = np.sum(
                pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw], axis=(2, 3)
            ) / np.maximum(pad_count, 1)
    ref_res = np.maximum(b_np, 0.0)
    data = a_np
    for target, dev in tvm.testing.enabled_targets():
        op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_flatten_infer_type(executor_kind):
    """batch_flatten type inference (symbolic and concrete shapes) plus a
    numerical check against numpy flatten/reshape."""
    # Fully symbolic input: trailing dims are folded into a product expression.
    d1, d2, d3, d4 = te.size_var("d1"), te.size_var("d2"), te.size_var("d3"), te.size_var("d4")
    x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((d2 * d3) * d4)), "float32")
    # Fully concrete input.
    x = relay.var("x", relay.TensorType((3, 2, 4, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 24), "float32")
    # Mixed symbolic/concrete dims.
    x = relay.var("x", relay.TensorType((d1, 2, d3, 3), "float32"))
    y = relay.nn.batch_flatten(x)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((d1, ((2 * d3) * 3)), "float32")
    # Execution check: (1, 5, 10, 10) flattens to (1, 500).
    shape = (1, 5, 10, 10)
    o_shape = (1, 500)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.batch_flatten(x)
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(o_shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = x_data.flatten().reshape(o_shape)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_pad_infer_type():
    """nn.pad shape inference: each output dim is the input dim plus the sum of
    its (before, after) pad widths; negative widths shrink the dimension."""
    # entirely concrete cases
    n, c, h, w = 1, 2, 3, 4
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32")
    # Negative pad widths (cropping), with reflect mode.
    n, c, h, w = 4, 6, 3, 5
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((-1, -1), (2, -2), (0, -3), (4, 4)), pad_mode="reflect")
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((2, 6, 0, 13), "float32")
    # some symbolic values
    n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4)))
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
    # Symbolic dims combined with negative pads.
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(t, ((-1, -1), (-2, -2), (1, -3), (4, 4)))
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n + (-2), c + (-4), h + (-2), w + 8), "float32")
    # dealing with dynamic vals
    n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
    t = relay.var("t", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.pad(
        t, ((1, 1), (2, 2), (3, 3), (4, 4)), pad_value=relay.var("pad_value", "float32")
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n + 2, 6, 9, w + 8), "float32")
def _get_numpy_pad(dshape, data, pad, pad_value=0):
mod_pad = []
for axis, (pad_x, pad_y) in enumerate(pad):
indices = range(dshape[axis])
if pad_x < 0:
indices = indices[abs(pad_x) :]
pad_x = 0
if pad_y < 0:
indices = indices[:pad_y]
pad_y = 0
data = np.take(data, indices, axis)
mod_pad.append((pad_x, pad_y))
return np.pad(data, tuple(mod_pad), "constant", constant_values=pad_value)
@tvm.testing.uses_gpu
def test_pad_run():
    """Run nn.pad with positive and negative pad widths and compare against
    the _get_numpy_pad reference, for float32 and int32 data."""
    def _test_run(dtype):
        dshape_list = [(4, 10, 7, 7), (4, 6, 3, 5)]
        # Second case mixes negative widths (cropping) with positive padding.
        pad_list = [((1, 1), (2, 2), (3, 3), (4, 4)), ((-1, -1), (2, -2), (0, -2), (4, 4))]
        for dshape, pad in zip(dshape_list, pad_list):
            x = relay.var("x", shape=dshape)
            y = relay.nn.pad(x, pad)
            func = relay.Function([x], y)
            data = np.random.uniform(size=dshape).astype(dtype)
            ref_res = _get_numpy_pad(dshape, data, pad)
            for target, dev in tvm.testing.enabled_targets():
                op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    data
                )
                tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_run("float32")
    _test_run("int32")
@tvm.testing.uses_gpu
def test_pad_run_dynamic_pad_value():
    """nn.pad where the pad value is a relay variable supplied at run time
    instead of a compile-time constant."""
    def _test_run(dtype):
        dshape = (4, 6, 3, 5)
        pad = ((-1, -1), (2, -2), (0, -2), (4, 4))
        data = relay.var("data", shape=dshape, dtype=dtype)
        # The pad value is a graph input, checked against the numpy reference.
        pad_value = relay.var("pad_value", dtype)
        pad_data = relay.nn.pad(data, pad, pad_value=pad_value)
        f = relay.Function([data, pad_value], pad_data)
        data_arr = np.random.uniform(-10, 10, size=dshape).astype(dtype)
        pad_value_arr = 2.0
        ref_res = _get_numpy_pad(dshape, data_arr, pad, pad_value=pad_value_arr)
        for target, dev in tvm.testing.enabled_targets():
            result = relay.create_executor(kind="graph", device=dev, target=target).evaluate(f)(
                data_arr, pad_value_arr
            )
            tvm.testing.assert_allclose(result.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_run("float32")
    _test_run("int32")
def test_pad_value_in_array():
    """Compile-only check: nn.pad accepts a pad value given as a 1-element
    constant array; success of relay.build (AOT executor) is the assertion."""
    A = relay.var("A", shape=(32, 32), dtype="int8")
    # Extract pad value from an array
    p0 = relay.Constant(tvm.nd.array(np.array([2], dtype="int8")))
    p1 = relay.nn.pad(A, pad_value=p0, pad_width=((1, 1), (1, 1)))
    func = relay.Function(relay.analysis.free_vars(p1), p1)
    mod = tvm.IRModule.from_expr(func)
    target = "llvm"
    # The built library is intentionally unused; we only require that the
    # build does not raise.
    lib = relay.build(
        mod,
        tvm.target.Target(target, host=target),
        runtime=relay.backend.Runtime("cpp"),
        executor=relay.backend.Executor("aot", {"unpacked-api": False, "interface-api": "packed"}),
    )
@tvm.testing.uses_gpu
@pytest.mark.parametrize("dtype", ["float32", "float16"])
def test_lrn(executor_kind, dtype):
    """nn.lrn type inference plus a numerical check against the topi
    lrn_python reference, for float32 and float16."""
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    x = relay.var("x", shape=(n, c, h, w), dtype=dtype)
    y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=0.00001, beta=0.75)
    # Fix: this was a bare expression with no effect; assert the attribute is
    # actually printed (matches the "pool_size=" checks elsewhere in the file).
    assert "alpha=" in y.astext()
    yy = run_infer_type(y)
    # LRN preserves the input shape and dtype.
    assert yy.checked_type == relay.TensorType((n, c, h, w), dtype)
    shape = (1, 5, 10, 10)
    x = relay.var("x", relay.TensorType(shape, dtype))
    size = 5
    axis = 1
    bias = 0.5
    alpha = 0.00001
    beta = 0.75
    z = relay.nn.lrn(x, size=size, axis=axis, bias=bias, alpha=alpha, beta=beta)
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = tvm.topi.testing.lrn_python(x_data, size, axis, bias, alpha, beta)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_l2_normalize(executor_kind):
    """nn.l2_normalize type inference plus a numerical check against the topi
    l2_normalize_python reference."""
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    x = relay.var("x", shape=(n, c, h, w))
    y = relay.nn.l2_normalize(x, eps=0.001, axis=[1])
    # Fix: this was a bare expression with no effect; assert the attribute is
    # actually printed (matches the "pool_size=" checks elsewhere in the file).
    assert "axis=" in y.astext()
    yy = run_infer_type(y)
    # l2_normalize preserves the input shape.
    assert yy.checked_type == relay.TensorType((n, c, h, w))
    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    eps = 0.001
    axis = 1
    z = relay.nn.l2_normalize(x, eps=0.001, axis=[axis])
    yy = run_infer_type(z)
    assert yy.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    ref_res = tvm.topi.testing.l2_normalize_python(x_data, eps, axis)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def batch_flatten(data):
    """Numpy reference for nn.batch_flatten: keep the leading (batch) axis and
    collapse all remaining axes into one."""
    batch, *rest = data.shape
    flat = 1
    for dim in rest:
        flat *= dim
    return np.reshape(data, (batch, flat))
@tvm.testing.uses_gpu
def test_batch_flatten():
    """Run nn.batch_flatten on all enabled targets against the local
    batch_flatten numpy reference defined above."""
    t1 = relay.TensorType((5, 10, 5))
    x = relay.Var("x", t1)
    func = relay.Function([x], relay.nn.batch_flatten(x))
    data = np.random.rand(5, 10, 5).astype(t1.dtype)
    ref_res = batch_flatten(data)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def _test_upsampling(layout, method, align_corners=False):
    """Helper: 2x upsampling with the given layout and method.

    Checks symbolic-batch shape inference, then runs on all enabled targets
    and compares against the resize2d_python reference.
    """
    n, c, h, w = te.size_var("n"), 16, 32, 32
    scale_h = 2.0
    scale_w = 2.0
    dtype = "float32"
    def get_shape():
        # Returns (input shape without batch, expected output shape without batch).
        if layout == "NCHW":
            return (c, h, w), (c, int(round(h * scale_h)), int(round(w * scale_w)))
        else:
            return (h, w, c), (int(round(h * scale_h)), int(round(w * scale_w)), c)
    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling(
        x,
        scale_h=scale_h,
        scale_w=scale_w,
        layout=layout,
        method=method,
        align_corners=align_corners,
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling(
        x,
        scale_h=scale_h,
        scale_w=scale_w,
        layout=layout,
        method=method,
        align_corners=align_corners,
    )
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref = tvm.topi.testing.resize2d_python(
        data,
        (scale_h, scale_w),
        layout,
        # Strip the "bi" prefix ("bilinear" -> "linear") for the reference API.
        method[2:] if method[0:2] == "bi" else method,
        "align_corners" if align_corners else "asymmetric",
    )
    for target, dev in tvm.testing.enabled_targets():
        out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling():
    """Exercise 2D upsampling for both layouts with nearest-neighbor and
    bilinear (align_corners) interpolation."""
    for layout in ("NCHW", "NHWC"):
        _test_upsampling(layout, "nearest_neighbor")
        _test_upsampling(layout, "bilinear", True)
def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"):
    """Helper: 2x 3D upsampling with the given layout and method.

    Checks symbolic-batch shape inference, then runs on all enabled targets
    and compares against the resize3d_python reference.
    """
    n, c, d, h, w = te.size_var("n"), 8, 16, 16, 16
    scale_d = 2.0
    scale_h = 2.0
    scale_w = 2.0
    dtype = "float32"
    def get_shape():
        # Returns (input shape without batch, expected output shape without batch).
        if layout == "NCDHW":
            return (c, d, h, w), (
                c,
                int(round(d * scale_d)),
                int(round(h * scale_h)),
                int(round(w * scale_w)),
            )
        else:
            return (d, h, w, c), (
                int(round(d * scale_d)),
                int(round(h * scale_h)),
                int(round(w * scale_w)),
                c,
            )
    ishape, oshape = get_shape()
    x = relay.var("x", relay.TensorType((n,) + ishape, dtype))
    y = relay.nn.upsampling3d(
        x,
        scale_d=scale_d,
        scale_h=scale_h,
        scale_w=scale_w,
        layout=layout,
        method=method,
        coordinate_transformation_mode=coordinate_transformation_mode,
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n,) + oshape, dtype)
    dshape = (1,) + ishape
    x = relay.var("x", shape=dshape)
    y = relay.nn.upsampling3d(
        x,
        scale_d=scale_d,
        scale_h=scale_h,
        scale_w=scale_w,
        layout=layout,
        method=method,
        coordinate_transformation_mode=coordinate_transformation_mode,
    )
    func = relay.Function([x], y)
    data = np.random.uniform(size=dshape).astype(dtype)
    ref = tvm.topi.testing.resize3d_python(
        data,
        (scale_d, scale_h, scale_w),
        layout,
        # Strip the "tri" prefix ("trilinear" -> "linear") for the reference API.
        method[3:] if method[0:3] == "tri" else method,
        coordinate_transformation_mode,
    )
    for target, dev in tvm.testing.enabled_targets():
        out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(out.numpy(), ref, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_upsampling3d():
    """Exercise 3D upsampling for both layouts with nearest-neighbor
    (asymmetric) and trilinear (align_corners) interpolation."""
    for layout in ("NCDHW", "NDHWC"):
        _test_upsampling3d(layout, "nearest_neighbor", "asymmetric")
        _test_upsampling3d(layout, "trilinear", "align_corners")
@tvm.testing.requires_x86
@pytest.mark.skipif(tvm.target.codegen.llvm_version_major() < 8, reason="Requires LLVM 8")
class TestConv2DInt8Intrinsics:
    """Inspect generated x86 assembly for int8 conv2d.

    On CPUs with fast int8 support the code must use the dot-product style
    intrinsics (pmaddubs / vpdpbusd); on older CPUs it must still vectorize.
    """
    supported_targets = [
        "llvm -mcpu=nehalem",
        "llvm -mcpu=core-avx2",
        "llvm -mcpu=skylake-avx512",
        "llvm -mcpu=cascadelake",
    ]
    unsupported_targets = [
        "llvm -mcpu=x86-64",
    ]
    data_layout, kernel_layout = tvm.testing.parameters(
        ("NCHW", "OIHW"),
        # TODO(@anijain2305, @icemelon9): disable conv2d_int8 for NHWC data layout.
        # Re-enable this after adding conv2d_NCHWc_int8 support for NHWC.
        # ("NHWC", "HWIO"),
    )
    input_channels, output_channels = tvm.testing.parameters(
        # Sweep the input channels to check int8 robustness
        # Input channels should be a multiple of 4 internally.
        (1, 16),
        (4, 16),
        (6, 16),
        # Sweep the output channels to check int8 robustness
        # Output channels should be a multiple of 16 internally.
        (8, 4),
        (8, 16),
        (8, 20),
        # Check that both non-divisible oc and ic work
        (17, 29),
    )
    @tvm.testing.fixture
    def fast_int8_intrinsic(self, target):
        """Instruction substring expected in the assembly for this target."""
        if "nehalem" in target or "core-avx2" in target or "skylake-avx512" in target:
            return "pmaddubs"
        elif "cascadelake" in target:
            return "vpdpbusd"
        else:
            assert False, "Target should be Nehalem or core-avx2 or Skylake or Cascadelake"
    @tvm.testing.fixture
    def assembly(
        self,
        target,
        dtypes,
        input_channels,
        output_channels,
        data_layout,
        kernel_layout,
    ):
        """Build a single conv2d for the given config and return its asm source."""
        input_dtype, weight_dtype, output_dtype = dtypes
        image_size = (64, 64)
        kernel_size = (3, 3)
        batch_size = 1
        h, w = image_size
        if data_layout == "NCHW":
            data_shape = (batch_size, input_channels, *image_size)
        elif data_layout == "NHWC":
            data_shape = (batch_size, *image_size, input_channels)
        else:
            raise ValueError(f"Unsupported data layout: {data_layout}")
        x = relay.var("x", relay.TensorType(data_shape, input_dtype))
        if kernel_layout == "OIHW":
            kernel_shape = (output_channels, input_channels, *kernel_size)
        elif kernel_layout == "HWIO":
            kernel_shape = (*kernel_size, input_channels, output_channels)
        else:
            raise ValueError("Not supported")
        weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
        y = relay.nn.conv2d(
            x,
            weight,
            kernel_size=kernel_size,
            channels=output_channels,
            padding=(0, 0, 0, 1),
            dilation=(1, 1),
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_dtype=output_dtype,
        )
        func = relay.Function([x, weight], y)
        wdata = np.random.rand(*kernel_shape) * 10
        parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(func, target, params=parameters)
        return lib.get_source("asm")
    # Ensure that code uses the fast int8 instructions when available.
    @tvm.testing.parametrize_targets(*supported_targets)
    @pytest.mark.parametrize(
        "dtypes",
        [
            # compile conv2d for x86 (skylake, cascadelake) and test
            # assembly contains *pmadd* instructions
            ("uint8", "int8", "int32"),
            # Check that int8 x int8 goes through legalization so that
            # fast instructions can be picked up.
            ("int8", "int8", "int32"),
        ],
    )
    def test_uses_intrinsic(
        self,
        fast_int8_intrinsic,
        assembly,
    ):
        """Fast-int8 targets must emit the dot-product intrinsic."""
        assert fast_int8_intrinsic in assembly
    # For datatypes that don't have HW support, ensure that code is
    # generated without the fast int8 intrinsic.
    @tvm.testing.parametrize_targets(*supported_targets)
    @pytest.mark.parametrize("dtypes", [("uint8", "uint8", "int32")])
    def test_no_intrinsic(
        self,
        fast_int8_intrinsic,
        assembly,
    ):
        """uint8 x uint8 has no HW dot-product path; intrinsic must be absent."""
        assert fast_int8_intrinsic not in assembly
    # Check that a vectorized instruction is generated for older Intel
    # generations, because we default to NCHWc layout.
    @tvm.testing.parametrize_targets(*unsupported_targets)
    @pytest.mark.parametrize("dtypes", [("uint8", "int8", "int32")])
    def test_uses_vectorized_instruction(self, assembly):
        assert "pmulhw" in assembly or "pmaddwd" in assembly
        assert "paddd" in assembly
@tvm.testing.uses_gpu
def test_depthwise_conv2d_int8():
    """Compile-only check: int8 depthwise conv2d (groups == channels) builds
    at opt_level=3 for AVX-512 era targets when LLVM >= 8."""
    input_dtype = "uint8"
    weight_dtype = "int8"
    output_dtype = "int32"
    data_shape = (1, 64, 56, 56)
    x = relay.var("x", relay.TensorType(data_shape, input_dtype))
    kernel_shape = (64, 1, 3, 3)
    weight = relay.var("weight", relay.TensorType(kernel_shape, weight_dtype))
    # groups=64 with 64 input channels makes this a depthwise convolution.
    y = relay.nn.conv2d(
        x,
        weight,
        kernel_size=(3, 3),
        groups=64,
        padding=(1, 1),
        dilation=(1, 1),
        out_dtype=output_dtype,
    )
    func = relay.Function([x, weight], y)
    wdata = np.random.rand(*kernel_shape) * 10
    parameters = {"weight": tvm.nd.array(wdata.astype(weight_dtype))}
    targets = ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]
    llvm_version = tvm.target.codegen.llvm_version_major()
    for target in targets:
        if llvm_version >= 8:
            # Success of relay.build is the assertion; outputs are unused.
            with tvm.transform.PassContext(opt_level=3):
                graph, lib, params = relay.build(func, target, params=parameters)
@tvm.testing.uses_gpu
def test_bitserial_conv2d_infer_type():
    """Shape inference for bitserial_conv2d with a symbolic batch dimension."""
    # Basic shape test with ambiguous batch.
    n, c, h, w = te.size_var("n"), 32, 224, 224
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16"))
    w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16"))
    y = relay.nn.bitserial_conv2d(x, w, kernel_size=(3, 3), padding=(0, 0), channels=32)
    yy = run_infer_type(y)
    # 3x3 kernel, no padding: 224 -> 222 spatially.
    assert yy.checked_type == relay.TensorType((n, 32, 222, 222), "int16")
@tvm.testing.uses_gpu
def test_bitpack_infer_type():
    """Shape inference for nn.bitpack: pack axis 1 of int16 data into uint16
    words, appending a new bit axis."""
    in_shape = (32, 32, 128, 128)
    data = relay.var("x", relay.ty.TensorType(in_shape, "int16"))
    packed = relay.nn.bitpack(data, bit_axis=4, pack_axis=1, pack_type="uint16", bits=1)
    inferred = run_infer_type(packed)
    # 32 values along the pack axis fit into 2 uint16 words; bit axis has size 1.
    assert inferred.checked_type == relay.TensorType((32, 2, 128, 128, 1), "uint16")
# TODO(@jwfromm): Need to add bitserial_conv2d & bitpack run test cases
@tvm.testing.uses_gpu
def test_correlation():
    """nn.correlation: shape inference against the analytic output formula,
    then numerical comparison with the correlation_nchw_python reference."""
    def _test_correlation(
        data_shape,
        kernel_size,
        max_displacement,
        stride1,
        stride2,
        padding,
        is_multiply,
        dtype="float32",
    ):
        data1 = relay.var("data1", relay.ty.TensorType(data_shape, dtype))
        data2 = relay.var("data2", relay.ty.TensorType(data_shape, dtype))
        y = relay.nn.correlation(
            data1,
            data2,
            kernel_size,
            max_displacement,
            stride1,
            stride2,
            padding,
            is_multiply,
            "NCHW",
        )
        yy = run_infer_type(y)
        # Analytic output shape: one channel per displacement in the
        # (2*radius+1)^2 neighborhood; spatial dims shrink by the border
        # (kernel half-width + max displacement) and stride1.
        padded_height = data_shape[2] + 2 * padding
        padded_width = data_shape[3] + 2 * padding
        border_size = (kernel_size - 1) // 2 + max_displacement
        displacement_radius = max_displacement // stride2
        out_channel = ((2 * displacement_radius) + 1) ** 2
        out_height = (padded_height - 2 * border_size + stride1 - 1) // stride1
        out_width = (padded_width - 2 * border_size + stride1 - 1) // stride1
        assert yy.checked_type == relay.TensorType(
            (data_shape[0], out_channel, out_height, out_width), dtype
        )
        func = relay.Function([data1, data2], y)
        data1_np = np.random.uniform(size=data_shape).astype(dtype)
        data2_np = np.random.uniform(size=data_shape).astype(dtype)
        ref_res = tvm.topi.testing.correlation_nchw_python(
            data1_np,
            data2_np,
            kernel_size,
            max_displacement,
            stride1,
            stride2,
            padding,
            is_multiply,
        )
        for target, dev in tvm.testing.enabled_targets():
            op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                data1_np, data2_np
            )
            tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
    _test_correlation(
        (1, 3, 10, 10),
        kernel_size=1,
        max_displacement=4,
        stride1=1,
        stride2=1,
        padding=4,
        is_multiply=True,
    )
    _test_correlation(
        (1, 3, 10, 10),
        kernel_size=1,
        max_displacement=5,
        stride1=1,
        stride2=1,
        padding=5,
        is_multiply=True,
    )
    _test_correlation(
        (5, 1, 4, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=2,
        stride2=1,
        padding=2,
        is_multiply=True,
    )
    _test_correlation(
        (5, 1, 6, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=2,
        stride2=2,
        padding=2,
        is_multiply=False,
    )
    _test_correlation(
        (5, 1, 11, 11),
        kernel_size=5,
        max_displacement=1,
        stride1=1,
        stride2=1,
        padding=2,
        is_multiply=False,
    )
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_conv2d_rocm_sdot4():
    """int8 conv2d on ROCm with the dot-product attribute: the generated ISA
    must contain v_dot4_i32_i8 and the result must match the numpy reference."""
    d_shape = (1, 64, 56, 56)
    w_shape = (64, 64, 3, 3)
    padding = (1, 1)
    strides = (1, 1)
    data_dtype = "int8"
    weight_dtype = "int8"
    out_dtype = "int32"
    data = relay.var("data", shape=d_shape, dtype=data_dtype)
    weight = relay.var("weight", shape=w_shape, dtype=weight_dtype)
    out_channel = w_shape[0]
    conv2d = relay.nn.conv2d(
        data=data,
        weight=weight,
        kernel_size=w_shape[2:],
        channels=out_channel,
        padding=padding,
        strides=strides,
        out_dtype=out_dtype,
    )
    mod = tvm.IRModule.from_expr(conv2d)
    data_np = np.random.uniform(1, 10, d_shape).astype("int8")
    weight_np = np.random.uniform(1, 10, size=w_shape).astype("int8")
    target = "rocm -mattr=+dotprod"
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params={"weight": weight_np})
    # Check the device ISA actually uses the 4-way int8 dot product.
    asm = lib.lib.imported_modules[0].get_source("asm")
    assert "v_dot4_i32_i8" in asm
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    # int32 accumulation is exact, so compare bit-for-bit.
    ref = tvm.topi.testing.conv2d_nchw_python(
        data_np.astype("int32"), weight_np.astype("int32"), strides, padding
    )
    np.testing.assert_equal(out, ref)
def np_float2tvm_bf16(arr):
    """Convert a numpy array of float to a TVM array
    of bf16.

    Reinterprets the float32 bits, then rounds the low 16 bits away using the
    round-to-nearest (ties-to-even style) bias trick before truncating to the
    high 16 bits, and wraps the result in a TVM "bfloat16" NDArray.
    """
    # assumes arr dtype is float32 (viewed as little-endian uint32) -- TODO confirm callers
    orig = arr.view("<u4")
    # bias = (lsb of the would-be result) + 0x7FFF implements round-to-nearest.
    bias = np.bitwise_and(np.right_shift(orig, 16), 1) + 0x7FFF
    nparr = np.right_shift(orig + bias, 16).astype("uint16")
    return tvm.nd.empty(nparr.shape, "bfloat16").copyfrom(nparr)
def np_bf162np_float(arr):
    """Convert a numpy array of bf16 bit patterns (uint16) to float32 by
    placing each pattern in the high 16 bits of a 32-bit word."""
    widened = arr.astype("uint32") << 16
    return widened.view("<f4")
@tvm.testing.requires_x86
def test_conv2d_nchw_dnnl():
    """NCHW conv2d offloaded to DNNL (-libs=dnnl) in float32 and bfloat16,
    compared against the numpy reference (looser rtol for bf16)."""
    # Skip gracefully when TVM was not built with USE_DNNL=ON.
    if not tvm.get_global_func("tvm.contrib.dnnl.conv2d", allow_missing=True):
        print(
            "skip because extern dnnl function is not available, \
             built with dnnl=ON"
        )
        return
    d_shape = (1, 64, 56, 56)
    w_shape = (64, 64, 3, 3)
    padding = (1, 1)
    strides = (1, 1)
    def get_subgraph(dtype):
        # One conv2d in the given dtype, NCHW/OIHW layouts.
        data = relay.var("data", shape=d_shape, dtype=dtype)
        weight = relay.var("weight", shape=w_shape, dtype=dtype)
        out_channel = w_shape[0]
        conv2d = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=w_shape[2:],
            channels=out_channel,
            padding=padding,
            strides=strides,
            out_dtype=dtype,
        )
        return conv2d
    for t in ["float32", "bfloat16"]:
        mod = tvm.IRModule.from_expr(get_subgraph(t))
        data_np = np.random.uniform(1, 10, d_shape).astype("float32")
        weight_np = np.random.uniform(1, 10, size=w_shape).astype("float32")
        # Reference is always computed in float32, before any bf16 conversion.
        ref = tvm.topi.testing.conv2d_nchw_python(data_np, weight_np, strides, padding)
        if t == "bfloat16":
            data_np = np_float2tvm_bf16(data_np)
            weight_np = np_float2tvm_bf16(weight_np)
        target = "llvm -mcpu=skylake-avx512 -libs=dnnl"
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target, params={"weight": weight_np})
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        runtime.set_input("data", data_np)
        runtime.run()
        out = runtime.get_output(0).numpy()
        if t == "bfloat16":
            # bf16 has ~8 bits of mantissa, hence the loose tolerance.
            out = np_bf162np_float(out)
            np.testing.assert_allclose(out, ref, rtol=1e-2)
        else:
            np.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
@tvm.testing.requires_x86
def test_conv2d_nhwc_dnnl():
    """NHWC/HWIO conv2d offloaded to DNNL (-libs=dnnl) in float32 and
    bfloat16, compared against the numpy reference (looser rtol for bf16)."""
    # Skip gracefully when TVM was not built with USE_DNNL=ON.
    if not tvm.get_global_func("tvm.contrib.dnnl.conv2d", allow_missing=True):
        print(
            "skip because extern dnnl function is not available, \
             built with dnnl=ON"
        )
        return
    d_shape = (1, 56, 56, 64)
    w_shape = (3, 3, 64, 64)
    padding = (1, 1)
    strides = (1, 1)
    def get_subgraph(dtype):
        # One conv2d in the given dtype, NHWC/HWIO layouts.
        data = relay.var("data", shape=d_shape, dtype=dtype)
        weight = relay.var("weight", shape=w_shape, dtype=dtype)
        out_channel = w_shape[3]
        conv2d = relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=w_shape[:2],
            channels=out_channel,
            padding=padding,
            strides=strides,
            out_dtype=dtype,
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        return conv2d
    for t in ["float32", "bfloat16"]:
        mod = tvm.IRModule.from_expr(get_subgraph(t))
        data_np = np.random.uniform(1, 10, d_shape).astype("float32")
        weight_np = np.random.uniform(1, 10, size=w_shape).astype("float32")
        # Reference is always computed in float32, before any bf16 conversion.
        ref = tvm.topi.testing.conv2d_nhwc_python(data_np, weight_np, strides, padding)
        if t == "bfloat16":
            data_np = np_float2tvm_bf16(data_np)
            weight_np = np_float2tvm_bf16(weight_np)
        target = "llvm -mcpu=skylake-avx512 -libs=dnnl"
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target, params={"weight": weight_np})
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        runtime.set_input("data", data_np)
        runtime.run()
        out = runtime.get_output(0).numpy()
        if t == "bfloat16":
            # bf16 has ~8 bits of mantissa, hence the loose tolerance.
            out = np_bf162np_float(out)
            np.testing.assert_allclose(out, ref, rtol=1e-2)
        else:
            np.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
def _test_conv2d_int8_alter_dtype(data_dtype, target, dot_product_instrs):
    """Helper: build int8 conv2d + bias for `target`, require every string in
    `dot_product_instrs` to appear in the generated asm, and check the output
    bit-for-bit against the reference executor on plain llvm."""
    def get_conv2d_nchw(
        d_shape,
        w_shape,
        data_dtype,
    ):
        # One NCHW conv2d with int8 weights accumulating into int32.
        out_dtype = "int32"
        strides = (1, 1)
        padding = (1, 1)
        data = relay.var("data", shape=d_shape, dtype=data_dtype)
        weight = relay.var("weight", shape=w_shape, dtype="int8")
        out_channel = w_shape[0]
        return relay.nn.conv2d(
            data=data,
            weight=weight,
            kernel_size=w_shape[2:],
            channels=out_channel,
            padding=padding,
            strides=strides,
            out_dtype=out_dtype,
        )
    I, O, H, W = 64, 64, 56, 56
    kH = kW = 3
    data_shape = (1, I, H, W)
    weight_shape = (O, I, kH, kW)
    bias_shape = (1, weight_shape[0], 1, 1)
    bias = relay.var("bias", shape=bias_shape, dtype="int32")
    bias_np = np.random.randint(low=-127, high=128, size=bias_shape).astype("int32")
    weight_np = np.random.uniform(-32, 32, size=weight_shape).astype("int8")
    conv2d = get_conv2d_nchw(data_shape, weight_shape, data_dtype)
    bias_add = relay.add(conv2d, bias)
    mod = tvm.IRModule.from_expr(bias_add)
    if data_dtype == "uint8":
        data_np = np.random.uniform(0, 64, size=data_shape).astype("uint8")
    else:
        data_np = np.random.uniform(-32, 32, size=data_shape).astype("int8")
    params = {"weight": weight_np, "bias": bias_np}
    # Reference result from the generic llvm backend (no special intrinsics).
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight_np, bias_np])
        .numpy()
    )
    dev = tvm.cpu(0)
    with tvm.transform.PassContext(
        opt_level=3,
    ):
        lib = relay.build(mod, target=target, params=params)
    # Each requested dot-product instruction must appear in the asm.
    for dot_product_instr in dot_product_instrs:
        assert dot_product_instr in lib.lib.get_source("asm")
    rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    rt_mod.set_input("data", data_np)
    rt_mod.run()
    out = rt_mod.get_output(0).numpy()
    # Integer arithmetic is exact, so compare bit-for-bit.
    np.testing.assert_equal(out, ref)
@tvm.testing.requires_arm_dot
def test_conv2d_int8_alter_dtype_arm():
    """AArch64 with the dot-product extension must emit sdot for uint8 data."""
    _test_conv2d_int8_alter_dtype(
        "uint8", "llvm -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod", ["sdot"]
    )
@tvm.testing.requires_cascadelake
def test_conv2d_int8_alter_dtype_vnni():
    """Cascade Lake (VNNI) must emit vpdpbusd for int8 data."""
    _test_conv2d_int8_alter_dtype("int8", "llvm -mcpu=cascadelake", ["vpdpbusd"])
@tvm.testing.requires_skylake_avx512
def test_conv2d_int8_alter_dtype_avx512():
    """Skylake AVX-512 (no VNNI) must emit the pmaddubs/pmaddw/vpaddd sequence."""
    _test_conv2d_int8_alter_dtype(
        "int8", "llvm -mcpu=skylake-avx512", ["pmaddubs", "pmaddw", "vpaddd"]
    )
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest-based runner.
    tvm.testing.main()
| 79,435 | 34.289205 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing import run_infer_type
def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data, verify_output_data):
    """Build and run qnn.op.quantize on LLVM and check the exact integer output.

    quant_args supplies "out_scale" and "out_zero_point"; these are scalars for
    per-tensor quantization or arrays for per-channel quantization along `axis`.
    """
    shape = in_data.shape
    input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
    output_zero_point = relay.const(quant_args["out_zero_point"])
    output_scale = relay.const(quant_args["out_scale"])
    quantized_output = relay.qnn.op.quantize(
        input_data,
        output_scale=output_scale,
        output_zero_point=output_zero_point,
        axis=axis,
        out_dtype=out_dtype,
    )
    mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
    mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        # Quantization must be bit-exact, so compare with assert_equal.
        np.testing.assert_equal(res, verify_output_data)
        assert res.dtype == out_dtype
def test_float32_to_uint8():
    """Quantize float32 to uint8 with scale 0.5 and zero point 127; the
    extreme inputs map onto the full 0..255 range."""
    in_data = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float32"
    ).reshape((2, 5))
    expected = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255], dtype="uint8").reshape((2, 5))
    quantize_test_driver(
        in_dtype="float32",
        quant_args={"out_zero_point": np.int32(127), "out_scale": np.float32(0.5)},
        axis=-1,
        out_dtype="uint8",
        in_data=in_data,
        verify_output_data=expected,
    )
def test_float32_to_int8():
    """Quantize float32 to int8 with scale 0.5 and zero point -1; the extreme
    inputs map onto the full -128..127 range."""
    in_data = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float32"
    ).reshape((2, 5))
    expected = np.array(
        [-128, -127, -126, -125, -124, 123, 124, 125, 126, 127], dtype="int8"
    ).reshape((2, 5))
    quantize_test_driver(
        in_dtype="float32",
        quant_args={"out_zero_point": np.int32(-1), "out_scale": np.float32(0.5)},
        axis=-1,
        out_dtype="int8",
        in_data=in_data,
        verify_output_data=expected,
    )
def test_float32_to_uint16():
    """float32 -> uint16 quantization with scale 0.2 and zero point 32765."""
    in_vals = [-6553, -6552.8, -6552.6, -6552.4, -6552.2, 6553.2, 6553.4, 6553.6, 6553.8, 6554]
    out_vals = [0, 1, 2, 3, 4, 65531, 65532, 65533, 65534, 65535]
    data = np.reshape(np.asarray(in_vals, dtype="float32"), (2, 5))
    output = np.reshape(np.asarray(out_vals, dtype="uint16"), (2, 5))
    quantize_test_driver(
        in_dtype="float32",
        quant_args={"out_zero_point": np.int32(32765), "out_scale": np.float32(0.2)},
        axis=-1,
        out_dtype="uint16",
        in_data=data,
        verify_output_data=output,
    )
def test_scalar_float32_to_int8():
    """Quantize a rank-0 (scalar) tensor to int8."""
    data = np.asarray(-63.5, dtype="float32")
    output = np.asarray(-128, dtype="int8")
    quantize_test_driver(
        in_dtype="float32",
        quant_args={"out_zero_point": np.int32(-1), "out_scale": np.float32(0.5)},
        axis=-1,
        out_dtype="int8",
        in_data=data,
        verify_output_data=output,
    )
def test_channelwise_axis_0():
    """Per-channel quantization along axis 0: one scale/zero-point per row."""
    in_vals = [-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32]
    out_vals = [0, 1, 2, 3, 4, 243, 247, 249, 250, 251]
    data = np.reshape(np.asarray(in_vals, dtype="float32"), (2, 5))
    output = np.reshape(np.asarray(out_vals, dtype="uint8"), (2, 5))
    quant_args = {
        "out_zero_point": np.asarray([127, 123], dtype="int32"),
        "out_scale": np.asarray([0.5, 0.25], dtype="float32"),
    }
    quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=0,
        out_dtype="uint8",
        in_data=data,
        verify_output_data=output,
    )
def test_channelwise_axis_1():
data = np.transpose(
np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32])
.astype("float32")
.reshape((2, 5))
)
output = np.transpose(
np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251]).astype("uint8").reshape((2, 5))
)
quant_args = {
"out_zero_point": np.array([127, 123]).astype("int32"),
"out_scale": np.array([0.5, 0.25]).astype("float32"),
}
quantize_test_driver(
in_dtype="float32",
quant_args=quant_args,
axis=-1,
out_dtype="uint8",
in_data=data,
verify_output_data=output,
)
def test_dynamic_quantize():
    """Quantize with non-constant scale/zero-point expressions.

    Checks that type inference still produces an int8 tensor and that the
    graph builds and runs on every enabled target.
    """
    x = relay.var("x", shape=(1, 2, 3, 4), dtype="float32")
    scale_var = relay.var("scale", shape=(), dtype="float32")
    zp_var = relay.var("zp", shape=(), dtype="int32")
    # Scale and zero point are computed expressions, not constants.
    q_x = relay.qnn.op.quantize(x, scale_var * scale_var, zp_var + zp_var)
    tt = run_infer_type(q_x)
    assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "int8")
    func = relay.Function([x, scale_var, zp_var], q_x)
    data = np.random.uniform(size=(1, 2, 3, 4)).astype("float32")
    scale = np.array(1).astype("float32")
    zp = np.array(0).astype("int32")
    mod = tvm.ir.IRModule.from_expr(func)
    for target, dev in tvm.testing.enabled_targets():
        # TODO: (electriclilies) enable AlterOpLayout when it is fixed
        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
            lib = relay.build(mod, target=target)
        module = graph_executor.GraphModule(lib["default"](dev))
        module.set_input(**{"x": data, "scale": scale, "zp": zp})
        # Only checks that execution completes; no output assertion here.
        module.run()
if __name__ == "__main__":
    # Run every quantize test in declaration order when invoked directly.
    for _case in (
        test_float32_to_uint8,
        test_float32_to_int8,
        test_float32_to_uint16,
        test_scalar_float32_to_int8,
        test_channelwise_axis_0,
        test_channelwise_axis_1,
        test_dynamic_quantize,
    ):
        _case()
| 6,689 | 31.318841 | 99 | py |
tvm | tvm-main/tests/python/relay/test_type_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import TypeFunctor, TypeMutator, TypeVisitor
from tvm.relay.ty import (
TypeVar,
IncompleteType,
TensorType,
FuncType,
TupleType,
TypeRelation,
RefType,
GlobalTypeVar,
TypeCall,
)
from tvm.relay.adt import TypeData
def check_visit(typ):
    """Exercise the three type functors on *typ*.

    The abstract TypeFunctor must raise NotImplementedError, TypeVisitor must
    traverse without error, and TypeMutator must produce a structurally
    identical type.
    """
    raised = False
    try:
        TypeFunctor().visit(typ)
    except NotImplementedError:
        raised = True
    assert raised
    TypeVisitor().visit(typ)
    mutated = TypeMutator().visit(typ)
    tvm.ir.assert_structural_equal(mutated, typ, map_free_vars=True)
def test_type_var():
    """Visit a TypeVar."""
    check_visit(TypeVar("a"))


def test_incomplete_type():
    """Visit an IncompleteType."""
    check_visit(IncompleteType())


def test_tensor_type():
    """Visit a rank-0 TensorType."""
    check_visit(TensorType([]))


def test_func_type():
    """Visit a FuncType with one type parameter."""
    type_param = TypeVar("tv")
    tensor_ty = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    check_visit(FuncType([tensor_ty], tensor_ty, type_params=[type_param]))


def test_tuple_type():
    """Visit a nested TupleType."""
    check_visit(TupleType([TupleType([])]))
def test_type_relation():
    """Visit a TypeRelation built from the Broadcast environment function."""
    broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
    attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    relation_args = [
        TypeVar("tp"),
        FuncType([], TupleType([]), [], []),
        TensorType([1, 2, 3], "float32"),
    ]
    check_visit(TypeRelation(broadcast, relation_args, 2, attrs))


def test_ref_type():
    """Visit a RefType."""
    check_visit(RefType(TupleType([])))


def test_global_type_var():
    """Visit a GlobalTypeVar."""
    check_visit(GlobalTypeVar("gtv"))


def test_type_call():
    """Visit a TypeCall."""
    check_visit(TypeCall(GlobalTypeVar("tf"), [TupleType([])]))


def test_type_data():
    """Visit a TypeData ADT header."""
    check_visit(TypeData(GlobalTypeVar("td"), [TypeVar("tv")], []))
if __name__ == "__main__":
    # Run every functor test in declaration order when invoked directly.
    for _case in (
        test_type_var,
        test_incomplete_type,
        test_tensor_type,
        test_func_type,
        test_tuple_type,
        test_type_relation,
        test_ref_type,
        test_global_type_var,
        test_type_call,
        test_type_data,
    ):
        _case()
| 2,826 | 22.957627 | 85 | py |
tvm | tvm-main/tests/python/relay/test_pass_lazy_gradient_init.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay import create_executor, transform
from tvm.relay.testing import rand, run_infer_type
import tvm.testing
from tvm.testing import assert_allclose
def test_tc():
    """LazyGradientInit must preserve the function's input/output types."""
    shape = (20, 20)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x1 = relay.var("x1", t)
    x2 = relay.var("x2", t)
    # f(x1, x2) = (x1 - x2) * x2
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x1, x2], (x1 - x2) * x2)
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    # Function input/output types should remain the same after the pass.
    assert mod["main"].checked_type == relay.FuncType([t, t], t)
def test_add():
    """f(x) = x + x: check type preservation and numeric result."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x + x)
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy() + arg.numpy())
def test_add_tuple():
    """f((x1, x2)) = x1 + x2: tuple-typed input survives the pass."""
    shape = (10, 10)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    t = relay.TupleType([tensor_type, tensor_type])
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], relay.TupleGetItem(x, 0) + relay.TupleGetItem(x, 1))
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    # NOTE(review): PrintIR looks like leftover debug output — confirm it is wanted.
    mod = tvm.transform.PrintIR(show_meta_data=True)(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], tensor_type)
    args = (rand(dtype, *shape), rand(dtype, *shape))
    result = create_executor(mod=mod).evaluate(func)(args)
    assert_allclose(result.numpy(), args[0].numpy() + args[1].numpy())
def test_mult():
    """f(x) = x * x: check type preservation and numeric result."""
    shape = (15, 15)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x * x)
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy() * arg.numpy())
def test_ret_tuple():
    """f(x) = (x, 2x): tuple-valued return survives the pass."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    body = relay.Tuple([x, x * relay.const(2.0)])
    func = run_infer_type(relay.Function([x], body))
    mod = tvm.IRModule()
    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], relay.TupleType([t, t]))
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result[0].numpy(), arg.numpy())
    assert_allclose(result[1].numpy(), arg.numpy() * 2.0)
def test_add_broadcast():
    """Broadcasting add of (3, 4, 1) and (1, 5) tensors after the pass."""
    shape1, shape2 = (3, 4, 1), (1, 5)
    dtype = "float32"
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x1 = relay.var("x1", t1)
    x2 = relay.var("x2", t2)
    func = run_infer_type(relay.Function([x1, x2], x1 + x2))
    mod = tvm.IRModule()
    mod["main"] = func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    lhs = rand(dtype, *shape1).numpy()
    rhs = rand(dtype, *shape2).numpy()
    # NumPy broadcasting defines the expected shape and values.
    expected_forward = lhs + rhs
    expected_forward_type = relay.TensorType(expected_forward.shape, dtype)
    assert mod["main"].checked_type == relay.FuncType([t1, t2], expected_forward_type)
    forward = create_executor(mod=mod).evaluate(func)(lhs, rhs)
    assert_allclose(forward.numpy(), expected_forward)
def test_reverse_ad_identity():
    """Reverse-mode AD of f(x) = x, then LazyGradientInit.

    The gradient wrapper returns (forward_value, (grad_x,)); the gradient of
    the identity is a tensor of ones.
    """
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]
    # Gradient wrapper type: [t] -> (t, (t,))
    assert mod["main"].checked_type == relay.FuncType(
        [t], relay.TupleType([t, relay.TupleType([t])])
    )
    x = rand(dtype, *shape)
    (forward), (grad,) = create_executor(mod=mod).evaluate(back_func)(x)
    assert_allclose(forward.numpy(), x.numpy())
    assert_allclose(grad.numpy(), np.ones_like(x.numpy()))
def test_multivar_reverse_ad():
    """Reverse-mode AD of f(x, y) = (x * y) * 1, then LazyGradientInit.

    Checks forward value and both partial derivatives:
    d/dx (x*y) = y and d/dy (x*y) = x.
    """
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    back_func = mod["main"]
    # Gradient wrapper type: [t, t] -> (t, (t, t))
    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
        back_func
    )(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
def test_partial_eval():
    """Reverse-mode AD of a constant function followed by PartialEvaluate.

    Smoke test: only checks that gradient + InferType + PartialEvaluate run
    without raising; there are no output assertions.
    """
    shape = (10, 10)
    dtype = "float32"
    func = relay.Function([], relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    # Removed unused locals from the original (`t` and a re-read of mod["main"]).
    back_func = run_infer_type(transform.gradient(func))
    mod = tvm.IRModule()
    mod["main"] = back_func
    mod = transform.InferType()(mod)
    # PartialEvaluate returns a new module; the smoke test only needs success.
    transform.PartialEvaluate()(mod)
def test_after_partial_eval():
    """LazyGradientInit applied after PartialEvaluate on a reverse-AD function.

    Pipeline: gradient -> PartialEvaluate -> LazyGradientInit -> DCE, then
    check the wrapper type and the forward/gradient values.
    """
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    func = relay.Function([x, y], (x * y) * relay.const(np.ones(shape, dtype)))
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    back_func = mod["main"]
    seq = tvm.transform.Sequential(
        [
            transform.PartialEvaluate(),
            transform.InferType(),
            transform.LazyGradientInit(),
            transform.InferType(),
            transform.DeadCodeElimination(),
            transform.InferType(),
        ]
    )
    mod = seq(mod)
    # Gradient wrapper type: [t, t] -> (t, (t, t))
    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
        back_func
    )(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    # d/dx (x*y) = y and d/dy (x*y) = x.
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
def test_before_partial_eval():
    """LazyGradientInit applied before PartialEvaluate on a reverse-AD function.

    Same checks as test_after_partial_eval, but with LazyGradientInit first
    in the pass pipeline.
    """
    mod = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    func = relay.Function([x, y], x * y)
    func = run_infer_type(func)
    back_func = transform.gradient(func)
    back_func = run_infer_type(back_func)
    mod["main"] = back_func
    seq = tvm.transform.Sequential(
        [
            transform.LazyGradientInit(),
            transform.PartialEvaluate(),
            transform.InferType(),
            transform.DeadCodeElimination(),
            transform.InferType(),
        ]
    )
    mod = seq(mod)
    back_func = mod["main"]
    # Gradient wrapper type: [t, t] -> (t, (t, t))
    assert mod["main"].checked_type == relay.FuncType(
        [t, t], relay.TupleType([t, relay.TupleType([t, t])])
    )
    x = rand(dtype, *shape)
    y = rand(dtype, *shape)
    (forward), (grad_x, grad_y,) = create_executor(mod=mod).evaluate(
        back_func
    )(x, y)
    assert_allclose(forward.numpy(), x.numpy() * y.numpy())
    # d/dx (x*y) = y and d/dy (x*y) = x.
    assert_allclose(grad_x.numpy(), y.numpy())
    assert_allclose(grad_y.numpy(), x.numpy())
def test_zeros():
    """f(x) = x + zeros(shape) evaluates to x after the pass."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x + relay.zeros(shape, dtype))
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy())
def test_ones():
    """f(x) = x + ones(shape) evaluates to x + 1 after the pass."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x + relay.ones(shape, dtype))
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy() + np.ones_like(arg.numpy()))
def test_zeros_like():
    """f(x) = x + zeros_like(x) evaluates to x after the pass."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x + relay.zeros_like(x))
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy())
def test_ones_like():
    """f(x) = x + ones_like(x) evaluates to x + 1 after the pass."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], x + relay.ones_like(x))
    mod = transform.InferType()(mod)
    mod = transform.LazyGradientInit()(mod)
    func = mod["main"]
    assert mod["main"].checked_type == relay.FuncType([t], t)
    arg = rand(dtype, *shape)
    result = create_executor(mod=mod).evaluate(func)(arg)
    assert_allclose(result.numpy(), arg.numpy() + np.ones_like(arg.numpy()))
if __name__ == "__main__":
    # Delegate to TVM's pytest-based runner, which collects all test_* functions.
    tvm.testing.main()
| 12,419 | 26.66147 | 87 | py |
tvm | tvm-main/tests/python/relay/test_pass_annotate_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotating external targets."""
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.relay.testing
import tvm.relay.transform as transform
from tvm import relay
from tvm import runtime
from tvm.contrib import utils
def check_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu(), params=None
):
    """Compile *mod* with both the Relay VM and the graph executor, run it,
    and compare the first output against *result* within *tol*.

    map_inputs maps input names to ndarrays; out_shape is the expected
    output shape (used to allocate the graph-executor output buffer).
    """
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        return
    def update_lib(lib):
        # Re-export the library with the TVM contrib runtime sources on the
        # include path so external-codegen symbols resolve when reloaded.
        test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        source_dir = os.path.join(test_dir, "..", "..", "..")
        contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")
        kwargs = {}
        kwargs["options"] = ["-O2", "-std=c++17", "-I" + contrib_path]
        tmp_path = utils.tempdir()
        lib_name = "lib.so"
        lib_path = tmp_path.relpath(lib_name)
        lib.export_library(lib_path, fcompile=False, **kwargs)
        lib = runtime.load_module(lib_path)
        return lib
    def check_vm_result():
        # Compile with the Relay VM and compare against the reference result.
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            exe = relay.vm.compile(mod, target=target, params=params)
        code, lib = exe.save()
        lib = update_lib(lib)
        exe = runtime.vm.Executable.load_exec(code, lib)
        vm = runtime.vm.VirtualMachine(exe, device)
        out = vm.run(**map_inputs)
        tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
    def check_graph_executor_result():
        # Compile with the graph executor and compare against the reference result.
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            json, lib, param = relay.build(mod, target=target, params=params)
        lib = update_lib(lib)
        rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.set_input(**param)
        rt_mod.run()
        out = tvm.nd.empty(out_shape, device=device)
        out = rt_mod.get_output(0, out)
        tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
    check_vm_result()
    check_graph_executor_result()
def test_extern_dnnl():
    """Annotate a two-conv2d graph for the DNNL external codegen.

    Checks that the annotated IR matches a hand-built reference, and — when
    the DNNL codegen is compiled in — that the partitioned graph runs and
    matches the unpartitioned result.
    """
    def annotated(dtype, ishape, w1shape):
        # Un-annotated graph: two depthwise conv2ds sharing one weight, then add.
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
        depthwise_conv2d_1 = relay.nn.conv2d(
            data, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        depthwise_conv2d_2 = relay.nn.conv2d(
            depthwise_conv2d_1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
        f = relay.Function([data, weight1], out)
        mod = tvm.IRModule.from_expr(f)
        return mod
    def expected(dtype, ishape, w1shape):
        # Same graph with compiler_begin/compiler_end markers inserted by hand.
        # Note conv2d_1 has two consumers, so it gets two compiler_end nodes.
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
        begin0 = relay.annotation.compiler_begin(data, "dnnl")
        begin1 = relay.annotation.compiler_begin(weight1, "dnnl")
        depthwise_conv2d_1 = relay.nn.conv2d(
            begin0, begin1, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        end0 = relay.annotation.compiler_end(depthwise_conv2d_1, "dnnl")
        end1 = relay.annotation.compiler_end(depthwise_conv2d_1, "dnnl")
        begin2 = relay.annotation.compiler_begin(end1, "dnnl")
        begin3 = relay.annotation.compiler_begin(end0, "dnnl")
        begin4 = relay.annotation.compiler_begin(weight1, "dnnl")
        depthwise_conv2d_2 = relay.nn.conv2d(
            begin3, begin4, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        end2 = relay.annotation.compiler_end(depthwise_conv2d_2, "dnnl")
        begin5 = relay.annotation.compiler_begin(end2, "dnnl")
        out = relay.add(begin2, begin5)
        end3 = relay.annotation.compiler_end(out, "dnnl")
        f = relay.Function([data, weight1], end3)
        mod = tvm.IRModule.from_expr(f)
        return mod
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    def test_annotate():
        # The pass output must be structurally equal to the hand-annotated IR.
        mod = annotated(dtype, ishape, w1shape)
        mod = transform.AnnotateTarget("dnnl")(mod)
        mod = relay.transform.InferType()(mod)
        ref_mod = expected(dtype, ishape, w1shape)
        ref_mod = relay.transform.InferType()(ref_mod)
        tvm.ir.assert_structural_equal(mod, ref_mod)
    def test_run():
        # Execution check; skipped when the DNNL codegen is not compiled in.
        if not tvm.get_global_func("relay.ext.dnnl", True):
            print("skip because DNNL codegen is not available")
            return
        ref_mod = annotated(dtype, ishape, w1shape)
        mod = annotated(dtype, ishape, w1shape)
        mod = transform.PartitionGraph()(mod)
        i_data = np.random.uniform(0, 1, ishape).astype(dtype)
        w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
        ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
            i_data, w1_data
        )
        check_result(
            mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
        )
    test_annotate()
    test_run()
@pytest.mark.skip(reason="fix constant node before opening this case")
def test_extern_dnnl_mobilenet():
    """End-to-end DNNL annotation + partitioning of MobileNet (currently skipped)."""
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (1, 3, 224, 224)
    mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
    # Bind the weights as constants so the annotator sees a closed graph.
    mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params)
    mod = transform.AnnotateTarget("dnnl")(mod)
    mod = transform.PartitionGraph()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    # Reference: the same workload run without partitioning.
    ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
        i_data, **params
    )
    check_result(mod, {"data": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params)
def test_multiple_ends():
    """A supported op feeding two consumers gets one compiler_end per consumer."""
    @tvm.ir.register_op_attr("nn.relu", "target.test")
    def relu(expr):  # pylint: disable=unused-variable
        return True
    def before():
        # relu output is used by both abs calls.
        x = relay.var("x", shape=(10, 10))
        r = relay.nn.relu(x)
        a_1 = relay.abs(r)
        a_2 = relay.abs(r)
        out = relay.add(a_1, a_2)
        f = relay.Function([x], out)
        mod = tvm.IRModule.from_expr(f)
        return mod
    def after():
        # Expected: relu is wrapped for "test" with one compiler_end per use;
        # the unsupported abs/add ops fall to the "default" target.
        x = relay.var("x", shape=(10, 10))
        cb_1 = relay.annotation.compiler_begin(x, "test")
        r = relay.nn.relu(cb_1)
        ce_1 = relay.annotation.compiler_end(r, "test")
        ce_2 = relay.annotation.compiler_end(r, "test")
        cb_2 = relay.annotation.compiler_begin(ce_1, "default")
        cb_3 = relay.annotation.compiler_begin(ce_2, "default")
        a_1 = relay.abs(cb_2)
        a_2 = relay.abs(cb_3)
        ce_3 = relay.annotation.compiler_end(a_1, "default")
        ce_4 = relay.annotation.compiler_end(a_2, "default")
        cb_4 = relay.annotation.compiler_begin(ce_3, "default")
        cb_5 = relay.annotation.compiler_begin(ce_4, "default")
        out = relay.add(cb_4, cb_5)
        ce_6 = relay.annotation.compiler_end(out, "default")
        f = relay.Function([x], ce_6)
        mod = tvm.IRModule.from_expr(f)
        return mod
    for annotate_non_call_ops in [False, True]:
        result = transform.AnnotateTarget("test", annotate_non_call_ops)(before())
        expected = transform.InferType()(after())
        assert tvm.ir.structural_equal(expected, result)
def test_type_propagation():
    """AnnotateTarget must run type inference before invoking op checkers."""
    target = "test_type_propagation"

    @tvm.ir.register_op_attr("nn.relu", "target." + target)
    def relu(expr):  # pylint: disable=unused-variable
        # Reading checked_type only works when types have been propagated.
        return expr.args[0].checked_type.dtype == "float32"

    def before():
        inp = relay.var("x", shape=(10, 10))
        chained = relay.nn.relu(relay.nn.relu(inp))
        return tvm.IRModule.from_expr(relay.Function([inp], chained))

    for annotate_non_call_ops in (False, True):
        # If the type weren't propagated, the checker above would fail to read the dtype.
        assert transform.AnnotateTarget(target, annotate_non_call_ops)(before())
def test_ref_create_read_write():
    """Annotation around RefCreate/RefRead/RefWrite with a supported relu between."""
    target = "relu"
    @tvm.ir.register_op_attr("nn.relu", "target." + target)
    def annotate(expr):
        return True
    def before():
        # ref = Ref(1.0); ref := relu(!ref)
        ref = relay.expr.RefCreate(relay.const(1.0))
        r = relay.expr.RefWrite(ref, relay.nn.relu(relay.expr.RefRead(ref)))
        return tvm.IRModule.from_expr(r)
    def after(annotate_non_call_ops):
        # Expected IR: the ref ops only get default-target begin/end markers
        # when annotate_non_call_ops is set; relu is always wrapped for "relu".
        co = relay.const(1.0)
        if annotate_non_call_ops:
            co = relay.annotation.compiler_begin(co, "default")
        ref = relay.expr.RefCreate(co)
        ref1 = ref
        if annotate_non_call_ops:
            ref = relay.annotation.compiler_end(ref, "default")
            ref = relay.annotation.compiler_begin(ref, "default")
            ref1 = relay.annotation.compiler_end(ref1, "default")
            ref1 = relay.annotation.compiler_begin(ref1, "default")
        read = relay.expr.RefRead(ref1)
        if annotate_non_call_ops:
            read = relay.annotation.compiler_end(read, "default")
        beg = relay.annotation.compiler_begin(read, target)
        relu = relay.nn.relu(beg)
        end = relay.annotation.compiler_end(relu, target)
        if annotate_non_call_ops:
            end = relay.annotation.compiler_begin(end, "default")
        r = relay.expr.RefWrite(ref, end)
        if annotate_non_call_ops:
            r = relay.annotation.compiler_end(r, "default")
        return tvm.IRModule.from_expr(r)
    for annotate_non_call_ops in [True, False, True]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        expected = transform.InferType()(after(annotate_non_call_ops))
        assert tvm.ir.structural_equal(expected, result)
def test_tuple():
    """A Tuple between two supported ops is absorbed into the annotated region."""
    target = "test_tuple"
    @tvm.ir.register_op_attr("nn.relu", "target." + target)
    def relu(expr):  # pylint: disable=unused-variable
        return True
    @tvm.ir.register_op_attr("concatenate", "target." + target)
    def concatenate(expr):  # pylint: disable=unused-variable
        return True
    """Test that TupleNode is included in annotation when surrounded by supported nodes."""
    def before():
        x = relay.var("x", shape=(10, 5))
        y = relay.var("y", shape=(10, 5))
        a_1 = relay.nn.relu(x)
        a_2 = relay.nn.relu(y)
        out = relay.concatenate((a_1, a_2), axis=1)
        f = relay.Function([x, y], out)
        mod = tvm.IRModule.from_expr(f)
        return mod
    def after(annotate_non_call_ops):
        # With annotate_non_call_ops the Tuple node itself is wrapped in
        # begin/end markers; otherwise only the surrounding calls are.
        x = relay.var("x", shape=(10, 5))
        y = relay.var("y", shape=(10, 5))
        cb_1 = relay.annotation.compiler_begin(x, target)
        cb_2 = relay.annotation.compiler_begin(y, target)
        a_1 = relay.nn.relu(cb_1)
        a_2 = relay.nn.relu(cb_2)
        ce_1 = relay.annotation.compiler_end(a_1, target)
        ce_2 = relay.annotation.compiler_end(a_2, target)
        if annotate_non_call_ops:
            cb_3 = relay.annotation.compiler_begin(ce_1, target)
            cb_4 = relay.annotation.compiler_begin(ce_2, target)
            tup = relay.Tuple([cb_3, cb_4])
            ce_3 = relay.annotation.compiler_end(tup, target)
        else:
            ce_3 = relay.Tuple([ce_1, ce_2])
        cb_3 = relay.annotation.compiler_begin(ce_3, target)
        # Use the raw concatenate maker so the Tuple stays an explicit node.
        out = relay.op._make.concatenate(cb_3, 1)
        ce_4 = relay.annotation.compiler_end(out, target)
        f = relay.Function([x, y], ce_4)
        mod = tvm.IRModule.from_expr(f)
        return mod
    for annotate_non_call_ops in [False, True]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        expected = transform.InferType()(after(annotate_non_call_ops))
        assert tvm.ir.structural_equal(expected, result)
def test_composite_function():
    """A call to a Composite function is annotated as a unit, not per inner op."""
    def before():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        # add_relu function
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        relu_node = relay.nn.relu(add_node)
        add_relu = relay.Function([in_1, in_2], relu_node)
        add_relu = add_relu.with_attr("Composite", "test.add_relu")
        # merged function
        r = relay.Call(add_relu, [a, b])
        f = relay.Function([a, b], r)
        mod = tvm.IRModule.from_expr(f)
        return mod
    def after():
        # Expected: only the call's arguments get compiler_begin and the call
        # result gets compiler_end; the composite body is left untouched.
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        # add_relu function
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        add_node = relay.add(in_1, in_2)
        relu_node = relay.nn.relu(add_node)
        add_relu = relay.Function([in_1, in_2], relu_node)
        add_relu = add_relu.with_attr("Composite", "test.add_relu")
        # merged function
        cb_1 = relay.annotation.compiler_begin(a, "test")
        cb_2 = relay.annotation.compiler_begin(b, "test")
        r = relay.Call(add_relu, [cb_1, cb_2])
        ce_1 = relay.annotation.compiler_end(r, "test")
        f = relay.Function([a, b], ce_1)
        mod = tvm.IRModule.from_expr(f)
        return mod
    result = transform.AnnotateTarget("test")(before())
    expected = transform.InferType()(after())
    assert tvm.ir.structural_equal(expected, result)
def test_double_target():
    """Annotating twice with the same target must be idempotent."""

    @tvm.ir.register_op_attr("nn.relu", "target.double.A")
    def relu(expr):  # pylint: disable=unused-variable
        return True

    def before():
        inp = relay.var("x", shape=(10, 5))
        return tvm.IRModule.from_expr(relay.nn.relu(inp))

    for annotate_non_call_ops in (True, False):
        once = transform.AnnotateTarget("double.A", annotate_non_call_ops)(before())
        twice = transform.AnnotateTarget("double.A", annotate_non_call_ops)(once)
        assert tvm.ir.structural_equal(once, twice)
def test_different_targets():
    """Annotating with two targets one at a time equals a single run with both."""

    @tvm.ir.register_op_attr("nn.relu", "target.different.A")
    def relu(expr):  # pylint: disable=unused-variable
        return True

    # Fixed: this checker was also named `relu`, shadowing the one above;
    # it registers the "add" attribute, so name it accordingly.
    @tvm.ir.register_op_attr("add", "target.different.B")
    def add(expr):  # pylint: disable=unused-variable
        return True

    def before():
        x = relay.var("x", shape=(10, 5))
        a_1 = relay.nn.relu(x)
        b_1 = relay.add(a_1, a_1)
        mod = tvm.IRModule.from_expr(b_1)
        return mod

    for annotate_non_call_ops in [True, False]:
        mod = before()
        mod1 = transform.AnnotateTarget("different.A", annotate_non_call_ops)(mod)
        mod1 = transform.AnnotateTarget("different.B", annotate_non_call_ops)(mod1)
        mod2 = transform.AnnotateTarget(["different.A", "different.B"], annotate_non_call_ops)(mod)
        assert tvm.ir.structural_equal(mod1, mod2)
def test_multiple_runs():
    """Running AnnotateTarget twice (A then B) matches a single run with [A, B]."""

    @tvm.ir.register_op_attr("nn.relu", "target.A")
    def relu(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("add", "target.B")
    def add(expr):  # pylint: disable=unused-variable
        return True

    def before():
        inp = relay.var("x", shape=(10, 5))
        relu_out = relay.nn.relu(inp)
        branch_a = relay.abs(relu_out)
        branch_b = relay.nn.relu(relu_out)
        summed = relay.add(branch_a, branch_b)
        return tvm.IRModule.from_expr(relay.Function([inp], summed))

    for annotate_non_call_ops in (True, False):
        sequential = transform.AnnotateTarget("A", annotate_non_call_ops)(before())
        sequential = transform.AnnotateTarget("B", annotate_non_call_ops)(sequential)
        combined = transform.AnnotateTarget(["A", "B"], annotate_non_call_ops)(before())
        assert tvm.ir.structural_equal(combined, sequential)
def test_ends_with_tuple():
    """Graphs ending in a Tuple (or TupleGetItem) are annotated correctly."""
    trgt = "clip"
    @tvm.ir.register_op_attr("clip", "target." + trgt)
    def relu(expr):  # pylint: disable=unused-variable
        return True
    def get_model(get_item):
        """Return a model"""
        a = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
        z = relay.op.clip(a, 0, 255)
        b = relay.op.clip(z, 0, 15)
        c = relay.op.clip(z, 16, 31)
        t = relay.Tuple((c, b))
        tgi = relay.TupleGetItem(t, 1) if get_item else t
        # NOTE(review): `foo` is unused; from_expr(tgi) rebuilds the function
        # from free vars instead — confirm this is intentional.
        foo = relay.Function([a], tgi)
        return tvm.IRModule.from_expr(tgi)
    def get_expected(annotate_non_call_ops, get_item):
        # Hand-annotated reference; Tuple/TupleGetItem markers appear only
        # when annotate_non_call_ops is set.
        a_ = relay.var("a", shape=(1, 16, 16, 4), dtype="uint8")
        a = relay.annotation.compiler_begin(a_, trgt)
        z = relay.op.clip(a, 0, 255)
        z1 = relay.annotation.compiler_end(z, trgt)
        z1 = relay.annotation.compiler_begin(z1, trgt)
        b = relay.op.clip(z1, 0, 15)
        b = relay.annotation.compiler_end(b, trgt)
        b = relay.annotation.compiler_begin(b, trgt) if annotate_non_call_ops else b
        z2 = relay.annotation.compiler_end(z, trgt)
        z2 = relay.annotation.compiler_begin(z2, trgt)
        c = relay.op.clip(z2, 16, 31)
        c = relay.annotation.compiler_end(c, trgt)
        c = relay.annotation.compiler_begin(c, trgt) if annotate_non_call_ops else c
        t = relay.Tuple((c, b))
        t = relay.annotation.compiler_end(t, trgt) if annotate_non_call_ops else t
        if get_item:
            t = relay.annotation.compiler_begin(t, trgt) if annotate_non_call_ops else t
            tgi = relay.TupleGetItem(t, 1)
            tgi = relay.annotation.compiler_end(tgi, trgt) if annotate_non_call_ops else tgi
        else:
            tgi = t
        foo = relay.Function([a_], tgi)
        return tvm.IRModule.from_expr(foo)
    for get_item in [True, False]:
        for annotate_non_call_ops in [False, True]:
            mod = get_model(get_item)
            mod = transform.AnnotateTarget("clip", annotate_non_call_ops)(mod)
            expected = transform.InferType()(get_expected(annotate_non_call_ops, get_item))
            assert tvm.ir.structural_equal(expected, mod)
def test_if_else():
    """Test that If-else nodes compiles correctly when surrounded by supported nodes."""
    # Fixed: the docstring above was previously a dead bare-string statement
    # placed after the op registrations; it is now a real docstring.
    target = "test_if_else"

    # Fixed: the "equal" checker was misleadingly named `relu`.
    @tvm.ir.register_op_attr("equal", "target." + target)
    def equal(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("tanh", "target." + target)
    def tanh(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("sigmoid", "target." + target)
    def sigmoid(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("erf", "target." + target)
    def erf(expr):  # pylint: disable=unused-variable
        return True

    def before():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        eq = relay.equal(eq1, eq2)
        true_branch = relay.tanh(data)
        false_branch = relay.sigmoid(data)
        ife = relay.If(eq, true_branch, false_branch)
        out = relay.erf(ife)
        func = relay.Function([data, eq1, eq2], out)
        mod = tvm.IRModule.from_expr(func)
        return mod

    def after():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        cb_1 = relay.annotation.compiler_begin(eq1, target)
        cb_2 = relay.annotation.compiler_begin(eq2, target)
        equality_condition = relay.equal(cb_1, cb_2)
        ce_1 = relay.annotation.compiler_end(equality_condition, target)
        # if condition
        cb_3 = relay.annotation.compiler_begin(data, target)
        true_branch = relay.tanh(cb_3)
        ce_2 = relay.annotation.compiler_end(true_branch, target)
        # else condition
        cb_4 = relay.annotation.compiler_begin(data, target)
        false_branch = relay.sigmoid(cb_4)
        ce_3 = relay.annotation.compiler_end(false_branch, target)
        if_condition = relay.If(ce_1, ce_2, ce_3)
        cb_5 = relay.annotation.compiler_begin(if_condition, target)
        erf_out = relay.erf(cb_5)
        ce_4 = relay.annotation.compiler_end(erf_out, target)
        func = relay.Function([data, eq1, eq2], ce_4)
        mod = tvm.IRModule.from_expr(func)
        return mod

    expected = transform.InferType()(after())
    for annotate_non_call_ops in [True, False]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        assert tvm.ir.structural_equal(expected, result)
def test_while_let():
    """Test that let nodes compiles correctly when surrounded by other nodes."""
    # Fixed: the docstring above was previously a dead bare-string statement
    # placed after the op registrations; it is now a real docstring.
    target = "test_while_let"

    @tvm.ir.register_op_attr("less", "target." + target)
    def less(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("add", "target." + target)
    def add(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("zeros_like", "target." + target)
    def zeros_like(expr):  # pylint: disable=unused-variable
        return True

    def before():
        var1 = relay.var("var1", shape=(2,))
        var2 = relay.var("var2", shape=(), dtype="int32")
        var3 = relay.var("var3", shape=(2,))
        cond = relay.less(var2, relay.const(10, dtype="int32"))
        loop = relay.var("while_loop")
        ii = var2 + relay.const(1, dtype="int32")
        ss = var3 + var1
        true_branch = loop(ii, ss)
        ife = relay.If(cond, true_branch, var3)
        func_1 = relay.Function([var2, var3], ife)
        ret = relay.Let(loop, func_1, loop(relay.const(0, dtype="int32"), relay.zeros_like(var1)))
        func_2 = relay.Function([var1], ret)
        mod = tvm.IRModule.from_expr(func_2)
        return mod

    def after(annotate_non_call_ops):
        var1 = relay.var("var1", shape=(2,))
        var2 = relay.var("var2", shape=(), dtype="int32")
        var3 = relay.var("var3", shape=(2,))
        var4 = relay.const(10, dtype="int32")
        cb_1 = relay.annotation.compiler_begin(var2, target)
        cb_2 = relay.annotation.compiler_begin(var4, target)
        less_condition = relay.less(cb_1, cb_2)
        ce_1 = relay.annotation.compiler_end(less_condition, target)
        loop = relay.var("while_loop")
        # if condition
        cb_3 = relay.annotation.compiler_begin(var2, target)
        cb_4 = relay.annotation.compiler_begin(relay.const(1, dtype="int32"), target)
        add_op_1 = relay.add(cb_3, cb_4)
        ce_2 = relay.annotation.compiler_end(add_op_1, target)
        cb_5 = relay.annotation.compiler_begin(ce_2, "default") if annotate_non_call_ops else ce_2
        cb_6 = relay.annotation.compiler_begin(var3, target)
        cb_7 = relay.annotation.compiler_begin(var1, target)
        add_op_2 = relay.add(cb_6, cb_7)
        ce_3 = relay.annotation.compiler_end(add_op_2, target)
        cb_8 = relay.annotation.compiler_begin(ce_3, "default") if annotate_non_call_ops else ce_3
        true_branch = loop(cb_5, cb_8)  # while loop
        ce_4 = (
            relay.annotation.compiler_end(true_branch, "default")
            if annotate_non_call_ops
            else true_branch
        )
        if_condition = relay.If(ce_1, ce_4, var3)
        const_1 = relay.const(0, dtype="int32")
        cb_9 = (
            relay.annotation.compiler_begin(const_1, "default")
            if annotate_non_call_ops
            else const_1
        )
        cb_10 = relay.annotation.compiler_begin(var1, target)
        zeros_like = relay.zeros_like(cb_10)
        ce_5 = relay.annotation.compiler_end(zeros_like, target)
        cb_11 = relay.annotation.compiler_begin(ce_5, "default") if annotate_non_call_ops else ce_5
        while_condition = loop(cb_9, cb_11)
        ce_6 = (
            relay.annotation.compiler_end(while_condition, "default")
            if annotate_non_call_ops
            else while_condition
        )
        func_1 = relay.Function([var2, var3], if_condition)
        ret = relay.Let(loop, func_1, ce_6)
        func_2 = relay.Function([var1], ret)
        mod = tvm.IRModule.from_expr(func_2)
        return mod

    for annotate_non_call_ops in [False, True]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        expected = transform.InferType()(after(annotate_non_call_ops))
        assert tvm.ir.structural_equal(expected, result)
def test_if_free_vars():
    """Test that If-else nodes compiles correctly when surrounded by free variables"""
    # Fixed: the docstring above was previously a dead bare-string statement
    # placed after the op registrations; it is now a real docstring.
    target = "test_if_free_vars"

    @tvm.ir.register_op_attr("equal", "target." + target)
    def equal(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("sigmoid", "target." + target)
    def sigmoid(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("erf", "target." + target)
    def erf(expr):  # pylint: disable=unused-variable
        return True

    def before():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        eq = relay.equal(eq1, eq2)
        true_branch = relay.zeros(shape=(1, 32), dtype="float32")
        false_branch = relay.sigmoid(data)
        ife = relay.If(eq, true_branch, false_branch)
        out = relay.erf(ife)
        func = relay.Function([data, eq1, eq2], out)
        mod = tvm.IRModule.from_expr(func)
        return mod

    def after():
        data = relay.var("data", shape=(1, 32))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        cb_1 = relay.annotation.compiler_begin(eq1, target)
        cb_2 = relay.annotation.compiler_begin(eq2, target)
        equality_condition = relay.equal(cb_1, cb_2)
        ce_1 = relay.annotation.compiler_end(equality_condition, target)
        # if condition: zeros is not registered for this target, so the true
        # branch stays unannotated.
        true_branch = relay.zeros(shape=(1, 32), dtype="float32")
        # else condition
        cb_3 = relay.annotation.compiler_begin(data, target)
        false_branch = relay.sigmoid(cb_3)
        ce_2 = relay.annotation.compiler_end(false_branch, target)
        if_condition = relay.If(ce_1, true_branch, ce_2)
        cb_4 = relay.annotation.compiler_begin(if_condition, target)
        erf_out = relay.erf(cb_4)
        ce_3 = relay.annotation.compiler_end(erf_out, target)
        func = relay.Function([data, eq1, eq2], ce_3)
        mod = tvm.IRModule.from_expr(func)
        return mod

    for annotate_non_call_ops in [True, False]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        expected = transform.InferType()(after())
        assert tvm.ir.structural_equal(expected, result)
def test_free_vars_zeros():
    """Test that free variables compile correctly on their own"""
    # Fixed: the docstring above was previously a dead bare-string statement
    # placed after the target assignment; it is now a real docstring.
    target = "test_free_vars_zeros"

    def before():
        func = relay.Function([], relay.zeros(shape=(0), dtype="float32"))
        mod = tvm.IRModule.from_expr(func)
        return mod

    def after():
        # No op is registered for this target in this test, so the module is
        # expected to pass through AnnotateTarget unchanged.
        func = relay.Function([], relay.zeros(shape=(0), dtype="float32"))
        mod = tvm.IRModule.from_expr(func)
        return mod

    result = transform.AnnotateTarget(target)(before())
    expected = transform.InferType()(after())
    assert tvm.ir.structural_equal(expected, result)
def test_empty_tuple():
    """An empty tuple should behave just like a call with no args (see above test)."""
    # Fixed: the docstring above was previously a dead bare-string statement
    # placed after the target assignment; it is now a real docstring.
    target = "test_empty_tuple"

    def before():
        func = relay.Function([], relay.Tuple([]))
        mod = tvm.IRModule.from_expr(func)
        return mod

    def after():
        # The empty tuple is expected to come out of AnnotateTarget unchanged.
        func = relay.Function([], relay.Tuple([]))
        mod = tvm.IRModule.from_expr(func)
        return mod

    for annotate_non_call_ops in [True, False]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(before())
        expected = transform.InferType()(after())
        assert tvm.ir.structural_equal(expected, result)
if __name__ == "__main__":
    # Tests are invoked explicitly rather than via pytest discovery so that
    # individual cases can be left out; test_extern_dnnl_mobilenet below is
    # intentionally commented out (reason not stated here).
    test_extern_dnnl()
    test_composite_function()
    # test_extern_dnnl_mobilenet()
    test_multiple_ends()
    test_type_propagation()
    test_tuple()
    test_multiple_runs()
    test_if_else()
    test_while_let()
    test_if_free_vars()
    test_free_vars_zeros()
    test_different_targets()
    test_double_target()
    test_ends_with_tuple()
    test_ref_create_read_write()
    test_empty_tuple()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for relay pass manager."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor
from tvm.relay import Function, Call
from tvm.relay import analysis
from tvm.relay import transform as _transform
from tvm.ir import instrument as _instrument
from tvm.relay.testing import run_infer_type
import tvm.testing
def get_var_func():
    """Build the `myAbs` GlobalVar and a unary abs Function over a (5, 10) float32 tensor."""
    tensor_ty = relay.TensorType((5, 10), "float32")
    param = relay.var("x", tensor_ty)
    abs_fn = relay.Function([param], relay.abs(param))
    return relay.GlobalVar("myAbs"), abs_fn
def extract_var_func(mod, name):
    """Look up *name* in *mod* and return its (GlobalVar, Function) pair."""
    gvar = mod.get_global_var(name)
    return gvar, mod[gvar]
def update_func(func):
    """Return a copy of *func* in which every constant and variable is doubled (x -> x + x)."""

    class DoubleValues(ExprFunctor):
        """Rewrites constants/vars to self-additions, rebuilding calls and functions."""

        def __init__(self):
            ExprFunctor.__init__(self)

        def visit_constant(self, const):
            return relay.add(const, const)

        def visit_var(self, var):
            return relay.add(var, var)

        def visit_call(self, call):
            # Rebuild the call with rewritten operator and arguments.
            rewritten_op = self.visit(call.op)
            rewritten_args = [self.visit(arg) for arg in call.args]
            return Call(rewritten_op, rewritten_args, call.attrs)

        def visit_global_var(self, gvar):
            return gvar

        def visit_op(self, op):
            return op

        def visit_function(self, fn):
            return Function(
                list(fn.params), self.visit(fn.body), fn.ret_type, fn.type_params, fn.attrs
            )

    return DoubleValues().visit(func)
class OptTester:
    """A helper class for testing the pass manager."""

    def __init__(self, mod):
        if not isinstance(mod, tvm.IRModule):
            raise TypeError("mod is expected to be the type of tvm.IRModule")
        self.mod = mod

    def analysis(self):
        """Perform analysis for the current module."""

    @staticmethod
    def transform(node, ctx=None):
        """Perform optimization on node."""
        if isinstance(node, tvm.IRModule):
            # Module-level rewrite: merge a fresh `myAbs` function into a copy
            # of the incoming module and return the updated module.
            gv, func = get_var_func()
            updated = tvm.IRModule({gv: func})
            updated.update(node)
            return updated
        if isinstance(node, relay.Function):
            # Function-level rewrite: double every constant and variable.
            return update_func(node)
        raise TypeError("Found not supported node type.")
def get_rand(shape, dtype="float32"):
    """Return a tvm.nd.array of uniform random values with the given shape and dtype."""
    values = np.random.rand(*shape).astype(dtype)
    return tvm.nd.array(values)
def check_func(func, ref_func):
    """Assert that two relay functions are structurally equal after type inference."""
    lhs = run_infer_type(func)
    rhs = run_infer_type(ref_func)
    assert tvm.ir.structural_equal(lhs, rhs)
@tvm.testing.uses_gpu
def test_module_pass():
    """Module-pass registration (decorator and direct) and execution end to end."""
    shape = (5, 10)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_add = relay.GlobalVar("myAdd")
    func = relay.Function([x, y], x + y)
    mod = tvm.IRModule({v_add: func})
    pass_name = "module_pass_test"
    opt_level = 0
    opt_tester = OptTester(mod)
    pass_ctx = None

    @tvm.transform.module_pass(opt_level=opt_level, name=pass_name)
    def transform(expr, ctx):
        return opt_tester.transform(expr, ctx)

    def test_pass_registration():
        # The decorator must produce a ModulePass carrying the given metadata.
        mod_pass = transform
        assert isinstance(mod_pass, tvm.transform.ModulePass)
        pass_info = mod_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level

    def test_pass_registration_no_decorator():
        # module_pass may also be used as a plain function.
        def direct_transform(expr, ctx):
            return opt_tester.transform(expr, ctx)

        mod_pass = tvm.transform.module_pass(direct_transform, opt_level=3)
        assert isinstance(mod_pass, tvm.transform.ModulePass)
        pass_info = mod_pass.info
        assert pass_info.name == "direct_transform"
        assert pass_info.opt_level == 3

    def test_pass_run():
        module_pass = transform
        assert pass_name in str(module_pass)
        updated_mod = module_pass(mod)
        assert isinstance(updated_mod, tvm.IRModule)
        # Check the abs function in the updated module.
        v_abs, myabs = get_var_func()
        new_v_add = updated_mod.get_global_var(v_abs.name_hint)
        new_abs = updated_mod[new_v_add]
        check_func(new_abs, myabs)
        # Check the add function in the updated module.
        v_abs, myabs = get_var_func()
        new_v_add = updated_mod.get_global_var(v_add.name_hint)
        new_add = updated_mod[new_v_add]
        check_func(new_add, func)
        # Check the add function in the python transformed module.
        ret = opt_tester.transform(mod, pass_ctx)
        transformed_v_add = ret.get_global_var(v_add.name_hint)
        transformed_add = mod[transformed_v_add]
        check_func(new_add, transformed_add)
        # Execute the add function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = x_nd.numpy() + y_nd.numpy()
        for target, dev in tvm.testing.enabled_targets():
            res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_add)(
                x_nd, y_nd
            )
            tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
            res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_add)(
                x_nd, y_nd
            )
            tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)

    test_pass_registration()
    # Bug fix: this sub-test was previously referenced without parentheses
    # (`test_pass_registration_no_decorator`) and therefore never executed.
    test_pass_registration_no_decorator()
    test_pass_run()
def test_function_class_pass():
    """A class-based function pass can replace a module's function wholesale."""

    @relay.transform.function_pass(opt_level=1)
    class TestReplaceFunc:
        """Simple test function to replace one argument to another."""

        def __init__(self, new_func):
            self.new_func = new_func

        def transform_function(self, func, mod, ctx):
            return self.new_func

    x = relay.var("x", shape=(10, 20))
    identity_fn = relay.Function([x], x)
    log_fn = relay.Function([x], relay.log(x))
    fpass = TestReplaceFunc(identity_fn)
    assert fpass.info.opt_level == 1
    assert fpass.info.name == "TestReplaceFunc"
    # Running the pass swaps the log function for the identity function.
    result_mod = fpass(tvm.IRModule.from_expr(log_fn))
    # Reference: the identity function wrapped in its own type-inferred module.
    ref_mod = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(identity_fn))
    assert tvm.ir.structural_equal(result_mod["main"], ref_mod["main"])
@tvm.testing.uses_gpu
def test_function_pass():
    """Function-pass registration (decorator and direct) and execution end to end."""
    shape = (10,)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([x], relay.log(x))
    mod = tvm.IRModule({v_log: log})
    pass_name = "function_pass_test"
    opt_level = 1
    opt_tester = OptTester(mod)
    pass_ctx = None
    # OptTester.transform doubles every constant/var in a function, so the
    # registered pass rewrites log(x) into log(x + x).
    @_transform.function_pass(opt_level=opt_level, name=pass_name)
    def transform(expr, mod, ctx):
        return opt_tester.transform(expr, ctx)
    # Expected result of the pass applied to `log`.
    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log
    def test_pass_registration():
        # The decorator must produce a FunctionPass carrying the given metadata.
        function_pass = transform
        assert isinstance(function_pass, _transform.FunctionPass)
        pass_info = function_pass.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level
    def test_pass_registration_no_decorator():
        # function_pass may also be used as a plain function.
        def direct_transform(expr, ctx):
            return opt_tester.transform(expr, ctx)
        mod_pass = _transform.function_pass(direct_transform, opt_level=0)
        assert isinstance(mod_pass, _transform.FunctionPass)
        pass_info = mod_pass.info
        assert pass_info.name == "direct_transform"
        assert pass_info.opt_level == 0
    def test_pass_run():
        function_pass = transform
        assert pass_name in str(function_pass)
        updated_mod = function_pass(mod)
        assert isinstance(updated_mod, tvm.IRModule)
        # Check the log function in the updated module.
        new_v_log = updated_mod.get_global_var(v_log.name_hint)
        new_log = updated_mod[new_v_log]
        check_func(new_log, get_ref_log())
        # Check the log function in the python transformed function.
        ret = opt_tester.transform(log, pass_ctx)
        check_func(new_log, ret)
        # Execute the add function.
        x_nd = get_rand(shape, dtype)
        ref_res = np.log(x_nd.numpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
            res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_log)(x_nd)
            tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
    test_pass_registration()
    test_pass_registration_no_decorator()
    test_pass_run()
def test_module_class_pass():
    """A class-based module pass can conditionally replace the whole module."""

    @tvm.transform.module_pass(opt_level=1)
    class TestPipeline:
        """Simple test function to replace one argument to another."""

        def __init__(self, new_mod, replace):
            self.new_mod = new_mod
            self.replace = replace

        def transform_module(self, mod, ctx):
            return self.new_mod if self.replace else mod

    x = relay.var("x", shape=(10, 20))
    identity_mod = tvm.IRModule.from_expr(relay.Function([x], x))
    log_mod = tvm.IRModule.from_expr(relay.Function([x], relay.log(x)))
    replacing = TestPipeline(log_mod, replace=True)
    assert replacing.info.name == "TestPipeline"
    # With replace=True the pass returns the substitute module object itself.
    assert replacing(identity_mod).same_as(log_mod)
    # With replace=False the input module passes through untouched.
    assert TestPipeline(log_mod, replace=False)(identity_mod).same_as(identity_mod)
def test_pass_info():
    """PassInfo exposes the name and opt_level it was constructed with."""
    info = tvm.transform.PassInfo(name="xyz", opt_level=1)
    assert info.name == "xyz"
    assert info.opt_level == 1
@tvm.testing.uses_gpu
def test_sequential_pass():
    """Sequential pipelines: registration, empty pipeline, single-kind and mixed pipelines."""
    shape = (10,)
    dtype = "float32"
    tp = relay.TensorType(shape, dtype)
    x = relay.var("x", tp)
    y = relay.var("y", tp)
    v_sub = relay.GlobalVar("mySub")
    sub = relay.Function([x, y], relay.subtract(x, y))
    z = relay.var("z", tp)
    v_log = relay.GlobalVar("myLog")
    log = relay.Function([z], relay.log(z))
    mod = tvm.IRModule({v_sub: sub, v_log: log})
    # Expected `myLog` after the function pass doubles its var: log(x + x).
    def get_ref_log():
        ref_log = relay.Function([x], relay.log(relay.add(x, x)))
        return ref_log
    # Expected `mySub` after the function pass: subtract(x + x, y + y).
    def get_ref_sub():
        ref_sub = relay.Function([x, y], relay.subtract(relay.add(x, x), relay.add(y, y)))
        return ref_sub
    # Expected `myAbs` (added by the module pass, then doubled by the
    # function pass): abs(a + a).
    def get_ref_abs():
        shape = (5, 10)
        tp = relay.TensorType(shape, "float32")
        a = relay.var("a", tp)
        ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
        return ref_abs
    # Register a module pass.
    opt_tester = OptTester(mod)
    pass_ctx = None
    @tvm.transform.module_pass(opt_level=1)
    def mod_transform(expr, ctx):
        return opt_tester.transform(expr, ctx)
    module_pass = mod_transform
    # Register a function pass.
    @_transform.function_pass(opt_level=1)
    def func_transform(expr, mod, ctx):
        return opt_tester.transform(expr, ctx)
    function_pass = func_transform
    def test_pass_registration():
        # A Sequential carries its own PassInfo.
        passes = [module_pass, function_pass]
        opt_level = 2
        pass_name = "sequential"
        sequential = tvm.transform.Sequential(passes=passes, opt_level=opt_level)
        pass_info = sequential.info
        assert pass_info.name == pass_name
        assert pass_info.opt_level == opt_level
    def test_no_pass():
        # An empty Sequential must leave the module untouched.
        passes = []
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        ret_mod = sequential(mod)
        mod_func = ret_mod[v_sub]
        check_func(sub, mod_func)
    def test_only_module_pass():
        passes = [module_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        with tvm.transform.PassContext(required_pass=["mod_transform"]):
            ret_mod = sequential(mod)
        # Check the subtract function.
        sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, sub)
        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, abs_func)
    def test_only_function_pass():
        # Check the subtract function.
        passes = [function_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        with tvm.transform.PassContext(required_pass=["func_transform"]):
            ret_mod = sequential(mod)
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())
        # Check the log function.
        log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())
    def test_multiple_passes():
        # Reset the current module since mod has been polluted by the previous
        # function pass.
        mod = tvm.IRModule({v_sub: sub, v_log: log})
        passes = [module_pass, function_pass]
        sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
        required = ["mod_transform", "func_transform"]
        with tvm.transform.PassContext(required_pass=required):
            ret_mod = sequential(mod)
        # Check the abs function is added.
        abs_var, abs_func = get_var_func()
        abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
        check_func(new_abs, get_ref_abs())
        # Check the subtract function is modified correctly.
        _, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
        check_func(new_sub, get_ref_sub())
        # Check the log function is modified correctly.
        _, new_log = extract_var_func(ret_mod, v_log.name_hint)
        check_func(new_log, get_ref_log())
        # Execute the updated subtract function.
        x_nd = get_rand(shape, dtype)
        y_nd = get_rand(shape, dtype)
        ref_res = np.subtract(x_nd.numpy() * 2, y_nd.numpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_sub)(
                x_nd, y_nd
            )
            tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
            res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_sub)(
                x_nd, y_nd
            )
            tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
        # Execute the updated abs function.
        x_nd = get_rand((5, 10), dtype)
        ref_res = np.abs(x_nd.numpy() * 2)
        for target, dev in tvm.testing.enabled_targets():
            res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
            res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_abs)(x_nd)
            tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
    test_pass_registration()
    test_no_pass()
    test_only_module_pass()
    test_only_function_pass()
    test_multiple_passes()
def test_sequential_with_scoping():
    """FoldConstant + EliminateCommonSubexpr inside a Sequential collapse the graph."""
    shape = (1, 2, 3)
    c_data = np.array(shape).astype("float32")
    tp = relay.TensorType(shape, "float32")
    def before():
        c = relay.const(c_data)
        x = relay.var("x", tp)
        y = relay.add(c, c)
        y = relay.multiply(y, relay.const(2, "float32"))
        y = relay.add(x, y)
        z = relay.add(y, c)
        # z and z1 are identical subexpressions on purpose — they should be
        # deduplicated by EliminateCommonSubexpr.
        z1 = relay.add(y, c)
        z2 = relay.add(z, z1)
        return relay.Function([x], z2)
    def expected():
        x = relay.var("x", tp)
        # (c + c) * 2 is folded to a constant at compile time.
        c_folded = (c_data + c_data) * 2
        y = relay.add(x, relay.const(c_folded))
        z = relay.add(y, relay.const(c_data))
        z1 = relay.add(z, z)
        return relay.Function([x], z1)
    seq = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.FoldConstant(),
            relay.transform.EliminateCommonSubexpr(),
            relay.transform.AlterOpLayout(),
        ]
    )
    mod = tvm.IRModule({"main": before()})
    with tvm.transform.PassContext(opt_level=3):
        with tvm.target.Target("llvm"):
            mod = seq(mod)
    zz = mod["main"]
    zexpected = run_infer_type(expected())
    assert tvm.ir.structural_equal(zz, zexpected)
def test_nested_sequential_with_scoping():
    """SimplifyExpr inside a nested Sequential collapses a chain of reshapes."""

    def before():
        data = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        weight = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = relay.reshape(out, newshape=(1, 16, -1))
        out = relay.reshape(out, newshape=(4, 8, -1, 16))
        out = relay.reverse_reshape(out, newshape=(32, 0, -1))
        return tvm.IRModule.from_expr(out)

    def expected():
        data = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        weight = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        # The three reshapes above simplify to one.
        out = relay.reshape(out, newshape=(32, 16, 16))
        return tvm.IRModule.from_expr(out)

    mod = before()
    nested = [
        tvm.transform.Sequential([relay.transform.SimplifyExpr()]),
    ]
    with tvm.transform.PassContext(opt_level=1):
        result = tvm.transform.Sequential(nested)(mod)
    ref = relay.transform.InferType()(expected())
    assert tvm.ir.structural_equal(result, ref)
def test_print_ir(capfd):
    """PrintIR inside a Sequential dumps the module (with a pass banner) to stderr."""
    shape = (1, 2, 3)
    tensor_ty = relay.TensorType(shape, "float32")
    x = relay.var("x", tensor_ty)
    body = relay.multiply(relay.add(x, x), relay.const(2, "float32"))
    mod = tvm.IRModule({"main": relay.Function([x], body)})
    pipeline = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.FoldConstant(),
            tvm.transform.PrintIR(),
            relay.transform.DeadCodeElimination(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3):
        mod = pipeline(mod)
    captured = capfd.readouterr().err
    # The dump names the pass and contains the surviving multiply op.
    assert "PrintIR" in captured
    assert "multiply" in captured
@tvm.instrument.pass_instrument
class PassCounter:
    """Pass instrument that counts how many passes have started running."""

    def __init__(self):
        # Just setting a garbage value to test set_up callback
        self.counts = 1234

    def enter_pass_ctx(self):
        # Reset on entering a PassContext.
        self.counts = 0

    def exit_pass_ctx(self):
        # Reset on leaving a PassContext.
        self.counts = 0

    def run_before_pass(self, module, info):
        # Called by the pass infrastructure before each pass runs.
        self.counts += 1

    def get_counts(self):
        return self.counts
def test_print_debug_callback():
    """PassCounter is reset on context entry/exit and counts pass invocations."""
    shape = (1, 2, 3)
    tensor_ty = relay.TensorType(shape, "float32")
    x = relay.var("x", tensor_ty)
    body = relay.multiply(relay.add(x, x), relay.const(2, "float32"))
    mod = tvm.IRModule({"main": relay.Function([x], body)})
    pipeline = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.FoldConstant(),
            relay.transform.DeadCodeElimination(),
        ]
    )
    pass_counter = PassCounter()
    with tvm.transform.PassContext(opt_level=3, instruments=[pass_counter]):
        # The garbage constructor value must be reset on context entry.
        assert pass_counter.get_counts() == 0
        mod = pipeline(mod)
        # TODO(@jroesch): when we remove new fn pass behavior we need to
        # change this back to match correct behavior
        assert pass_counter.get_counts() == 6
    # Reset again on context exit.
    assert pass_counter.get_counts() == 0
if __name__ == "__main__":
    # Delegate to TVM's pytest-based runner so every test in this file runs.
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
def test_defuse_simple():
    """Simple testcase: DefuseOps inverts FuseOps on an elementwise chain."""

    def before():
        x = relay.var("x", shape=(10, 20))
        y = relay.add(x, relay.const(1, "float32"))
        z = relay.exp(y)
        w = relay.squeeze(z)
        return relay.Function([x], w)

    typed = run_opt_pass(before(), transform.InferType())
    fused = run_opt_pass(typed, transform.FuseOps())
    # Defusing the fused function must recover the original (typed) function.
    defused = run_opt_pass(fused, transform.DefuseOps())
    assert tvm.ir.structural_equal(typed, defused)
def test_inception_like():
    """Fuse followed by defuse round-trips an inception-style branching graph."""

    def conv(data):
        conv_out = relay.nn.conv2d(
            data, relay.var("w"), kernel_size=(3, 3), padding=(1, 1), channels=16
        )
        return relay.nn.relu(data=conv_out)

    def inception_like(data):
        # Two parallel conv branches concatenated along the channel axis.
        branch_a = conv(data)
        branch_b = conv(data)
        return relay.concatenate((branch_a, branch_b), axis=1)

    def before(dshape):
        x = relay.var("x", shape=dshape)
        stacked = inception_like(inception_like(x))
        return relay.Function(relay.analysis.free_vars(stacked), stacked)

    typed = run_opt_pass(before((1, 16, 64, 64)), transform.InferType())
    fused = run_opt_pass(typed, transform.FuseOps())
    # Defusing the fused function must recover the original (typed) function.
    defused = run_opt_pass(fused, transform.DefuseOps())
    assert tvm.ir.structural_equal(typed, defused)
def test_defuse_complex():
"""Complex defuse testcase"""
def fused_conv2d_batch_norm(w):
data = relay.var("data", shape=(1, 224, 224, 3))
bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((64,), "float32"))
bn_beta0 = relay.var("bn_beta0", relay.TensorType((64,), "float32"))
bn_mmean0 = relay.var("bn_mean0", relay.TensorType((64,), "float32"))
bn_mvar0 = relay.var("bn_var0", relay.TensorType((64,), "float32"))
c0 = relay.nn.conv2d(
data,
w,
strides=(2, 2),
padding=(3, 3, 3, 3),
channels=64,
kernel_size=(7, 7),
data_layout="NHWC",
kernel_layout="OHWI",
out_layout="NHWC",
)
c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c2 = c1[0]
return relay.Function(relay.analysis.free_vars(c2), c2)
def fused_conv2d_batch_norm_relu(z):
data2 = relay.var("data2", shape=(1, 56, 56, 64))
bn_gamma0 = relay.var("bn_gamma0", relay.TensorType((64,), "float32"))
bn_beta0 = relay.var("bn_beta0", relay.TensorType((64,), "float32"))
bn_mmean0 = relay.var("bn_mean0", relay.TensorType((64,), "float32"))
bn_mvar0 = relay.var("bn_var0", relay.TensorType((64,), "float32"))
c0 = relay.nn.conv2d(
data2,
z,
padding=(1, 1, 1, 1),
channels=64,
kernel_size=(3, 3),
data_layout="NHWC",
kernel_layout="OHWI",
out_layout="NHWC",
)
c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
c2 = c1[0]
c3 = relay.nn.relu(data=c2)
return relay.Function(relay.analysis.free_vars(c3), c3)
def fused_max_pool2d():
data1 = relay.var("data1", shape=(1, 112, 112, 64))
a1 = relay.nn.max_pool2d(
data1,
pool_size=(3, 3),
strides=(2, 2),
padding=(1, 1, 1, 1),
layout="NHWC",
out_layout="NHWC",
)
return relay.Function(relay.analysis.free_vars(a1), a1)
def fused_add_relu():
data1 = relay.var("data1", shape=(1, 56, 56, 64))
data2 = relay.var("data2", shape=(1, 56, 56, 64))
a0 = relay.add(data1, data2)
a1 = relay.nn.relu(a0)
return relay.Function(relay.analysis.free_vars(a1), a1)
def before_fused(conv_layer1_weight, conv_layer2_weight):
data = relay.var("data", shape=(1, 3, 224, 224))
data1 = relay.layout_transform(data, src_layout="NCHW", dst_layout="NHWC")
bn_gamma0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_beta0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mmean0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
bn_mvar0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
a0 = fused_conv2d_batch_norm(conv_layer1_weight)
a1 = fused_max_pool2d()
a2 = fused_conv2d_batch_norm_relu(conv_layer2_weight)
a3 = fused_add_relu()
y0 = relay.Call(a0, [data1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0])
y1 = relay.Call(a1, [y0])
y2 = relay.Call(a2, [y1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0])
y3 = relay.Call(a3, [y1, y2])
return relay.Function(relay.analysis.free_vars(y3), y3)
    def golden_defused(conv_layer1_weight, conv_layer2_weight):
        """Hand-written expected result of DefuseOps: the same network as
        before_fused but flattened into a single function body with no
        inner function calls."""
        data = relay.var("data", shape=(1, 3, 224, 224))
        data1 = relay.layout_transform(data, src_layout="NCHW", dst_layout="NHWC")
        # Uninitialized placeholder constants, mirroring before_fused.
        bn_gamma0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
        bn_beta0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
        bn_mmean0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
        bn_mvar0 = relay.const(tvm.nd.array(numpy.ndarray(shape=(64,), dtype="float32")))
        # Stem: 7x7 stride-2 conv + batch norm (NHWC).
        c0 = relay.nn.conv2d(
            data1,
            conv_layer1_weight,
            strides=(2, 2),
            padding=(3, 3, 3, 3),
            channels=64,
            kernel_size=(7, 7),
            data_layout="NHWC",
            kernel_layout="OHWI",
            out_layout="NHWC",
        )
        c1 = relay.nn.batch_norm(c0, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
        c2 = c1[0]
        c3 = relay.nn.max_pool2d(
            c2,
            pool_size=(3, 3),
            strides=(2, 2),
            padding=(1, 1, 1, 1),
            layout="NHWC",
            out_layout="NHWC",
        )
        # Second branch: 3x3 conv + batch norm + relu.
        c4 = relay.nn.conv2d(
            c3,
            conv_layer2_weight,
            padding=(1, 1, 1, 1),
            channels=64,
            kernel_size=(3, 3),
            data_layout="NHWC",
            kernel_layout="OHWI",
            out_layout="NHWC",
        )
        c5 = relay.nn.batch_norm(c4, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0, axis=3)
        c6 = c5[0]
        c7 = relay.nn.relu(c6)
        # Residual join of the pool output and the conv branch.
        c8 = relay.add(c3, c7)
        c9 = relay.nn.relu(c8)
        return relay.Function(relay.analysis.free_vars(c9), c9)
    # creating weight constants for the two convolution layers
    # in the input fused model and the golden defused model.
    conv_layer1_weight = relay.nn.Constant(
        tvm.nd.array(numpy.ndarray(shape=(64, 7, 7, 3), dtype="float32"))
    )
    conv_layer2_weight = relay.nn.Constant(
        tvm.nd.array(numpy.ndarray(shape=(64, 3, 3, 64), dtype="float32"))
    )
    # Defuse the hand-fused module and compare it structurally against the
    # hand-written golden module (both type-inferred first).
    x = before_fused(conv_layer1_weight, conv_layer2_weight)
    x = run_opt_pass(x, transform.InferType())
    defused = run_opt_pass(x, transform.DefuseOps())
    golden1 = golden_defused(conv_layer1_weight, conv_layer2_weight)
    golden1 = run_opt_pass(golden1, transform.InferType())
    assert tvm.ir.structural_equal(defused, golden1), (
        "Actual = \n" + str(defused) + "\nGolden = \n" + str(golden1)
    )
# Entry point when executed as a script: run all tests in this file.
if __name__ == "__main__":
    tvm.testing.main()
| 8,227 | 37.092593 | 98 | py |
tvm | tvm-main/tests/python/relay/test_recast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay.transform import recast
def test_recast_simple():
    """Recast a single convolution operator from float32 to int8/int32."""

    def make_input_graph():
        # Plain float32 conv2d — the graph handed to recast().
        data = relay.var("x", shape=[8, 8, 8, 8])
        weight = relay.var("w", shape=[8, 8, 3, 3])
        conv = relay.nn.conv2d(data, weight, padding=(1, 1), out_dtype="float32")
        return relay.Function([data, weight], conv)

    def make_golden_graph():
        # recast should cast both conv inputs to int8, accumulate in int32,
        # and cast the result back to float32.
        data = relay.var("x", shape=[8, 8, 8, 8])
        weight = relay.var("w", shape=[8, 8, 3, 3])
        data_int = relay.cast(data, "int8")
        weight_int = relay.cast(weight, "int8")
        conv = relay.nn.conv2d(data_int, weight_int, padding=(1, 1), out_dtype="int32")
        result = relay.cast(conv, "float32")
        return relay.Function([data, weight], result)

    actual = recast(make_input_graph(), "int8", "int32")
    assert tvm.ir.structural_equal(make_golden_graph(), actual)
def test_recast_medium():
    """Recast a slightly larger graph."""
    def before():
        # Two stacked float32 convolutions.
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
        w2 = relay.var("w2", shape=[8, 8, 3, 3])
        c2 = relay.nn.conv2d(c, w2, padding=(1, 1), out_dtype="float32")
        return relay.Function([x, w, w2], c2)
    def expected():
        # Each conv gets int8 inputs and int32 output; note the round trip
        # (int32 -> float32 -> int8) between the two layers, since recast
        # wraps every targeted op independently.
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        x_int = relay.cast(x, "int8")
        w_int = relay.cast(w, "int8")
        c = relay.nn.conv2d(x_int, w_int, padding=(1, 1), out_dtype="int32")
        c_float = relay.cast(c, "float32")
        w2 = relay.var("w2", shape=[8, 8, 3, 3])
        w2_int = relay.cast(w2, "int8")
        c_float_int = relay.cast(c_float, "int8")
        c2 = relay.nn.conv2d(c_float_int, w2_int, padding=(1, 1), out_dtype="int32")
        c2_float = relay.cast(c2, "float32")
        return relay.Function([x, w, w2], c2_float)
    pre = before()
    post = recast(pre, "int8", "int32")
    expected = expected()
    assert tvm.ir.structural_equal(expected, post)
def test_recast_skip():
    """Recast a graph using skip layers."""
    def before():
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
        w2 = relay.var("w2", shape=[8, 8, 3, 3])
        c2 = relay.nn.conv2d(c, w2, padding=(1, 1), out_dtype="float32")
        return relay.Function([x, w, w2], c2)
    def expected():
        # With skip_layers=[0] the first conv stays in float32; only the
        # second conv is recast to int8/int32.
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
        w2 = relay.var("w2", shape=[8, 8, 3, 3])
        w2_int = relay.cast(w2, "int8")
        c_int = relay.cast(c, "int8")
        c2 = relay.nn.conv2d(c_int, w2_int, padding=(1, 1), out_dtype="int32")
        c2_float = relay.cast(c2, "float32")
        return relay.Function([x, w, w2], c2_float)
    pre = before()
    post = recast(pre, "int8", "int32", skip_layers=[0])
    expected = expected()
    assert tvm.ir.structural_equal(expected, post)
def test_recast_concat():
    """Recast targets a tuple-consuming op (concatenate) correctly."""

    def make_input_graph():
        # float32 concatenate over a 2-tuple of (1, 4) tensors.
        lhs = relay.var("x", shape=[1, 4])
        rhs = relay.var("y", shape=[1, 4])
        joined = relay.op.concatenate(relay.Tuple([lhs, rhs]), axis=1)
        return relay.Function([lhs, rhs], joined)

    def make_golden_graph():
        # Both tuple fields are cast to float16 before the concatenate, and
        # the result is cast back to float32.
        lhs = relay.var("x", shape=[1, 4])
        rhs = relay.var("y", shape=[1, 4])
        halved = relay.Tuple([relay.cast(lhs, "float16"), relay.cast(rhs, "float16")])
        joined = relay.op.concatenate(halved, axis=1)
        restored = relay.cast(joined, "float32")
        return relay.Function([lhs, rhs], restored)

    actual = recast(make_input_graph(), "float16", "float32", ops=["concatenate"])
    assert tvm.ir.structural_equal(make_golden_graph(), actual)
def test_recast_relu():
    """Recast a ReLU operator which does not have attributes."""
    def before():
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        c = relay.nn.conv2d(x, w, padding=(1, 1), out_dtype="float32")
        r = relay.nn.relu(c)
        return relay.Function([x, w], r)
    def expected():
        # Both conv2d and relu are recast; each op independently casts its
        # inputs to float16 and its output back to float32, hence the
        # float32 -> float16 round trip between conv and relu.
        x = relay.var("x", shape=[8, 8, 8, 8])
        w = relay.var("w", shape=[8, 8, 3, 3])
        x_fp16 = relay.cast(x, "float16")
        w_fp16 = relay.cast(w, "float16")
        c = relay.nn.conv2d(x_fp16, w_fp16, padding=(1, 1), out_dtype="float16")
        c_float32 = relay.cast(c, "float32")
        c_float16 = relay.cast(c_float32, "float16")
        r = relay.nn.relu(c_float16)
        r_float32 = relay.cast(r, "float32")
        return relay.Function([x, w], r_float32)
    pre = before()
    post = recast(pre, "float16", "float16", ops=["nn.conv2d", "nn.relu"])
    expected = expected()
    assert tvm.ir.structural_equal(expected, post)
# Run every test case in file order when invoked as a script.
if __name__ == "__main__":
    for _test_case in (
        test_recast_simple,
        test_recast_medium,
        test_recast_skip,
        test_recast_concat,
        test_recast_relu,
    ):
        _test_case()
tvm | tvm-main/tests/python/relay/test_op_qnn_add.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def test_tflite_same_io_qnn_params():
    """qnn.add where both inputs and the output share the same TFLite-style
    uint8 quantization params (scale 0.00784314, zero point 127).

    The op is lowered with CanonicalizeOps, executed on the graph executor,
    and the uint8 results are compared against hand-computed goldens.
    """
    data_dtype = "uint8"
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    z = relay.qnn.op.add(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(0.00784314, "float32"),
        lhs_zero_point=relay.const(127, "int32"),
        rhs_scale=relay.const(0.00784314, "float32"),
        rhs_zero_point=relay.const(127, "int32"),
        output_scale=relay.const(0.00784314, "float32"),
        output_zero_point=relay.const(127, "int32"),
    )
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    x_datas = [
        np.array((140, 153, 165, 178)).reshape((1, 4)),
        np.array((25, 153, 178, 216)).reshape((1, 4)),
        np.array((25, 153, 216, 165)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((204, 178, 165, 140)).reshape((1, 4)),
        np.array((204, 178, 191, 25)).reshape((1, 4)),
        np.array((204, 178, 25, 191)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((217, 204, 203, 191)).reshape((1, 4)),
        np.array((102, 204, 242, 114)).reshape((1, 4)),
        np.array((102, 204, 114, 229)).reshape((1, 4)),
    ]
    # Run each input pair through the same compiled function.
    for i in range(0, 3):
        x_data = x_datas[i]
        y_data = y_datas[i]
        golden_output = golden_outputs[i]
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), golden_output)
def test_tflite_different_io_qnn_params():
    """qnn.add where lhs, rhs, and output each use different quantization
    params (scale and zero point), so requantization is exercised."""
    data_dtype = "uint8"
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    z = relay.qnn.op.add(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(0.0156863, "float32"),
        lhs_zero_point=relay.const(127, "int32"),
        rhs_scale=relay.const(0.0117647, "float32"),
        rhs_zero_point=relay.const(85, "int32"),
        output_scale=relay.const(0.0235294, "float32"),
        output_zero_point=relay.const(128, "int32"),
    )
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    x_datas = [
        np.array((76, 140, 153, 172)).reshape((1, 4)),
        np.array((133, 140, 146, 153)).reshape((1, 4)),
        np.array((76, 140, 172, 146)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((136, 119, 128, 17)).reshape((1, 4)),
        np.array((136, 119, 111, 94)).reshape((1, 4)),
        np.array((136, 119, 17, 128)).reshape((1, 4)),
    ]
    golden_outputs = [
        np.array((120, 154, 167, 124)).reshape((1, 4)),
        np.array((158, 154, 154, 150)).reshape((1, 4)),
        np.array((120, 154, 124, 163)).reshape((1, 4)),
    ]
    # Run each input pair through the same compiled function.
    for i in range(0, 3):
        x_data = x_datas[i]
        y_data = y_datas[i]
        golden_output = golden_outputs[i]
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), golden_output)
def test_saturation():
    """Check uint8 saturation behaviour of qnn.add.

    Each sub-case builds a qnn.add with all zero points fixed at 0, lowers
    it with CanonicalizeOps, runs it on the graph executor, and compares the
    uint8 result against hand-computed golden values. Sums exceeding the
    uint8 range must clamp to 255 rather than wrap around.
    """

    def run_add_case(lhs_scale, rhs_scale, output_scale, x_vals, y_vals, golden_vals):
        # Build, canonicalize, execute, and check one qnn.add configuration.
        data_dtype = "uint8"
        x = relay.var("x", shape=(1, 4), dtype=data_dtype)
        y = relay.var("y", shape=(1, 4), dtype=data_dtype)
        z = relay.qnn.op.add(
            lhs=x,
            rhs=y,
            lhs_scale=relay.const(lhs_scale, "float32"),
            lhs_zero_point=relay.const(0, "int32"),
            rhs_scale=relay.const(rhs_scale, "float32"),
            rhs_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(output_scale, "float32"),
            output_zero_point=relay.const(0, "int32"),
        )
        func = relay.Function([x, y], z)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        mod = relay.qnn.transform.CanonicalizeOps()(mod)
        func = mod["main"]
        x_data = np.array(x_vals).reshape((1, 4))
        y_data = np.array(y_vals).reshape((1, 4))
        golden_output = np.array(golden_vals).reshape((1, 4))
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), golden_output)

    # Identical scales on inputs and output: 255 + 255 saturates at 255.
    run_add_case(0.125, 0.125, 0.125, (255, 1, 1, 0), (255, 255, 128, 0), (255, 255, 129, 0))
    # Output scale coarser (0.25) than the input scales (0.125): sums are
    # effectively halved before saturation applies.
    run_add_case(0.125, 0.125, 0.25, (255, 1, 1, 0), (255, 255, 127, 0), (255, 129, 65, 0))
    # NOTE(review): the original test repeated the previous configuration
    # verbatim under a different (misleading) comment; the duplicate run is
    # kept so the executed behaviour is unchanged.
    run_add_case(0.125, 0.125, 0.25, (255, 1, 1, 0), (255, 255, 127, 0), (255, 129, 65, 0))
    # All three scales different.
    run_add_case(0.5, 0.25, 0.125, (255, 0, 1, 0), (0, 128, 64, 0), (255, 255, 132, 0))
def test_ignore_channel_axis():
    """qnn.add on 1-D inputs with explicit lhs_axis/rhs_axis=1.

    Only builds and type-checks the module (no execution): InferType must
    succeed even though the axis arguments exceed the tensors' rank-1
    layout — presumably the axis is ignored for scalar qnn params.
    """
    data_dtype = "uint8"
    x = relay.var("x", shape=(4,), dtype=data_dtype)
    y = relay.var("y", shape=(4,), dtype=data_dtype)
    z = relay.qnn.op.add(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(0.00784314, "float32"),
        lhs_zero_point=relay.const(127, "int32"),
        rhs_scale=relay.const(0.00784314, "float32"),
        rhs_zero_point=relay.const(127, "int32"),
        output_scale=relay.const(0.00784314, "float32"),
        output_zero_point=relay.const(127, "int32"),
        lhs_axis=1,
        rhs_axis=1,
    )
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
# Run every test case in file order when invoked as a script.
if __name__ == "__main__":
    for _case in (
        test_tflite_same_io_qnn_params,
        test_tflite_different_io_qnn_params,
        test_saturation,
        test_ignore_channel_axis,
    ):
        _case()
| 8,990 | 33.186312 | 97 | py |
tvm | tvm-main/tests/python/relay/test_dataflow_pattern.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import
import numpy as np
import tvm
from tvm import relay
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.dataflow_pattern import *
from tvm.relay.testing import run_opt_pass
# NB: these values mirror the C++ OpPattern enum (kElemWise = 0,
# kBroadcast = 1); we lose the type safety due to the Python/C++ calling
# convention.
K_ELEMWISE = 0
K_BROADCAST = 1
## NODE TESTS
def test_expr_pattern():
    """is_expr wraps a concrete Relay expression in an ExprPattern."""
    ep = is_expr(relay.var("x", shape=(4, 1)))
    assert isinstance(ep, ExprPattern)
    assert isinstance(ep.expr, relay.Var)
def test_var_pattern():
    """is_var builds a VarPattern that records the variable name."""
    v = is_var("x")
    assert isinstance(v, VarPattern)
    assert v.name == "x"
def test_constant_pattern():
    """is_constant builds a ConstantPattern."""
    c = is_constant()
    assert isinstance(c, ConstantPattern)
def test_wildcard_pattern():
    """wildcard builds a WildcardPattern."""
    wc = wildcard()
    assert isinstance(wc, WildcardPattern)
def test_CallPattern():
    """Calling a pattern builds a CallPattern whose args are the operands."""
    wc1 = wildcard()
    wc2 = wildcard()
    c = is_op("add")(wc1, wc2)
    assert isinstance(c, CallPattern)
    assert isinstance(c.args[0], WildcardPattern)
    assert isinstance(c.args[1], WildcardPattern)
def test_FunctionPattern():
    """FunctionPattern exposes params and body sub-patterns."""
    wc1 = wildcard()
    wc2 = wildcard()
    c = is_op("add")(wc1, wc2)
    f = FunctionPattern([wc1, wc2], c)
    assert isinstance(f, FunctionPattern)
    assert isinstance(f.params[0], WildcardPattern)
    assert isinstance(f.params[1], WildcardPattern)
    assert isinstance(f.body, CallPattern)
    assert isinstance(f.body.args[0], WildcardPattern)
    assert isinstance(f.body.args[1], WildcardPattern)
def test_TuplePattern():
    """is_tuple builds a TuplePattern with per-field sub-patterns."""
    wc1 = wildcard()
    wc2 = wildcard()
    t = is_tuple([wc1, wc2])
    assert isinstance(t, TuplePattern)
    assert isinstance(t.fields[0], WildcardPattern)
    assert isinstance(t.fields[1], WildcardPattern)
def test_TupleGetItemPattern():
    """is_tuple_get_item pairs a tuple pattern with an index."""
    wc1 = wildcard()
    wc2 = wildcard()
    t = is_tuple([wc1, wc2])
    tgi = is_tuple_get_item(t, 1)
    assert isinstance(tgi, TupleGetItemPattern)
    assert isinstance(tgi.tuple, TuplePattern)
    assert isinstance(tgi.tuple.fields[0], WildcardPattern)
    assert isinstance(tgi.tuple.fields[1], WildcardPattern)
def test_AltPattern():
    """The | operator on patterns builds an AltPattern (either/or)."""
    is_add_or_sub = is_op("add") | is_op("subtract")
    assert isinstance(is_add_or_sub, AltPattern)
def test_TypePattern():
    """has_type builds a TypePattern holding the required type."""
    ttype = relay.TensorType((10, 10), "float32")
    ty_pat = has_type(ttype)
    assert isinstance(ty_pat, TypePattern)
    assert ty_pat.type == ttype
def test_DataTypePattern():
    """has_dtype builds a DataTypePattern holding the required dtype."""
    dtype = "float16"
    pattern = has_dtype(dtype)
    assert isinstance(pattern, DataTypePattern)
    assert pattern.dtype == dtype
def test_ShapePattern():
    """has_shape builds a ShapePattern holding the required shape."""
    shape = [10, 10]
    pattern = has_shape(shape)
    assert isinstance(pattern, ShapePattern)
    assert tvm.ir.structural_equal(pattern.shape, shape)
def test_AttrPattern():
    """has_attr builds an AttrPattern and stores the attr constraints."""
    op = is_op("add").has_attr({"TOpPattern": K_ELEMWISE})
    assert isinstance(op, AttrPattern)
    assert op.attrs["TOpPattern"] == K_ELEMWISE
def test_IfPattern():
    """is_if builds an IfPattern with cond/true/false sub-patterns."""
    x = is_var("x")
    y = is_var("y")
    pat = is_if(is_op("less")(x, y), x, y)
    assert isinstance(pat, IfPattern)
    assert isinstance(pat.cond, CallPattern)
    assert isinstance(pat.true_branch, VarPattern)
    assert isinstance(pat.false_branch, VarPattern)
def test_LetPattern():
    """is_let builds a LetPattern with var/value/body sub-patterns."""
    x = is_var("x")
    y = is_var("y")
    let_var = is_var("let")
    pat = is_let(let_var, is_op("less")(x, y), let_var)
    assert isinstance(pat, LetPattern)
    assert isinstance(pat.var, VarPattern)
    assert isinstance(pat.value, CallPattern)
    assert isinstance(pat.body, VarPattern)
## MATCHER TESTS
def test_match_op():
    """An op pattern matches the op it names."""
    assert is_op("add").match(relay.op.op.get("add"))
def test_no_match_op():
    """An op pattern does not match a different op."""
    assert not is_op("add").match(relay.op.op.get("subtract"))
def test_match_op_or():
    """An AltPattern matches either alternative."""
    is_add_or_sub = is_op("add") | is_op("subtract")
    assert is_add_or_sub.match(relay.op.op.get("add"))
    assert is_add_or_sub.match(relay.op.op.get("subtract"))
def test_match_call_commutive():
    """Commutative ops (add, multiply) match with operands in either order.

    NOTE: "commutive" is a typo for "commutative"; the name is kept so test
    discovery/selection stays stable.
    """
    x = relay.var("x")
    y = relay.var("y")
    add_pattern = is_op("add")(is_var("x"), is_var("y"))
    assert add_pattern.match(x + y)
    assert add_pattern.match(y + x)
    mul_pattern = is_op("multiply")(is_var("x"), is_var("y"))
    assert mul_pattern.match(x * y)
    assert mul_pattern.match(y * x)
def test_no_match_call_commutive():
    """Non-commutative ops (subtract, divide) match only in operand order."""
    x = relay.var("x")
    y = relay.var("y")
    add_pattern = is_op("subtract")(is_var("x"), is_var("y"))
    assert add_pattern.match(x - y)
    assert not add_pattern.match(y - x)
    add_pattern = is_op("divide")(is_var("x"), is_var("y"))
    assert add_pattern.match(x / y)
    assert not add_pattern.match(y / x)
def test_match_call():
    """Call patterns match concrete calls; wildcard()(None) matches any call."""
    x = relay.var("x")
    y = relay.var("y")
    add_pattern = is_op("add")(wildcard(), wildcard())
    assert add_pattern.match(x + y)
    # Match call with any number of inputs
    call_pattern = wildcard()(None)
    assert call_pattern.match(relay.op.nn.relu(x))
    assert call_pattern.match(relay.op.add(x, y))
def test_no_match_call():
    """A call pattern for one op does not match a call to another."""
    x = relay.var("x")
    y = relay.var("y")
    add_pattern = is_op("add")(wildcard(), wildcard())
    assert not add_pattern.match(x - y)
def test_match_func():
    """FunctionPattern matches functions; params=None matches any arity."""
    x = relay.var("x")
    y = relay.var("y")
    wc1 = wildcard()
    wc2 = wildcard()
    func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
    assert func_pattern.match(relay.Function([x, y], x + y))
    # Match Function with any number of inputs
    func_pattern = FunctionPattern(None, wildcard())
    assert func_pattern.match(relay.Function([x], x))
    assert func_pattern.match(relay.Function([x, y], x + y))
def test_no_match_func():
    """FunctionPattern body must match: add pattern vs subtract body fails."""
    x = relay.var("x")
    y = relay.var("y")
    wc1 = wildcard()
    wc2 = wildcard()
    func_pattern = FunctionPattern([wc1, wc2], wc1 + wc2)
    assert not func_pattern.match(relay.Function([x, y], x - y))
def test_match_if():
    """An if pattern matches a structurally identical relay If."""
    x = is_var("x")
    y = is_var("y")
    pat = is_if(is_op("less")(x, y), x, y)
    x = relay.var("x")
    y = relay.var("y")
    cond = x < y
    assert pat.match(relay.expr.If(cond, x, y))
def test_no_match_if():
    """Wrong condition op or swapped branches both fail to match."""
    x = is_var("x")
    y = is_var("y")
    pat = is_if(is_op("less")(x, y), x, y)
    x = relay.var("x")
    y = relay.var("y")
    assert not pat.match(relay.expr.If(x > y, x, y))
    assert not pat.match(relay.expr.If(x < y, y, x))
def test_match_let():
    """A let pattern matches a structurally identical relay Let."""
    x = is_var("x")
    y = is_var("y")
    let_var = is_var("let")
    pat = is_let(let_var, is_op("less")(x, y), let_var)
    x = relay.var("x")
    y = relay.var("y")
    lv = relay.var("let")
    cond = x < y
    assert pat.match(relay.expr.Let(lv, cond, lv))
def test_no_match_let():
    """Wrong bound value or wrong body both fail to match."""
    x = is_var("x")
    y = is_var("y")
    let_var = is_var("let")
    pat = is_let(let_var, is_op("less")(x, y), let_var)
    x = relay.var("x")
    y = relay.var("y")
    lv = relay.var("let")
    assert not pat.match(relay.expr.Let(lv, x > y, lv))
    assert not pat.match(relay.expr.Let(lv, x < y, lv * x))
def test_match_option():
    """optional() matches with or without the optional op, and chains."""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")
    pattern = is_op("nn.relu")(
        is_op("nn.conv2d")(wildcard(), wildcard()).optional(
            lambda x: is_op("nn.bias_add")(x, wildcard())
        )
    )
    # conv -> relu (no bias_add): the optional op is absent.
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.nn.relu(conv2d)
    assert pattern.match(relu)
    # conv -> bias_add -> relu: the optional op is present.
    conv2d = relay.op.nn.conv2d(x, w)
    bias_add = relay.op.nn.bias_add(conv2d, b)
    relu = relay.op.nn.relu(bias_add)
    assert pattern.match(relu)
    # Chained optionals: at most one relu and one tanh, in that order.
    pattern = is_op("nn.conv2d")(wildcard(), wildcard())
    pattern = pattern.optional(is_op("nn.relu")).optional(is_op("tanh"))
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.nn.relu(conv2d)
    tanh = relay.op.tanh(conv2d)
    tanh2 = relay.op.tanh(relu)
    relu2 = relay.op.nn.relu(tanh)
    assert pattern.match(conv2d)
    assert pattern.match(relu)
    assert pattern.match(tanh)
    assert pattern.match(tanh2)
    assert not pattern.match(relu2)
def test_no_match_option():
    """optional() still requires the mandatory parts of the pattern."""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")
    pattern = is_op("nn.relu")(
        is_op("nn.conv2d")(wildcard(), wildcard()).optional(
            lambda x: is_op("nn.bias_add")(x, wildcard())
        )
    )
    # Wrong outer op (tanh, not relu).
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.tanh(conv2d)
    assert not pattern.match(relu)
    # Wrong inner op (dense, not conv2d).
    conv2d = relay.op.nn.dense(x, w)
    relu = relay.op.tanh(conv2d)
    assert not pattern.match(relu)
    conv2d = relay.op.nn.dense(x, w)
    bias_add = relay.op.nn.bias_add(conv2d, b)
    relu = relay.op.nn.relu(bias_add)
    assert not pattern.match(relu)
    # Plain add is not the optional bias_add.
    conv2d = relay.op.nn.conv2d(x, w)
    bias_add = conv2d + w
    relu = relay.op.nn.relu(bias_add)
    assert not pattern.match(relu)
def test_match_const():
    """is_constant matches only after params are bound to constants."""
    conv2d = is_op("nn.conv2d")(wildcard(), is_constant())
    pattern = is_op("nn.bias_add")(conv2d, wildcard())
    x = relay.var("x", shape=(1, 3, 224, 224))
    w = relay.var("w", shape=(3, 3, 3, 3))
    b = relay.var("b", shape=(3,))
    conv2d = relay.op.nn.conv2d(x, w)
    out = relay.op.nn.bias_add(conv2d, b)
    func = relay.Function([x, w, b], out)
    mod = tvm.IRModule.from_expr(func)
    # "w" is still a free Var here, so is_constant() fails.
    assert not pattern.match(mod["main"].body)
    mod["main"] = bind_params_by_name(mod["main"], {"w": tvm.nd.array(np.ones(shape=(3, 3, 3, 3)))})
    assert pattern.match(mod["main"].body)
def test_match_tuple():
    """Tuple and tuple_get_item patterns; index=None matches any index."""
    x = relay.var("x")
    y = relay.var("y")
    z = relay.op.op.get("add")
    tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
    assert tuple_pattern.match(relay.expr.Tuple((x, y, z)))
    tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
    tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
    assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
    tuple_get_item_pattern = is_tuple_get_item(tuple_pattern)  # Match any index
    assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 0))
    assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 1))
    assert tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
    # Match tuple with any inputs
    tuple_pattern = is_tuple(None)
    concat_pattern = is_op("concatenate")(tuple_pattern)
    assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x,)), axis=0))
    assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y)), axis=0))
    assert concat_pattern.match(relay.op.concatenate(relay.expr.Tuple((x, y, z)), axis=0))
def test_no_match_tuple():
    """Arity mismatch or wrong index fails to match."""
    x = relay.var("x")
    y = relay.var("y")
    z = relay.op.op.get("add")
    tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add"), wildcard()))
    assert not tuple_pattern.match(relay.expr.Tuple((x, y, z)))
    tuple_pattern = is_tuple((is_var("x"), wildcard(), is_op("add")))
    tuple_get_item_pattern = is_tuple_get_item(tuple_pattern, 1)
    assert not tuple_get_item_pattern.match(relay.expr.TupleGetItem(relay.expr.Tuple((x, y, z)), 2))
def test_match_type():
    """has_type matches a var with the exact tensor type."""
    x = relay.var("x", shape=(10, 10), dtype="float32")
    ty_pat = has_type(relay.TensorType((10, 10), "float32"))
    assert ty_pat.match(x)
def test_no_match_type():
    """has_type fails on a dtype mismatch."""
    x = relay.var("x", shape=(10, 10), dtype="int32")
    ty_pat = has_type(relay.TensorType((10, 10), "float32"))
    assert not ty_pat.match(x)
def test_match_dtype():
    """has_dtype matches on dtype alone."""
    x = relay.var("x", shape=(10, 10), dtype="float32")
    ty_pat = has_dtype("float32")
    assert ty_pat.match(x)
def test_no_match_dtype():
    """has_dtype fails on a different dtype."""
    x = relay.var("x", shape=(10, 10), dtype="int32")
    ty_pat = has_dtype("float32")
    assert not ty_pat.match(x)
def test_match_shape():
    """has_shape matches on shape alone."""
    x = relay.var("x", shape=(10, 10), dtype="float32")
    ty_pat = has_shape((10, 10))
    assert ty_pat.match(x)
def test_no_match_shape():
    """has_shape fails on a different shape."""
    x = relay.var("x", shape=(10, 10), dtype="int32")
    ty_pat = has_shape((10, 5))
    assert not ty_pat.match(x)
def test_match_op_attr():
    """has_attr on an op pattern matches ops with that registered attr."""
    op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
    op_pat = op(wildcard(), wildcard())
    x = relay.var("x")
    y = relay.var("y")
    assert op_pat.match(x + y)
def test_no_match_op_attr():
    """Attr mismatch, wrong op, and non-call exprs all fail to match."""
    op = is_op("nn.dense").has_attr({"TOpPattern": K_ELEMWISE})
    op_pat = op(wildcard(), wildcard())
    x = relay.var("x")
    y = relay.var("y")
    assert not op_pat.match(relay.op.nn.dense(x, y))
    op = is_op("add").has_attr({"TOpPattern": K_BROADCAST})
    op_pat = op(wildcard(), wildcard())
    x = relay.var("x")
    y = relay.var("y")
    assert not op_pat.match(x - y)
    z = relay.var("z")
    assert not op_pat.match(relay.Let(z, x + y, z))
def test_match_func_attr():
    """has_attr matches a function carrying the named function attr."""
    pattern = wildcard().has_attr({"Composite": "add"})
    x = relay.var("x")
    y = relay.var("y")
    f = relay.Function([x, y], x + y).with_attr("Composite", "add")
    assert pattern.match(f)
def test_no_match_func_attr():
    """Wrong attr key or wrong attr value both fail to match."""
    pattern = wildcard().has_attr({"Composite": "add"})
    x = relay.var("x")
    y = relay.var("y")
    f = relay.Function([x, y], x + y).with_attr("RandomTest", "add")
    assert not pattern.match(f)
    f = relay.Function([x, y], x + y).with_attr("Composite", "conv_bias")
    assert not pattern.match(f)
def test_match_call_attr():
    """has_attr on a call pattern matches operator and non-operator calls."""
    # String attr
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NCHW"})
    x = relay.var("x")
    y = relay.var("y")
    assert is_conv2d.match(relay.op.nn.conv2d(x, y))
    # Array attr
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
    out = relay.op.nn.conv2d(x, y, kernel_size=[3, 3])
    assert is_conv2d.match(out)
    # non-operator call
    attr_dict = {"call_attr": "attr"}
    call_has_attr = wildcard()(wildcard()).has_attr(attr_dict)
    call_attr = tvm.ir.make_node("DictAttrs", **attr_dict)
    a = relay.Var("a")
    b = relay.Var("b")
    assert call_has_attr.match(relay.Call(a, [b], attrs=call_attr))
    # empty attrs should match anything
    empty_attrs = tvm.ir.make_node("DictAttrs", **{})
    call_has_empty_attrs = wildcard()(wildcard()).has_attr({})
    assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=empty_attrs))
    assert call_has_empty_attrs.match(relay.Call(a, [b], attrs=call_attr))
def test_no_match_call_attr():
    """Wrong value, unknown key, or missing attrs fail to match."""
    x = relay.var("x")
    y = relay.var("y")
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"data_layout": "NHWC"})
    assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"RandomAttr": "NCHW"})
    assert not is_conv2d.match(relay.op.nn.conv2d(x, y))
    # Array attr
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard()).has_attr({"kernel_size": [3, 3]})
    out = relay.op.nn.conv2d(x, y, kernel_size=[2, 1])
    assert not is_conv2d.match(out)
    # non-operator calls
    call_has_attr = wildcard()(wildcard()).has_attr({"call_attr": "attr"})
    wrong_key = tvm.ir.make_node("DictAttrs", **{"wrong": "attr"})
    wrong_value = tvm.ir.make_node("DictAttrs", **{"call_attr": "wrong"})
    empty_attrs = tvm.ir.make_node("DictAttrs", **{})
    a = relay.Var("a")
    b = relay.Var("b")
    # attrs left undefined
    assert not call_has_attr.match(relay.Call(a, [b]))
    # wrong attrs
    assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_key))
    assert not call_has_attr.match(relay.Call(a, [b], attrs=wrong_value))
    assert not call_has_attr.match(relay.Call(a, [b], attrs=empty_attrs))
def test_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
out = relu + leaky_relu
# Check
assert diamond.match(out)
def test_no_match_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
inp = relay.var("input")
weight = relay.var("weight")
conv2d = relay.op.nn.conv2d(inp, weight)
relu = relay.op.nn.relu(conv2d)
leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
# Check
assert not diamond.match(leaky_relu)
assert not diamond.match(relu)
def test_match_fake_diamond():
# Pattern
is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
path1 = is_op("nn.relu")(is_conv2d)
path2 = is_op("nn.leaky_relu")(is_conv2d)
diamond = is_op("add")(path1, path2)
# Expr
input1 = relay.var("input1")
weight1 = relay.var("weight1")
conv2d1 = relay.op.nn.conv2d(input1, weight1)
inp2 = relay.var("input2")
weight2 = relay.var("weight2")
conv2d2 = relay.op.nn.conv2d(inp2, weight2)
relu = relay.op.nn.relu(conv2d1)
leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
out = relu + leaky_relu
# Check
assert not diamond.match(out)
def test_at_most_one_parent():
# Pattern
P = is_op("nn.conv2d")(wildcard(), wildcard()) # 'parent'
I = is_op("nn.relu")(wildcard()) # 'intermediate' ('path' in the code)
C = is_op("add")(wildcard(), wildcard()) # 'child'
pattern = dominates(P, I, C)
# n6(P)
# / \
# n7 \
# / \
# n8(P) n10(I)
# \ /
# n9(I) /
# \ /
# n11(C)
x = relay.var("x")
w = relay.var("w")
n6 = relay.op.nn.conv2d(x, w) # matches P
n7 = relay.op.tanh(n6) # does not match I
n8 = relay.op.nn.conv2d(n7, w) # matches P
n9 = relay.op.nn.relu(n8) # matches I
n10 = relay.op.nn.relu(n6) # matches I
n11 = relay.add(n9, n10) # matches C
# Does not match: Can't match the parent pattern P at both 8 and 6.
# Note that if we did allow P to be used twice the implementation would
# need to be changed to not 'jump over' n7.
assert not pattern.match(n11)
def test_match_dominator():
    """Dominator patterns match conv2d diamonds with arbitrary elementwise paths."""
    # Pattern: a conv2d whose consumers are all unary elementwise ops that
    # eventually converge on a single add.
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
    reduction = is_op("add")(wildcard(), wildcard())
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)

    # Classic Diamond
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Check
    assert diamond.match(out)

    # Deeper Branch: one path has three elementwise ops.
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    relu = relay.op.tanh(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Check
    assert diamond.match(out)

    # Single Branch: the diamond degenerates into a straight line that forks
    # only at the final add.
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    tanh = relay.op.tanh(relu)
    out = relu + tanh
    # Check
    assert diamond.match(out)

    # Fuzzy path/nested Diamond: the path pattern also accepts binary adds.
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
        "add"
    )(wildcard(), wildcard())
    reduction = is_op("add")(wildcard(), wildcard())
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relu + relu
    tanh = relay.op.tanh(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = tanh + leaky_relu
    assert diamond.match(out)
def test_not_match_dominator():
    """Graphs that violate the dominator conditions must not match."""
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
    reduction = is_op("add")(wildcard(), wildcard())
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)

    # Fake Diamond: the two branches start from different conv2d nodes.
    input1 = relay.var("input1")
    weight1 = relay.var("weight1")
    conv2d1 = relay.op.nn.conv2d(input1, weight1)
    inp2 = relay.var("input2")
    weight2 = relay.var("weight2")
    conv2d2 = relay.op.nn.conv2d(inp2, weight2)
    relu = relay.op.nn.relu(conv2d1)
    leaky_relu = relay.op.nn.leaky_relu(conv2d2, alpha=0)
    out = relu + leaky_relu
    # Check
    assert not diamond.match(out)

    # Add op that doesn't match K_ELEMWISE: the path pattern only accepts
    # unary elementwise ops, so the binary add on the path breaks the match.
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relu + relu
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Check
    assert not diamond.match(out)

    # Relu on the input instead of the conv
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(inp)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Check
    assert not diamond.match(out)

    # No conv
    inp = relay.var("input")
    relu = relay.op.nn.relu(inp)
    relu = relay.op.nn.relu(relu)
    tanh = relay.op.tanh(relu)
    out = relu + tanh
    # Check
    assert not diamond.match(out)
def test_match_typed_dominator():
    """A dominator pattern with dtype/shape constraints matches a typed diamond."""
    # Pattern: path nodes must be float32; the final add must have the
    # post-convolution shape (1, 3, 10, 10).
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
        "float32"
    )
    reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
    # Classic Diamond
    inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
    weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Check
    assert diamond.match(out)
def test_no_match_typed_dominator():
    """Mismatched shape or dtype constraints must reject an otherwise-valid diamond."""
    # Classic Diamond
    inp = relay.var("input", relay.TensorType((1, 3, 12, 12), "float32"))
    weight = relay.var("weight", relay.TensorType((3, 3, 3, 3), "float32"))
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # Pattern: wrong output shape (1 channel instead of 3).
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
        "float32"
    )
    reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 1, 10, 10])
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
    # Check
    assert not diamond.match(out)
    # Pattern: wrong path dtype (float16 instead of float32).
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()).has_dtype(
        "float16"
    )
    reduction = is_op("add")(wildcard(), wildcard()).has_shape([1, 3, 10, 10])
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)
    # Check
    assert not diamond.match(out)
def test_rewrite():
    """Rewriting an add expression into a subtract via a DFPatternCallback."""
    lhs = relay.var("x")
    rhs = relay.var("y")
    match_add = is_op("add")(wildcard(), wildcard())
    match_sub = is_op("subtract")(wildcard(), wildcard())

    class AddToSub(DFPatternCallback):
        def __init__(self):
            super().__init__()
            self.pattern = match_add

        def callback(self, pre, post, node_map):
            # Replace `a + b` with `a - b`.
            return post.args[0] - post.args[1]

    rewritten = rewrite(AddToSub(), lhs + rhs)
    assert match_sub.match(rewritten)
def test_rewrite_func():
    """Rewriting applies to expressions whose arguments include function calls."""
    x = relay.var("x")
    w = relay.var("w")
    y = relay.var("y")
    add_pattern = is_op("add")(wildcard(), wildcard())
    sub_pattern = is_op("subtract")(wildcard(), wildcard())

    class TestRewrite(DFPatternCallback):
        def __init__(self):
            super(TestRewrite, self).__init__()
            self.pattern = add_pattern

        def callback(self, pre, post, node_map):
            return post.args[0] - post.args[1]

    inpf = relay.var("input")
    weightf = relay.var("weight")
    func = relay.Function(
        [inpf, weightf], relay.op.nn.relu(relay.op.nn.conv2d(inpf, weightf)), attrs=None
    )
    # The top-level add (func-call + y) is rewritten to a subtract.
    out = rewrite(TestRewrite(), func(x, w) + y)
    assert sub_pattern.match(out)
def test_rewrite_func_with_attr():
    """A call to a function matched by attribute can be rewritten (here: inlined)."""
    x = relay.var("x")
    y = relay.var("y")
    f = relay.Function([x, y], x + y).with_attr("Composite", "add")

    a = relay.var("a")
    b = relay.var("b")
    c = relay.Call(f, [a, b])
    c_abs = relay.abs(c)

    class TestRewrite(DFPatternCallback):
        def __init__(self):
            super(TestRewrite, self).__init__()
            # Match a call to any function carrying the Composite="add" attr.
            self.pattern = wildcard().has_attr({"Composite": "add"})(wildcard(), wildcard())

        def callback(self, pre, post, node_map):
            return post.args[0] + post.args[1]

    out = rewrite(TestRewrite(), c_abs)
    inlined_add_pattern = is_op("abs")(is_op("add")(wildcard(), wildcard()))
    assert inlined_add_pattern.match(out)
def test_nested_rewrite():
    """An identity callback on a nested pattern must leave the graph unchanged."""

    class PatternCallback(DFPatternCallback):
        def __init__(self, pattern):
            super(PatternCallback, self).__init__()
            self.pattern = pattern

        def callback(self, pre, post, node_map):
            # Identity rewrite: return the matched expression as-is.
            return post

    def gen():
        x = relay.var("x")
        y = relay.var("y")
        y_add = relay.add(y, y)
        n0 = relay.add(x, y_add)
        n1 = relay.add(x, n0)
        return relay.add(n1, n0)

    def pattern():
        a = wildcard()
        b = wildcard()
        n0 = is_op("add")(a, b)
        n1 = is_op("add")(n0, a)
        return is_op("add")(n0, n1)

    out = gen()
    pat = pattern()
    new_out = rewrite(PatternCallback(pat), out)

    assert tvm.ir.structural_equal(out, new_out)
def test_not_fuse_multi_diamond():
    """The diamond must not match when the conv2d is also used outside the diamond."""
    # Pattern
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    path1 = is_op("nn.relu")(is_conv2d)
    path2 = is_op("nn.leaky_relu")(is_conv2d)
    diamond = is_op("add")(path1, path2)
    # Expr
    inp = relay.var("input")
    weight = relay.var("weight")
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu
    # conv2d escapes the diamond through this extra consumer.
    out = out + conv2d
    # Check
    assert not diamond.match(out)
class BatchnormCallback(DFPatternCallback):
    """Rewrites an unfused batch-norm expression into a single nn.batch_norm call.

    Matches gamma * (x - mean) / sqrt(var + eps) + beta, where eps must be a
    constant so its value can be read out for the fused op's epsilon attribute.
    Shared fixture for the fuse/partition batch-norm tests below.
    """

    def __init__(self):
        super(BatchnormCallback, self).__init__()
        self.x = wildcard()
        self.var = wildcard()
        self.mean = wildcard()
        self.beta = wildcard()
        self.gamma = wildcard()
        self.eps = is_constant()

        self.pattern = (
            self.gamma * (self.x - self.mean) / is_op("sqrt")(self.var + self.eps) + self.beta
        )

    def callback(self, pre, post, node_map):
        # Pull each matched sub-expression out of the node map; [0] takes the
        # first (and here only) expression bound to each pattern node.
        x = node_map[self.x][0]
        var = node_map[self.var][0]
        mean = node_map[self.mean][0]
        beta = node_map[self.beta][0]
        gamma = node_map[self.gamma][0]
        eps = node_map[self.eps][0]
        return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=eps.data.numpy().item())[0]
def test_fuse_batchnorm():
    """An unfused batch-norm expression is rewritten into nn.batch_norm."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")

    BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta

    out = rewrite(BatchnormCallback(), BN)
    assert tvm.ir.structural_equal(
        out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
    )
def test_no_fuse_batchnorm():
    """An almost-batch-norm ending in subtract (not add) must be left untouched."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")
    # Final op is subtract, so BatchnormCallback's pattern cannot match.
    fake_BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
    assert tvm.ir.structural_equal(rewrite(BatchnormCallback(), fake_BN), fake_BN)
def test_fuse_double_batchnorm():
    """Two stacked unfused batch-norms are both rewritten into nn.batch_norm calls."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")

    BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta
    BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta

    out = rewrite(BatchnormCallback(), BN2)

    bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
    bn2 = relay.op.nn.batch_norm(bn, gamma, beta, mean, var, epsilon=1e-5)[0]

    assert tvm.ir.structural_equal(out, bn2)
def test_partial_fuse_double_batchnorm():
    """Only the matching (outer) batch-norm is fused; the broken inner one stays."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")

    # Inner expression ends in subtract, so it does not match the pattern.
    BN = gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5)) - beta
    BN2 = gamma * (BN - mean) / relay.op.sqrt(var + relay.const(1e-5)) + beta

    out = rewrite(BatchnormCallback(), BN2)

    bn2 = relay.op.nn.batch_norm(BN, gamma, beta, mean, var, epsilon=1e-5)[0]

    assert tvm.ir.structural_equal(out, bn2)
def test_fuse_batchnorm_commutation():
    """Commuted/re-associated forms of the batch-norm expression still fuse."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")

    # commute add
    BN = beta + gamma * (x - mean) / relay.op.sqrt(var + relay.const(1e-5))
    out = rewrite(BatchnormCallback(), BN)
    assert tvm.ir.structural_equal(
        out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
    )

    # associate divide/multiply
    BN = (gamma * (x - mean)) / relay.op.sqrt(var + relay.const(1e-5)) + beta
    out = rewrite(BatchnormCallback(), BN)
    assert tvm.ir.structural_equal(
        out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
    )

    # associate multiply/divide
    BN = gamma * ((x - mean) / relay.op.sqrt(var + relay.const(1e-5))) + beta
    out = rewrite(BatchnormCallback(), BN)
    assert tvm.ir.structural_equal(
        out, relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)[0]
    )
def test_quadruple_rewrite_dominator():
    """Repeated dominator rewrites collapse four stacked diamonds into bare convs."""

    class DominatorRemovalCallback(DFPatternCallback):
        def __init__(self):
            super(DominatorRemovalCallback, self).__init__()
            self.inp = wildcard()
            self.weight = wildcard()
            is_conv2d = is_op("nn.conv2d")(self.inp, self.weight)
            is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(
                wildcard()
            ) | is_op("add")(wildcard(), wildcard())
            reduction = is_op("add")(wildcard(), wildcard())
            self.pattern = dominates(is_conv2d, is_unary_elemwise, reduction)

        def callback(self, pre, post, node_map):
            # Replace the whole matched diamond with just its conv2d.
            inp = node_map[self.inp][0]
            weight = node_map[self.weight][0]
            return relay.op.nn.conv2d(inp, weight)

    inp = relay.var("input")
    weight = relay.var("weight")
    # Classic Diamond
    conv2d = relay.op.nn.conv2d(inp, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu

    # Deeper Branch
    conv2d = relay.op.nn.conv2d(out, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    relu = relay.op.tanh(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = relu + leaky_relu

    # Single Branch
    conv2d = relay.op.nn.conv2d(out, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relay.op.nn.relu(relu)
    tanh = relay.op.tanh(relu)
    out = relu + tanh

    # Fuzzy path/nested Diamond
    conv2d = relay.op.nn.conv2d(out, weight)
    relu = relay.op.nn.relu(conv2d)
    relu = relu + relu
    tanh = relay.op.tanh(relu)
    leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
    out = tanh + leaky_relu
    # After rewriting all four diamonds, only a chain of four convs remains.
    one = relay.op.nn.conv2d(inp, weight)
    two = relay.op.nn.conv2d(one, weight)
    three = relay.op.nn.conv2d(two, weight)
    four = relay.op.nn.conv2d(three, weight)

    assert tvm.ir.structural_equal(DominatorRemovalCallback().rewrite(out), four)
def algebraic_simplify(expr):
    """Apply elementary algebraic identities (x+0, x*1, 0*x, ...) to ``expr``.

    Each identity is expressed as a small DFPatternCallback; all callbacks are
    applied together by a single ``rewrite`` call, so simplifications can
    cascade. Returns the simplified expression.
    """
    zero = is_expr(relay.const(0)) | is_expr(relay.const(0.0))
    one = is_expr(relay.const(1)) | is_expr(relay.const(1.0))

    class ElwiseNullCallback(DFPatternCallback):
        # Base class: replace the whole match with whatever self.x matched.
        def callback(self, pre, post, node_map):
            return node_map[self.x][0]  # pylint: disable=no-member

    class AddCallback(ElwiseNullCallback):
        # x + 0 -> x
        def __init__(self):
            super(AddCallback, self).__init__()
            self.x = wildcard()
            self.pattern = self.x + zero

    class SubCallback(ElwiseNullCallback):
        # x - 0 -> x
        def __init__(self):
            super(SubCallback, self).__init__()
            self.x = wildcard()
            self.pattern = self.x - zero

    class MulCallback(ElwiseNullCallback):
        # x * 1 -> x
        def __init__(self):
            super(MulCallback, self).__init__()
            self.x = wildcard()
            self.pattern = self.x * one

    class DivCallback(ElwiseNullCallback):
        # x / 1 -> x
        def __init__(self):
            super(DivCallback, self).__init__()
            self.x = wildcard()
            self.pattern = self.x / one

    class MulZeroCallback(ElwiseNullCallback):
        # 0 * x -> 0
        def __init__(self):
            super(MulZeroCallback, self).__init__()
            self.x = zero
            self.pattern = self.x * wildcard()

    class ZeroDivCallback(ElwiseNullCallback):
        # 0 / x -> 0
        def __init__(self):
            super(ZeroDivCallback, self).__init__()
            self.x = zero
            self.pattern = self.x / wildcard()

    return rewrite(
        [
            AddCallback(),
            SubCallback(),
            MulCallback(),
            DivCallback(),
            MulZeroCallback(),
            ZeroDivCallback(),
        ],
        expr,
    )
def test_algebraic_simplify():
    """Each algebraic identity fires for both int and float constants."""
    x = relay.Var("x")
    y = relay.Var("y")

    one = relay.const(1)
    zero = relay.const(0)
    onef = relay.const(1.0)
    zerof = relay.const(0.0)

    assert algebraic_simplify(x + zero) == x
    assert algebraic_simplify(x + zerof) == x
    assert algebraic_simplify(zero + x) == x
    assert algebraic_simplify(zerof + x) == x

    assert algebraic_simplify(x - zero) == x
    assert algebraic_simplify(x - zerof) == x

    assert algebraic_simplify(x * one) == x
    assert algebraic_simplify(x * onef) == x
    assert algebraic_simplify(one * x) == x
    assert algebraic_simplify(onef * x) == x
    assert algebraic_simplify(x * zero) == zero
    assert algebraic_simplify(x * zerof) == zerof

    assert algebraic_simplify(x / one) == x
    assert algebraic_simplify(x / onef) == x
    assert algebraic_simplify(zero / x) == zero
    assert algebraic_simplify(zerof / x) == zerof

    # A compound expression where several identities must cascade down to x + y.
    assert tvm.ir.structural_equal(
        algebraic_simplify((x + zero * y) / one + (y * one) - zero / x), x + y
    )
def test_double_partition():
    """Applying two partition patterns in sequence yields nested composite functions."""
    # Pattern 1
    conv2d_p = is_op("nn.conv2d")(wildcard(), wildcard())
    bias_add_p = is_op("nn.bias_add")(conv2d_p, wildcard())
    relu_p = is_op("nn.relu")(bias_add_p)

    # Graph: conv-bias-relu followed by conv-bias.
    x = relay.var("input")
    w = relay.var("weight")
    b = relay.var("bias")
    w2 = relay.var("weight")
    b2 = relay.var("bias")
    conv2d = relay.op.nn.conv2d(x, w)
    bias_add = relay.op.nn.bias_add(conv2d, b)
    relu = relay.op.nn.relu(bias_add)
    conv2d2 = relay.op.nn.conv2d(relu, w2)
    bias_add2 = relay.op.nn.bias_add(conv2d2, b2)

    # Partition with the longer (relu) pattern first, then the shorter one.
    partitioned = bias_add2
    for pat, label in [(relu_p, "conv_bias_relu"), (bias_add_p, "conv_bias")]:
        partitioned = pat.partition(partitioned, {"Composite": label})

    inpf = relay.var("input")
    weightf = relay.var("weight")
    biasf = relay.var("bias")
    func0 = (
        relay.Function(
            [inpf, weightf, biasf],
            relay.op.nn.relu(relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)),
        )
        .with_attr("Composite", "conv_bias_relu")
        .with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
    )
    inpf = relay.var("input")
    weightf = relay.var("weight")
    biasf = relay.var("bias")
    func1 = (
        relay.Function(
            [inpf, weightf, biasf], relay.op.nn.bias_add(relay.op.nn.conv2d(inpf, weightf), biasf)
        )
        .with_attr("Composite", "conv_bias")
        .with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")
    )
    expected = func1(func0(x, w, b), w2, b2)
    assert tvm.ir.structural_equal(partitioned, expected)
def test_partition_dominator():
    """Partitioning a dominator match extracts the whole diamond into a function."""
    # Pattern
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard())
    reduction = is_op("add")(wildcard(), wildcard())
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)

    # Classic Diamond
    inp = relay.var("input")
    weight = relay.var("weight")

    def generate_diamond(inp, weight):
        conv2d = relay.op.nn.conv2d(inp, weight)
        relu = relay.op.nn.relu(conv2d)
        relu = relay.op.nn.relu(relu)
        leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
        return relu + leaky_relu

    # Feed non-trivial arguments so they stay outside the partitioned function.
    out = generate_diamond(inp * inp, weight * weight)
    # Check
    partitioned = diamond.partition(out)

    i = relay.Var("input")
    w = relay.Var("weight")
    f = relay.Function([i, w], generate_diamond(i, w)).with_attr(
        "PartitionedFromPattern", "nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_"
    )
    assert tvm.ir.structural_equal(partitioned, f(inp * inp, weight * weight))
def test_quadruple_partition_dominator():
    """Four chained diamond variants each get their own partitioned function."""
    # Pattern
    is_conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    is_unary_elemwise = (wildcard().has_attr({"TOpPattern": K_ELEMWISE}))(wildcard()) | is_op(
        "add"
    )(wildcard(), wildcard())
    reduction = is_op("add")(wildcard(), wildcard())
    diamond = dominates(is_conv2d, is_unary_elemwise, reduction)

    inp = relay.var("input")
    weight = relay.var("weight")

    # Classic Diamond
    def classic_diamond(inp, weight):
        conv2d = relay.op.nn.conv2d(inp, weight)
        relu = relay.op.nn.relu(conv2d)
        relu = relay.op.nn.relu(relu)
        leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
        return relu + leaky_relu

    # Deeper Branch
    def deeper_diamond(inp, weight):
        conv2d = relay.op.nn.conv2d(inp, weight)
        relu = relay.op.nn.relu(conv2d)
        relu = relay.op.nn.relu(relu)
        relu = relay.op.tanh(relu)
        leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
        return relu + leaky_relu

    # Single Branch
    def single_branch(inp, weight):
        conv2d = relay.op.nn.conv2d(inp, weight)
        relu = relay.op.nn.relu(conv2d)
        relu = relay.op.nn.relu(relu)
        tanh = relay.op.tanh(relu)
        return relu + tanh

    # Fuzzy path/nested Diamond
    def nested_diamond(inp, weight):
        conv2d = relay.op.nn.conv2d(inp, weight)
        relu = relay.op.nn.relu(conv2d)
        relu = relu + relu
        tanh = relay.op.tanh(relu)
        leaky_relu = relay.op.nn.leaky_relu(conv2d, alpha=0)
        return tanh + leaky_relu

    partitioned = diamond.partition(
        nested_diamond(
            single_branch(deeper_diamond(classic_diamond(inp, weight), weight), weight), weight
        )
    )

    functions = []
    # One PartitionedFromPattern label per diamond variant, in chain order.
    partition_names = [
        "nn.conv2d_nn.relu_nn.relu_nn.leaky_relu_add_",
        "nn.conv2d_nn.relu_nn.relu_tanh_nn.leaky_relu_add_",
        "nn.conv2d_nn.relu_nn.relu_tanh_add_",
        "nn.conv2d_nn.relu_add_tanh_nn.leaky_relu_add_",
    ]
    for i, f in enumerate([classic_diamond, deeper_diamond, single_branch, nested_diamond]):
        inpf = relay.var("input")
        weightf = relay.var("weight")
        functions.append(
            relay.Function([inpf, weightf], f(inpf, weightf)).with_attr(
                "PartitionedFromPattern", partition_names[i]
            )
        )

    reference = functions[3](
        functions[2](functions[1](functions[0](inp, weight), weight), weight), weight
    )
    assert tvm.ir.structural_equal(partitioned, reference)
def get_BN(x, var, mean, beta, gamma, eps):
    """Build the unfused batch-norm expression gamma*(x-mean)/sqrt(var+eps)+beta.

    The exact expression-tree shape matters to the structural-equality checks
    in the partition tests, so the association is kept as written.
    """
    scaled_diff = gamma * (x - mean)
    return scaled_diff / relay.op.sqrt(var + eps) + beta
def test_partition_batchnorm():
    """Partitioning the batch-norm pattern wraps it in a labelled function."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")
    eps = relay.const(1e-5)
    BN = get_BN(x, var, mean, beta, gamma, eps)

    xf = relay.var("xf")
    varf = relay.var("varf")
    meanf = relay.var("meanf")
    betaf = relay.var("betaf")
    gammaf = relay.var("gammaf")
    # Put the arguments in topological order for the reference
    f = relay.Function(
        [gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
    ).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")

    partitioned = BatchnormCallback().pattern.partition(BN)
    reference = f(gamma, x, mean, var, beta)
    assert tvm.ir.structural_equal(partitioned, reference)
def test_partition_double_batchnorm():
    """Two stacked batch-norms partition into two separate labelled functions."""
    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")
    eps = relay.const(1e-5)

    BN = gamma * (x - mean) / relay.op.sqrt(var + eps) + beta
    BN2 = gamma * (BN - mean) / relay.op.sqrt(var + eps) + beta

    xf = relay.var("xf")
    varf = relay.var("varf")
    meanf = relay.var("meanf")
    betaf = relay.var("betaf")
    gammaf = relay.var("gammaf")
    f1 = relay.Function(
        [gammaf, xf, meanf, varf, betaf], get_BN(xf, varf, meanf, betaf, gammaf, eps)
    ).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")
    # The partitioner doesn't replace duplicates, so we use two copies of the function
    xf2 = relay.var("xf2")
    varf2 = relay.var("varf2")
    meanf2 = relay.var("meanf2")
    betaf2 = relay.var("betaf2")
    gammaf2 = relay.var("gammaf2")
    f2 = relay.Function(
        [gammaf2, xf2, meanf2, varf2, betaf2], get_BN(xf2, varf2, meanf2, betaf2, gammaf2, eps)
    ).with_attr("PartitionedFromPattern", "subtract_multiply_add_sqrt_divide_add_")

    partitioned = BatchnormCallback().pattern.partition(BN2)
    reference = f2(gamma, f1(gamma, x, mean, var, beta), mean, var, beta)
    assert tvm.ir.structural_equal(partitioned, reference)
def test_overlappting_partitions():
    """Overlapping matches (two TupleGetItems on one batch_norm) are not partitioned.

    NOTE(review): the function name has a typo ("overlappting"); kept as-is
    since renaming would change the collected test name.
    """
    x = wildcard()
    gamma = wildcard()
    beta = wildcard()
    moving_mean = wildcard()
    moving_var = wildcard()
    bn_node = is_op("nn.batch_norm")(x, gamma, beta, moving_mean, moving_var)
    tuple_get_item_node = TupleGetItemPattern(bn_node, 0)

    x = relay.var("x")
    var = relay.var("var")
    mean = relay.var("mean")
    beta = relay.var("beta")
    gamma = relay.var("gamma")
    BN = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
    # Both tuple accesses hit the same batch_norm, so the matches overlap.
    T1 = BN[0]
    T2 = BN[0]
    add = T1 + T2

    assert tuple_get_item_node.partition(add) == add
def test_partition_overused():
    """No partition happens when a matched intermediate is also used outside it."""
    pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))

    data = relay.var("input")
    kernel = relay.var("weight")
    conv = relay.op.nn.conv2d(data, kernel)
    # conv feeds both the relu inside the pattern and the add outside it.
    result = relay.op.nn.relu(conv) + conv

    assert pattern.partition(result) == result
def test_partition_fuzzy_tuple():
    """is_tuple(None) partitions concatenates of any arity (1, 2, and 3 here)."""
    x = relay.var("x")
    y = relay.var("y")
    z = x + y
    # None means "any number of tuple fields".
    tuple_pattern = is_tuple(None)
    concat_pattern = is_op("concatenate")(tuple_pattern)

    xp = relay.var("xp")
    yp = relay.var("yp")
    zp = relay.var("zp")

    def create_func(args, body):
        return relay.Function(args, body).with_attr("PartitionedFromPattern", "Tuple_concatenate_")

    def concat(*args):
        return relay.op.concatenate(relay.expr.Tuple(args), axis=0)

    one = concat_pattern.partition(concat(x))
    assert tvm.ir.structural_equal(one, create_func([xp], concat(xp))(x))
    two = concat_pattern.partition(concat(x, y))
    assert tvm.ir.structural_equal(two, create_func([xp, yp], concat(xp, yp))(x, y))
    three = concat_pattern.partition(concat(x, y, z))
    assert tvm.ir.structural_equal(three, create_func([xp, yp, zp], concat(xp, yp, zp))(x, y, z))
def test_partition_fuzzy_function_args():
    """FunctionPattern(None, ...) partitions calls to functions of any arity."""
    # None in both positions: any parameter list, any argument list.
    func_pattern = FunctionPattern(None, wildcard() + wildcard())(None) + wildcard()
    x = relay.var("x")
    y = relay.var("y")
    z = relay.var("z")
    b = relay.var("b")
    xp = relay.var("xp")
    yp = relay.var("yp")
    zp = relay.var("zp")

    def create_func(call):
        N = len(call.op.params)
        new_params = [relay.var(str(i)) for i in range(N + 1)]
        label = "add_FunctionCall_add_"
        # The 3-arg function's body contains an extra add, reflected in the label.
        if N == 3:
            label = "add_" + label
        return relay.Function(
            new_params, relay.Call(call.op, (new_params[0:-1])) + new_params[-1]
        ).with_attr("PartitionedFromPattern", label)(*([x, y, z][0:N] + [b]))

    f1 = relay.Function([xp], xp + xp)(x)
    one = func_pattern.partition(f1 + b)
    assert tvm.ir.structural_equal(one, create_func(f1))
    f2 = relay.Function([xp, yp], xp + yp)(x, y)
    two = func_pattern.partition(f2 + b)
    assert tvm.ir.structural_equal(two, create_func(f2))
    f3 = relay.Function([xp, yp, zp], xp + yp + zp)(x, y, z)
    three = func_pattern.partition(f3 + b)
    assert tvm.ir.structural_equal(three, create_func(f3))
def test_partition_check():
    """A check predicate gates partitioning (here: conv2d must be NCHW)."""
    pattern = is_op("nn.relu")(is_op("nn.conv2d")(is_var("input"), wildcard()))

    def check(pre):
        # `pre` is the matched relu; args[0] is the conv2d.
        return pre.args[0].attrs.data_layout == "NCHW"

    x = relay.var("input")
    w = relay.var("weight")
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.nn.relu(conv2d)

    xf = relay.var("input")
    wf = relay.var("weight")
    conv2df = relay.op.nn.conv2d(xf, wf)
    reluf = relay.op.nn.relu(conv2df)
    func = relay.Function([xf, wf], reluf).with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")

    reference = func(x, w)
    partitioned = pattern.partition(relu, check=check)
    assert tvm.ir.structural_equal(partitioned, reference)

    # NHWC fails the check, so the graph is returned unchanged.
    conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
    relu = relay.op.nn.relu(conv2d)
    assert relu == pattern.partition(relu, check=check)
def test_partition_check_types():
    """A check predicate can also inspect inferred types (batch size and layout)."""
    pattern = is_op("nn.relu")(is_op("nn.conv2d")(wildcard(), wildcard()))

    def check(pre):
        conv = pre.args[0]
        # Requires type inference to have run so checked_type is populated.
        return (conv.attrs.data_layout == "NCHW") and bool(conv.checked_type.shape[0] == 1)

    x = relay.var("input", shape=(1, 10, 10, 10))
    w = relay.var("weight", shape=(10, 10, 3, 3))
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.nn.relu(conv2d)
    relu = run_opt_pass(relu, relay.transform.InferType())

    partitioned = pattern.partition(relu, check=check)
    assert partitioned.op.attrs["PartitionedFromPattern"] == "nn.conv2d_nn.relu_"

    # Wrong layout: check fails, graph unchanged.
    conv2d = relay.op.nn.conv2d(x, w, data_layout="NHWC")
    relu = relay.op.nn.relu(conv2d)
    relu = run_opt_pass(relu, relay.transform.InferType())
    assert relu == pattern.partition(relu, check=check)

    # Batch size 2: check fails, graph unchanged.
    x = relay.var("input", shape=(2, 10, 10, 10))
    w = relay.var("weight", shape=(10, 10, 3, 3))
    conv2d = relay.op.nn.conv2d(x, w)
    relu = relay.op.nn.relu(conv2d)
    relu = run_opt_pass(relu, relay.transform.InferType())
    assert relu == pattern.partition(relu, check=check)
def conv_bias_relu(x, w, b):
    """Compose conv2d -> bias_add -> relu (shared fixture for the tests below)."""
    return relay.op.nn.relu(relay.op.nn.bias_add(relay.op.nn.conv2d(x, w), b))
def test_partition_option():
    """Optional sub-patterns still partition the full conv-bias-relu chain."""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")

    # bias_add optional, relu required.
    conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    bias = conv2d.optional(lambda x: is_op("nn.bias_add")(x, wildcard()))
    pattern1 = is_op("nn.relu")(bias)

    # bias_add required, relu optional.
    conv2d = is_op("nn.conv2d")(wildcard(), wildcard())
    bias = is_op("nn.bias_add")(conv2d, wildcard())
    pattern2 = bias.optional(lambda x: is_op("nn.relu")(x))

    relu = conv_bias_relu(x, w, b)

    xf = relay.var("x")
    wf = relay.var("w")
    bf = relay.var("b")
    func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
        "PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
    )

    assert pattern1.match(relu)
    assert tvm.ir.structural_equal(func(x, w, b), pattern1.partition(relu))

    assert pattern2.match(relu)
    assert tvm.ir.structural_equal(func(x, w, b), pattern2.partition(relu))
def test_partition_function():
    """A FunctionPattern match is partitioned, keeping the inner function call."""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")

    x1 = relay.var("x1")
    w1 = relay.var("w1")

    wc_x = wildcard()
    wc_w = wildcard()
    wc_b = wildcard()
    wc_x1 = wildcard()
    wc_w1 = wildcard()

    func_pattern = FunctionPattern([wc_x1, wc_w1], is_op("nn.conv2d")(wc_x1, wc_w1))
    pattern = func_pattern(wc_x, wc_w) + wc_b

    func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
    # Only the first add is inside the match; the trailing + b stays outside.
    expr = func(x, w) + b + b

    x2 = relay.var("x2")
    w2 = relay.var("w2")
    b2 = relay.var("b2")
    func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
        "PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
    )
    expr2 = func2(x, w, b) + b
    assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_partition_optional_function():
    """An alternation of two FunctionPatterns partitions via its matching arm."""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")

    x1 = relay.var("x1")
    w1 = relay.var("w1")

    wc_x = wildcard()
    wc_w = wildcard()
    wc_x1 = wildcard()
    wc_w1 = wildcard()

    func_pattern0 = FunctionPattern(
        [wc_x1, wc_w1], is_op("sigmoid")(is_op("nn.conv2d")(wc_x1, wc_w1))
    )
    func_pattern1 = FunctionPattern(
        [wc_x1, wc_w1], is_op("nn.relu")(is_op("nn.conv2d")(wc_x1, wc_w1))
    )
    # Either the sigmoid-body or the relu-body function call matches.
    pattern = func_pattern0(wc_x, wc_w) | func_pattern1(wc_x, wc_w)

    func = relay.Function([x1, w1], relay.nn.relu(relay.nn.conv2d(x1, w1)))
    expr = func(x, w) + b

    x2 = relay.var("x2")
    w2 = relay.var("w2")
    func2 = relay.Function([x2, w2], func(x2, w2)).with_attr(
        "PartitionedFromPattern", "nn.conv2d_nn.relu_FunctionCall_"
    )
    expr2 = func2(x, w) + b
    assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_rewrite_function_with_fuzzy_body():
    """Allow Rewriting a function with a fuzzy body via dominator analysis"""
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")

    x1 = relay.var("x1")
    w1 = relay.var("w1")

    wc_x = wildcard()
    wc_w = wildcard()
    wc_b = wildcard()
    wc_x1 = wildcard()
    wc_w1 = wildcard()

    # wildcard() body: any function of two parameters matches.
    func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
    pattern = func_pattern(wc_x, wc_w) + wc_b

    func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
    expr = func(x, w) + b + b

    class TestRewrite(DFPatternCallback):
        def __init__(self):
            super(TestRewrite, self).__init__()
            self.pattern = pattern

        def callback(self, pre, post, node_map):
            return x + w

    out = rewrite(TestRewrite(), expr)
    # The inner (func-call + b) collapses to x + w; the outer + b remains.
    assert tvm.ir.structural_equal(out, x + w + b)
def test_partition_function_with_fuzzy_body():
    """
    Allow Rewriting a function with a fuzzy body via dominator analysis
    """
    x = relay.var("x")
    w = relay.var("w")
    b = relay.var("b")

    x1 = relay.var("x1")
    w1 = relay.var("w1")

    wc_x = wildcard()
    wc_w = wildcard()
    wc_b = wildcard()
    wc_x1 = wildcard()
    wc_w1 = wildcard()

    # wildcard() body: any function of two parameters matches.
    func_pattern = FunctionPattern([wc_x1, wc_w1], wildcard())
    pattern = func_pattern(wc_x, wc_w) + wc_b

    func = relay.Function([x1, w1], relay.nn.conv2d(x1, w1))
    expr = func(x, w) + b + b

    x2 = relay.var("x2")
    w2 = relay.var("w2")
    b2 = relay.var("b2")
    func2 = relay.Function([x2, w2, b2], func(x2, w2) + b2).with_attr(
        "PartitionedFromPattern", "nn.conv2d_FunctionCall_add_"
    )
    expr2 = func2(x, w, b) + b
    assert tvm.ir.structural_equal(pattern.partition(expr), expr2)
def test_match_match():
    """Rewriting must traverse IR containing relay.Match nodes without changes."""
    add_pattern = is_op("add")(wildcard(), wildcard())

    class TestRewrite(DFPatternCallback):
        def __init__(self):
            super(TestRewrite, self).__init__()
            self.pattern = add_pattern

        def callback(self, pre, post, node_map):
            return post.args[0] - post.args[1]

    mod = tvm.IRModule({})
    # The Prelude provides functions whose bodies contain relay.Match.
    tvm.relay.prelude.Prelude(mod)
    # Apply rewrite on IR including relay.Match
    out = rewrite(TestRewrite(), mod["tensor_concatenate_int64"])
    assert tvm.ir.structural_equal(mod["tensor_concatenate_int64"], out)
def test_partition_constant_embedding():
    """Constants are embedded in or lifted out of partitions depending on pattern."""
    x = relay.var("x")
    w = relay.var("w")
    wc = relay.const(1)
    b = relay.var("b")

    # Reference with the constant weight embedded inside the function body.
    xf = relay.var("x")
    wf = relay.var("w")
    bf = relay.var("b")
    embeded_func = relay.Function([xf, bf], conv_bias_relu(xf, wc, bf)).with_attr(
        "PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
    )
    # Reference with the weight lifted out as a function parameter.
    xf = relay.var("x")
    wf = relay.var("w")
    bf = relay.var("b")
    lifted_func = relay.Function([xf, wf, bf], conv_bias_relu(xf, wf, bf)).with_attr(
        "PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
    )
    relu = conv_bias_relu(x, w, b)
    reluc = conv_bias_relu(x, wc, b)

    # Check lifting of wildcard matches
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard())
    )
    assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
    assert tvm.ir.structural_equal(lifted_func(x, wc, b), pattern.partition(reluc))

    # Check lifting of input matches
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var()), wildcard())
    )
    assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
    assert tvm.ir.structural_equal(reluc, pattern.partition(reluc))  # Constants are not Inputs

    # Check embedding of constant matches
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant()), wildcard())
    )
    assert tvm.ir.structural_equal(relu, pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))

    # Check embedding of constant ExprPatterns
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_expr(wc)), wildcard())
    )
    assert tvm.ir.structural_equal(relu, pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))

    # Check lifting/embedding of Alt matches
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_var() | is_constant()), wildcard())
    )
    assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))

    # Check lifting/embedding of Alt matches with the other ordering
    pattern = is_op("nn.relu")(
        is_op("nn.bias_add")(is_op("nn.conv2d")(wildcard(), is_constant() | is_var()), wildcard())
    )
    assert tvm.ir.structural_equal(lifted_func(x, w, b), pattern.partition(relu))
    assert tvm.ir.structural_equal(embeded_func(x, b), pattern.partition(reluc))
def test_rewrite_once():
    """Exercise DFPatternCallback's ``rewrite_once`` flag.

    ``ConcatRewriter`` drops the last argument of every matched ``concatenate``.
    With ``rewrite_once=True`` the callback fires a single time; with
    ``rewrite_once=False`` ``rewrite`` reruns the callback until nothing
    matches anymore.
    """
    # This class recursively removes the arguments to concat until there is nothing left to concatenate.
    class ConcatRewriter(DFPatternCallback):
        def __init__(self, rewrite_once):
            super().__init__(rewrite_once=rewrite_once)
            self.pattern = is_op("concatenate")(None)
        def callback(self, pre, post, node_map):
            # The single input of concatenate is the Tuple of tensors to join.
            concat_args = post.args[0]
            # Remove the last argument
            new_args = [concat_args[i] for i in range(len(concat_args) - 1)]
            if new_args:
                return relay.op.concatenate(relay.expr.Tuple(new_args), axis=0)
            else:
                # Only one tensor left: collapse the concat to that tensor.
                return concat_args[0]
    x = relay.var("x")
    y = relay.var("y")
    z = relay.var("z")
    concat = relay.op.concatenate(relay.expr.Tuple([x, y, z]), axis=0)
    def test_one_callback():
        # Let the rewriter run recursively: concat(x, y, z) shrinks all the way to x.
        out = rewrite(ConcatRewriter(False), concat)
        expected = x
        assert tvm.ir.structural_equal(out, expected)
        # Run the rewriter once: only the last argument (z) is removed.
        out = rewrite(ConcatRewriter(True), concat)
        expected = relay.op.concatenate(relay.expr.Tuple([x, y]), axis=0)
        assert tvm.ir.structural_equal(out, expected)
    def test_multi_callbacks():
        # This class adds a nn.relu operator after each matched nn.softmax.
        class OneMoreReluRewriter(DFPatternCallback):
            def __init__(self, rewrite_once):
                super().__init__(rewrite_once=rewrite_once)
                self.pattern = is_op("nn.softmax")(None)
            def callback(self, pre, post, node_map):
                return relay.nn.relu(post)
        def before():
            # Before:
            #   x  y  z
            #   |  |  |
            #   concat
            #      |
            #   softmax
            return relay.nn.softmax(concat)
        def once_concat():
            # ConcatRewrite once, OneMoreReluRewrite once
            # Expected:
            #    x  y
            #    |  |
            #   concat
            #      |
            #   softmax
            #      |
            #    relu
            return relay.nn.relu(
                relay.nn.softmax(relay.op.concatenate(relay.expr.Tuple([x, y]), axis=0))
            )
        def recursive_concat():
            # ConcatRewrite recursively, OneMoreReluRewrite once
            # Expected:
            #      x
            #      |
            #   softmax
            #      |
            #    relu
            return relay.nn.relu(relay.nn.softmax(x))
        # Run ConcatRewriter once, OneMoreReluRewriter once
        out = rewrite(
            [OneMoreReluRewriter(True), ConcatRewriter(True)],
            before(),
        )
        assert tvm.ir.structural_equal(out, once_concat())
        # Run ConcatRewriter recursively, OneMoreReluRewriter once
        out = rewrite(
            [OneMoreReluRewriter(True), ConcatRewriter(False)],
            before(),
        )
        assert tvm.ir.structural_equal(out, recursive_concat())
    test_one_callback()
    test_multi_callbacks()
def test_matched_outside_but_dominated():
    """In this example the pattern matches the nn.conv2d/add/multiply flow. Even though the
    add output is consumed by the sigmoid, the sigmoid itself is dominated by the multiply.
    So partitioning can proceed, albeit with a duplication of the add."""
    # Original graph: layout transforms around conv2d -> add -> {sigmoid, multiply}.
    in_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
            %0 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
            %1 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
            %2 = expand_dims(%bias, axis=1, num_newaxis=2);
            %3 = expand_dims(%2, axis=0);
            %4 = nn.conv2d(%0, %1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
            %5 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
            %6 = add(%4, %5);
            %7 = sigmoid(%6);
            %8 = multiply(%6, %7);
            layout_transform(%8, src_layout="NHWC", dst_layout="NCHW")
        }
        """
    )
    # Expected result: the conv2d/add/multiply chain is lifted into a partitioned
    # function; the add is duplicated so the sigmoid can still consume it outside.
    expected_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(16, 16, 32, 32), float16], %weight: Tensor[(32, 16, 3, 3), float16], %bias: Tensor[(32), float32]) -> Tensor[(16, 32, 32, 32), float32] {
            %2 = expand_dims(%bias, axis=1, num_newaxis=2);
            %3 = expand_dims(%2, axis=0);
            %4 = layout_transform(%data, src_layout="NCHW", dst_layout="NHWC");
            %5 = layout_transform(%weight, src_layout="OIHW", dst_layout="OHWI");
            %6 = nn.conv2d(%4, %5, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
            %7 = layout_transform(%3, src_layout="NCHW", dst_layout="NHWC");
            %8 = add(%6, %7);
            %9 = sigmoid(%8);
            %10 = fn (%FunctionVar_0_0, %FunctionVar_0_1, %FunctionVar_0_2, %FunctionVar_0_3, PartitionedFromPattern="nn.conv2d_add_multiply_") {
              %0 = nn.conv2d(%FunctionVar_0_0, %FunctionVar_0_1, padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="OHWI", out_dtype="float32");
              %1 = add(%0, %FunctionVar_0_2);
              multiply(%1, %FunctionVar_0_3)
            };
            %11 = %10(%4, %5, %7, %9);
            layout_transform(%11, src_layout="NHWC", dst_layout="NCHW")
        }
        """
    )
    # Pattern: multiply(add(conv2d(*, *), *), *).
    pattern = is_op("multiply")(
        is_op("add")(is_op("nn.conv2d")(wildcard(), wildcard()), wildcard()), wildcard()
    )
    actual_mod = tvm.IRModule.from_expr(pattern.partition(in_mod["main"]))
    actual_mod = relay.transform.InferType()(actual_mod)
    tvm.ir.assert_structural_equal(actual_mod, expected_mod)
def test_partition_parallel_branch_with_same_input():
    """Two consumers of a conv2d on parallel branches (add and multiply) are
    merged into one partition; the partitioned function must not carry a
    duplicated parameter for the shared conv2d input."""
    # Pattern: add(multiply(*, *), add(*, *)).
    mul_branch = is_op("multiply")(wildcard(), wildcard())
    add_branch = is_op("add")(wildcard(), wildcard())
    pattern = is_op("add")(mul_branch, add_branch)

    # Graph under test: conv2d feeds both branches of the final add.
    inp = relay.Var("input")
    weight = relay.Var("weight")
    left = relay.Var("left")
    right = relay.Var("right")
    conv = relay.op.nn.conv2d(inp, weight)
    merged = relay.add(relay.multiply(left, conv), relay.add(conv, right))

    # Expected partitioned function: three parameters, conv passed once.
    lf = relay.Var("leftf")
    mf = relay.Var("midf")
    rf = relay.Var("rightf")
    expected_fn = relay.Function([lf, mf, rf], (lf * mf) + (mf + rf)).with_attr(
        "PartitionedFromPattern", "multiply_add_add_"
    )

    partitioned = pattern.partition(merged)
    reference = expected_fn(left, conv, right)
    assert tvm.ir.structural_equal(partitioned, reference)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 63,867 | 31.43677 | 181 | py |
tvm | tvm-main/tests/python/relay/test_analysis_extract_intermediate_expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import pytest
import tvm
from tvm import relay
def get_conv_net():
    """Build a small module: conv2d -> conv2d -> add -> split -> add(const).

    The first conv2d feeds both the second conv2d and the elementwise add;
    the split produces a tuple whose first field is then incremented:

        conv2d
        /    |
     conv2d  |
        \\    |
      elemwise add
           |
         split
           |
      elemwise add
    """
    shape = (1, 1, 5, 1)
    inp = relay.var("x", shape=shape)
    conv_a = relay.nn.conv2d(inp, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    conv_b = relay.nn.conv2d(conv_a, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    summed = relay.add(conv_a, conv_b)
    pieces = relay.op.split(summed, indices_or_sections=1, axis=0)
    bumped = relay.add(pieces[0], relay.const(1, dtype="float32"))
    return tvm.IRModule.from_expr(bumped)
def get_conv2d():
    """Build a single-operator module: one NHWC conv2d."""
    inp = relay.var("x", shape=(1, 56, 56, 64))
    weight = relay.var("weight1", shape=(3, 3, 64, 32))
    conv = relay.nn.conv2d(
        inp,
        weight,
        channels=32,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    return tvm.IRModule.from_expr(conv)
def test_extract():
    """Extract the sub-graph ending at each intermediate expression of
    `get_conv_net` and compare against hand-built expectations.

    Expression indices 0..5 select successively larger prefixes of the net;
    index 5 (past the last op) yields the whole module back unchanged.
    """
    dshape = (1, 1, 5, 1)
    def before():
        return get_conv_net()
    def expected_0():
        # Prefix up to the first conv2d.
        x = relay.var("x", shape=dshape)
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        return tvm.IRModule.from_expr(y)
    def expected_1():
        # Prefix up to the second conv2d.
        x = relay.var("x", shape=dshape)
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        return tvm.IRModule.from_expr(x1)
    def expected_2():
        # Prefix up to the elementwise add.
        x = relay.var("x", shape=dshape)
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        z = relay.add(y, x1)
        return tvm.IRModule.from_expr(z)
    def expected_3():
        # Prefix up to the split (the whole tuple output).
        x = relay.var("x", shape=dshape)
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        z = relay.add(y, x1)
        tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
        return tvm.IRModule.from_expr(tuple_out.astuple())
    def expected_4():
        # check tuple node
        x = relay.var("x", shape=dshape)
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
        z = relay.add(y, x1)
        tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
        return tvm.IRModule.from_expr(tuple_out[0])
    # NOTE(review): "extract_intermdeiate_expr" (sic) is the name this code
    # actually calls — the misspelling appears to be part of the TVM API
    # surface itself; do not "fix" it only here.
    assert tvm.ir.structural_equal(
        relay.analysis.extract_intermdeiate_expr(before(), 0), expected_0()
    )
    assert tvm.ir.structural_equal(
        relay.analysis.extract_intermdeiate_expr(before(), 1), expected_1()
    )
    assert tvm.ir.structural_equal(
        relay.analysis.extract_intermdeiate_expr(before(), 2), expected_2()
    )
    assert tvm.ir.structural_equal(
        (relay.analysis.extract_intermdeiate_expr(before(), 3)), expected_3()
    )
    assert tvm.ir.structural_equal(
        relay.analysis.extract_intermdeiate_expr(before(), 4), expected_4()
    )
    assert tvm.ir.structural_equal(relay.analysis.extract_intermdeiate_expr(before(), 5), before())
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 4,495 | 33.320611 | 99 | py |
tvm | tvm-main/tests/python/relay/test_ir_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.op import op as _op
def test_op_attr():
    """Registering an attribute on one op must not leak onto other ops."""
    log_op = relay.op.get("log")

    @tvm.ir.register_op_attr("exp", "ftest")
    def _exp_ftest(x):
        return x + 1

    assert log_op.num_inputs == 1
    # "ftest" was registered on "exp" only.
    assert log_op.get_attr("ftest") is None
    assert relay.op.get("exp").get_attr("ftest")(1) == 2
def test_op_reset_attr():
    """reset_attr removes one attribute of one op and leaves everything else."""

    def plus_one(x):
        return x + 1

    def plus_two(x):
        return x + 2

    # Register fadd1 on exp and log, and fadd2 on log.
    tvm.ir.register_op_attr("exp", "fadd1", plus_one)
    tvm.ir.register_op_attr("log", "fadd1", plus_one)
    tvm.ir.register_op_attr("log", "fadd2", plus_two)

    # Reset fadd1 on log only.
    log_op = relay.op.get("log")
    log_op.reset_attr("fadd1")

    # fadd1 is gone from log ...
    assert log_op.get_attr("fadd1") is None
    # ... but still present on exp, and log's other attrs are intact.
    assert relay.op.get("exp").get_attr("fadd1")(1) == 2
    assert relay.op.get("log").get_attr("fadd2")(1) == 3
def test_op_temp_attr():
    """TempOpAttr shadows an attribute inside the scope and restores it after."""

    def plus_one(x):
        return x + 1

    def plus_two(x):
        return x + 2

    # Permanent value is plus_one; temporarily override with plus_two.
    tvm.ir.register_op_attr("sqrt", "ftest", plus_one)
    with TempOpAttr("sqrt", "ftest", plus_two):
        assert relay.op.get("sqrt").get_attr("ftest")(1) == 3
    # The original value is back once the scope exits.
    assert relay.op.get("sqrt").get_attr("ftest")(1) == 2
def test_op_level1():
    """Level-1 unary ops expose the right name, support level, and argument."""
    x = relay.Var("x")
    for name in ("log", "exp", "sqrt", "rsqrt", "tanh"):
        call = getattr(relay, name)(x)
        assert (call.op.name, call.op.support_level) == (name, 1)
        assert call.args[0] == x
def test_op_level3():
    """Level-3 unary ops expose the right name, support level, and argument."""
    x = relay.Var("x")
    for name in ("ceil", "floor", "trunc", "round", "abs", "negative"):
        call = getattr(relay, name)(x)
        assert (call.op.name, call.op.support_level) == (name, 3)
        assert call.args[0] == x
def test_op_register():
    """Create a brand-new op in the registry and read back its properties."""
    name = "custom_op"
    _op.register(name, r"code(Add two tensor with inner broadcasting.)code")
    op = _op.get(name)
    op.set_num_inputs(2)
    op.add_argument("data_0", "Tensor", "The input data tensor.")
    op.add_argument("data_1", "Tensor", "The input data tensor.")
    # Use the default relation function for type inference.
    op.add_type_rel("Identity")
    op.set_support_level(1)
    _op.register_pattern(name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(name, False)

    assert op.name == name
    assert op.num_inputs == 2
    assert op.get_attr("TOpPattern") == _op.OpPattern.ELEMWISE
    assert op.get_attr("TOpIsStateful") == False
# Run each test in declaration order when invoked as a plain script.
if __name__ == "__main__":
    test_op_attr()
    test_op_reset_attr()
    test_op_temp_attr()
    test_op_level1()
    test_op_level3()
    test_op_register()
| 3,952 | 29.407692 | 79 | py |
tvm | tvm-main/tests/python/relay/test_any.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.loops import while_loop
from tvm.relay.testing import run_infer_type as infer_type
from tvm.topi.testing import searchsorted_ref
from utils import ref_funcs
from utils.assert_diagnostic import DiagnosticTesting
def int32(val):
    """Wrap ``val`` as a relay int32 constant."""
    return relay.const(val, dtype="int32")
def any_dims(ndim):
    """Return a shape tuple of ``ndim`` fully-symbolic (relay.Any) dimensions."""
    return tuple(relay.Any() for _ in range(ndim))
def check_result(
    args,
    mod,
    expected,
    flatten=False,
    assert_shape=False,
    only_vm=False,
    targets=None,
    disable_targets=None,
):
    """Evaluate ``mod["main"]`` with the debug and vm executors on every
    target and compare against ``expected``.

    Parameters
    ----------
    args : list
        Positional numpy inputs for the module.
    mod : tvm.IRModule
        Module whose "main" function is evaluated.
    expected : np.ndarray or list or shape tuple
        Expected output(s); with ``assert_shape`` only shapes are compared.
    flatten : bool
        Flatten both result and expectation before the allclose comparison.
    assert_shape : bool
        Compare ``result.shape`` to ``expected`` instead of values.
    only_vm : bool
        Skip the debug executor entirely.
    targets : list of (target, device), optional
        Targets to run on; defaults to all enabled targets.
    disable_targets : collection of str, optional
        Target strings to skip.
    """
    if not isinstance(expected, list):
        expected = [expected]
    # Resolve the default target list once, not on every executor-kind iteration.
    targets = targets or tvm.testing.enabled_targets()
    for kind in ["debug", "vm"]:
        for tgt, dev in targets:
            if disable_targets and tgt in disable_targets:
                continue
            # The debug executor is only run on CPU devices and when not only_vm.
            if kind == "debug" and (only_vm or dev.device_type != tvm.cpu().device_type):
                continue
            result = relay.create_executor(kind, mod=mod, device=dev, target=tgt).evaluate()(*args)
            # Tuple results come back as an ADT; normalize to a list of arrays.
            if isinstance(result, tvm.runtime.container.ADT):
                result = [r.numpy() for r in result]
            else:
                result = [result.numpy()]
            for r, e in zip(result, expected):
                if assert_shape:
                    assert r.shape == e, "Shape mismatch: expect %s but got %s." % (
                        str(e),
                        str(r),
                    )
                else:
                    if flatten:
                        r = r.flatten()
                        e = e.flatten()
                    tvm.testing.assert_allclose(r, e, atol=2e-6)
def verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
    """Run a broadcasting binary ``op`` with symbolic shapes, compare to numpy."""
    dtype = "float32"
    lhs = relay.var("x", shape=x_shape, dtype=dtype)
    rhs = relay.var("y", shape=y_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([lhs, rhs], op(lhs, rhs))
    lhs_np = np.random.uniform(size=x_np_shape).astype(dtype)
    rhs_np = np.random.uniform(size=y_np_shape).astype(dtype)
    check_result([lhs_np, rhs_np], mod, np_op(lhs_np, rhs_np))
@tvm.testing.uses_gpu
def test_any_broadcast():
    """Broadcast with Any dims, both against size-1 axes and equal-size axes."""
    cases = [
        # Broadcast against dimensions of size 1.
        ((relay.Any(),), (3, 2), (1,), (3, 2)),
        ((relay.Any(), 2), (1, 2), (1, 2), (1, 2)),
        ((relay.Any(), 2), (1, 2), (3, 2), (1, 2)),
        ((relay.Any(), 2), (3, 2), (1, 2), (3, 2)),
        ((relay.Any(), 2), (3, relay.Any()), (1, 2), (3, 1)),
        # Broadcast with values other than 1.
        ((relay.Any(),), (3, 2), (2,), (3, 2)),
        ((relay.Any(), 2), (3, 2), (3, 2), (3, 2)),
    ]
    for x_shape, y_shape, x_np_shape, y_np_shape in cases:
        verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, relay.add, np.add)
def verify_any_elemwise(x_shape, x_np_shape, op, np_op):
    """Run a unary elementwise ``op`` with a symbolic shape, compare to numpy."""
    dtype = "float32"
    inp = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], op(inp))
    inp_np = np.random.uniform(size=x_np_shape).astype(dtype)
    check_result([inp_np], mod, np_op(inp_np))
@tvm.testing.uses_gpu
def test_any_elemwise():
    """Unary elementwise ops over various symbolic ranks."""
    cases = [
        ((relay.Any(),), (3,), relay.sqrt, np.sqrt),
        ((relay.Any(), 2), (5, 2), relay.negative, np.negative),
        ((relay.Any(), relay.Any()), (5, 4), relay.exp, np.exp),
        ((relay.Any(),), (3,), relay.round, np.round),
    ]
    for shape, np_shape, op, np_op in cases:
        verify_any_elemwise(shape, np_shape, op, np_op)
@tvm.testing.uses_gpu
def test_any_broadcast_fail():
    """Shapes that turn out incompatible at runtime must raise a TVMError."""

    def check_fail(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op):
        raised = False
        try:
            verify_any_broadcast(x_shape, y_shape, x_np_shape, y_np_shape, op, np_op)
        except tvm._ffi.base.TVMError:
            raised = True
        assert raised

    check_fail((relay.Any(),), (3, 2), (1,), (4, 2), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, 2), (4, 2), (4, 2), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, relay.Any()), (1, 2), (4, 1), relay.add, np.add)
    check_fail((relay.Any(), 2), (3, 3), (1, 3), (3, 3), relay.add, np.add)
    check_fail((relay.Any(),), (3, 2), (2), (4, 2), relay.add, np.add)
def verify_any_full_like(x_shape, x_np_shape, relay_op, np_op, dtype="float32"):
    """Check a *_like op (zeros_like/ones_like) under symbolic input shapes."""
    inp = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay_op(inp))
    inp_np = np.random.uniform(size=x_np_shape).astype(dtype)
    check_result([inp_np], mod, np_op(inp_np))
@tvm.testing.uses_gpu
def test_any_full_like():
    """zeros_like / ones_like over fully symbolic inputs."""
    cases = [
        ((2, 3, 5), relay.zeros_like, np.zeros_like, "float32"),
        ((225, 115, 15), relay.zeros_like, np.zeros_like, "float32"),
        ((10, 11, 12, 13, 14), relay.zeros_like, np.zeros_like, "int32"),
        ((2, 3, 5), relay.ones_like, np.ones_like, "float32"),
        ((225, 115, 15), relay.ones_like, np.ones_like, "float32"),
        ((10, 11, 12, 13, 14), relay.ones_like, np.ones_like, "int32"),
    ]
    for np_shape, relay_op, np_op, dtype in cases:
        verify_any_full_like(any_dims(len(np_shape)), np_shape, relay_op, np_op, dtype)
def verify_any_full(x_np_shape, relay_op, np_op, dtype="float32", value=None):
    """Check a full-style op where the output shape arrives as a runtime tensor."""
    shape_var = relay.var("x", shape=(len(x_np_shape),), dtype="int32")
    if value is None:
        out = relay_op(shape_var, dtype)
    else:
        # relay.full takes the fill value first, then the shape tensor.
        out = relay_op(relay.expr.const(value), shape_var, dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([shape_var], out)
    ref = np_op(x_np_shape) if value is None else np_op(x_np_shape, value)
    check_result([np.array(x_np_shape).astype("int32")], mod, ref)
@tvm.testing.uses_gpu
def test_any_full():
    """zeros / ones / full driven by a runtime shape tensor."""
    cases = [
        ((2, 3, 5), relay.zeros, np.zeros, "float32", None),
        ((225, 115, 15), relay.zeros, np.zeros, "float32", None),
        ((10, 11, 12, 13, 14), relay.zeros, np.zeros, "int32", None),
        ((2, 3, 5), relay.ones, np.ones, "float32", None),
        ((225, 115, 15), relay.ones, np.ones, "float32", None),
        ((10, 11, 12, 13, 14), relay.ones, np.ones, "int32", None),
        ((10, 11, 12, 13, 14), relay.full, np.full, "float32", 2.0),
        ((1, 2, 3, 4), relay.full, np.full, "int32", -2),
    ]
    for np_shape, relay_op, np_op, dtype, value in cases:
        verify_any_full(np_shape, relay_op, np_op, dtype, value)
@tvm.testing.uses_gpu
def test_any_concat():
    """Concatenate along a dynamic leading dimension, plus static shape inference."""
    # Two-input concat with elementwise preprocessing on each input.
    lhs = relay.var("x", shape=(relay.Any(), 2), dtype="float32")
    rhs = relay.var("y", shape=(1, 2), dtype="float32")
    shifted = lhs - relay.expr.const(3.0)
    scaled = rhs * relay.expr.const(5.0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([lhs, rhs], relay.op.concatenate([shifted, scaled], axis=0))
    lhs_np = np.random.uniform(size=(3, 2)).astype("float32")
    rhs_np = np.random.uniform(size=(1, 2)).astype("float32")
    check_result([lhs_np, rhs_np], mod, np.concatenate([lhs_np - 3.0, rhs_np * 5.0], axis=0))

    # Concat with many inputs.
    num_inputs = 25
    in_vars = [relay.var("x", shape=(relay.Any(),), dtype="float32") for _ in range(num_inputs)]
    mod = tvm.IRModule()
    mod["main"] = relay.Function(in_vars, relay.op.concatenate(in_vars, axis=0))
    in_data = [np.random.uniform(size=(1,)).astype("float32") for _ in range(num_inputs)]
    check_result(in_data, mod, np.concatenate(in_data, axis=0))

    # Static type inference for mixed static/dynamic inputs.
    def check_oshape(concat_vars, axis, oshape):
        mod = tvm.IRModule()
        mod["main"] = relay.Function(concat_vars, relay.op.concatenate(concat_vars, axis=axis))
        typed_mod = relay.transform.InferType()(mod)
        assert typed_mod["main"].body.checked_type == relay.TensorType(oshape, dtype="float32")

    concat_vars = [relay.var("x", shape=(relay.Any(), 3), dtype="float32") for _ in range(3)]
    concat_vars.append(relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32"))
    check_oshape(concat_vars, 0, (relay.Any(), 3))
    check_oshape(concat_vars, 1, (relay.Any(), relay.Any()))

    # [(1, 3), (1, ?)] -> (2, ?)
    concat_vars = [
        relay.var("x", shape=(1, 3), dtype="float32"),
        relay.var("x", shape=(1, relay.Any()), dtype="float32"),
    ]
    check_oshape(concat_vars, 0, (2, relay.Any()))
    check_oshape(concat_vars, 1, (1, relay.Any()))
def verify_any_reshape(x_shape, newshape, x_np_shape, out_shape, variable_newshape=False):
    """Check reshape on a symbolic input; optionally feed newshape at runtime."""
    inp = relay.var("x", shape=x_shape, dtype="float32")
    activated = relay.nn.relu(inp)
    data = np.random.uniform(size=x_np_shape).astype("float32")
    expected = data.reshape(out_shape)
    params = [inp]
    args = [data]
    if variable_newshape:
        # Supply the target shape as a runtime tensor instead of a static attribute.
        shape_param = relay.var("newshape", shape=(len(newshape),), dtype="int64")
        params.append(shape_param)
        args.append(np.array(newshape, dtype="int64"))
        newshape = shape_param
    mod = tvm.IRModule()
    mod["main"] = relay.Function(params, relay.reshape(activated, newshape=newshape))
    check_result(args, mod, expected)
@tvm.testing.uses_gpu
def test_any_reshape():
    """Reshape of symbolic inputs with static and runtime-tensor newshapes."""
    for variable_newshape in [False, True]:
        # Variable newshape only supports an output rank equal to len(newshape).
        verify_any_reshape(any_dims(3), (1, -1), (2, 3, 4), (1, 24), variable_newshape)
        verify_any_reshape(any_dims(3), (0, -1), (2, 3, 4), (2, 12), variable_newshape)
    # The remaining cases use the static-attribute newshape only.
    verify_any_reshape(any_dims(3), (0, -2), (2, 3, 4), (2, 3, 4))
    verify_any_reshape(any_dims(3), (-4, -1, 2, -3), (6, 3, 4), (3, 2, 12))
    verify_any_reshape(any_dims(3), (-4, 2, -1, -2), (6, 3, 4), (2, 3, 3, 4))
    verify_any_reshape(any_dims(3), (1, -1, 0), (2, 3, 4), (1, 6, 4))
    verify_any_reshape(any_dims(3), (-1, 1, 0), (2, 3, 4), (6, 1, 4))
def verify_any_one_hot(indices_shape, indices_np_shape, depth, on_value, off_value, axis, dtype):
    """Check one_hot with symbolic index shapes against the topi reference."""
    indices = relay.var("indices", shape=indices_shape, dtype="int32")
    out = relay.one_hot(
        indices,
        relay.const(on_value, dtype),
        relay.const(off_value, dtype),
        depth,
        axis=axis,
        dtype=dtype,
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([indices], out)
    indices_np = np.random.randint(0, depth, size=indices_np_shape).astype("int32")
    ref = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
    check_result([indices_np], mod, ref)
@tvm.testing.uses_gpu
def test_any_one_hot():
    """one_hot across different ranks, dtypes, and axes."""
    cases = [
        (1, (3,), 3, 1, 0, -1, "int32"),
        (2, (2, 2), 5, 0.5, -0.5, 1, "float32"),
        (4, (3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32"),
    ]
    for rank, np_shape, depth, on_value, off_value, axis, dtype in cases:
        verify_any_one_hot(any_dims(rank), np_shape, depth, on_value, off_value, axis, dtype)
def verify_any_argwhere(x_shape, x_np_shape, dtype="bool"):
    """Check argwhere (data-dependent output shape) against numpy."""
    inp = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.argwhere(inp))
    data = np.random.choice([0, 1, 2, 3], size=x_np_shape).astype(dtype)
    check_result([data], mod, np.argwhere(data), flatten=True)
@tvm.testing.uses_gpu
def test_any_argwhere():
    """argwhere over ranks 1-5 and dtypes bool/int32/int8."""
    cases = [
        (1, (5,), "bool"),
        (2, (5, 5), "bool"),
        (2, (5, 5), "int32"),
        (2, (5, 5), "int8"),
        (3, (5, 5, 5), "bool"),
        (4, (5, 5, 5, 5), "bool"),
        (5, (5, 5, 5, 5, 5), "bool"),
        (1, (5,), "int32"),
        (3, (5, 5, 5), "int32"),
        (4, (5, 5, 5, 5), "int32"),
        (5, (5, 5, 5, 5, 5), "int32"),
        (1, (5,), "int8"),
        (3, (5, 5, 5), "int8"),
        (4, (5, 5, 5, 5), "int8"),
        (5, (5, 5, 5, 5, 5), "int8"),
    ]
    for rank, np_shape, dtype in cases:
        verify_any_argwhere(any_dims(rank), np_shape, dtype)
def verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape):
    """Check take with symbolic data/indices shapes against numpy."""
    data = relay.var("data", shape=data_shape, dtype="float32")
    indices = relay.var("indices", shape=indices_shape, dtype="int32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data, indices], relay.take(data, indices, axis=axis))
    data_np = np.random.uniform(size=data_np_shape).astype("float32")
    # axis=None indexes into the flattened input (numpy semantics).
    max_index = data_np.size if axis is None else data_np.shape[axis]
    indices_np = np.random.randint(max_index, size=indices_np_shape).astype("int32")
    check_result([data_np, indices_np], mod, np.take(data_np, indices_np, axis=axis))
@tvm.testing.uses_gpu
def test_any_take():
    """take with symbolic shapes: scalar, positive, negative, and None axes."""
    cases = [
        (any_dims(2), (1,), 0, (4, 5), (1,)),
        (any_dims(2), (), 0, (4, 5), ()),
        (any_dims(2), (), None, (4, 5), ()),
        (any_dims(3), any_dims(2), 1, (3, 4, 5), (2, 3)),
        (any_dims(2), any_dims(3), None, (4, 5), (2, 3, 4)),
        (any_dims(2), any_dims(4), -1, (4, 5), (2, 3, 4, 5)),
    ]
    for data_shape, indices_shape, axis, data_np_shape, indices_np_shape in cases:
        verify_any_take(data_shape, indices_shape, axis, data_np_shape, indices_np_shape)
def verify_any_tile(dshape, reps, np_dshape, np_reps):
    """Check tile with symbolic shapes against numpy."""
    inp = relay.var("x", shape=dshape, dtype="float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.tile(inp, reps=reps))
    data = np.random.uniform(size=np_dshape).astype("float32")
    check_result([data], mod, np.tile(data, reps=np_reps))
@tvm.testing.uses_gpu
def test_any_tile():
    """tile over symbolic inputs, with reps shorter or longer than the rank."""
    for rank, reps, np_shape, np_reps in [
        (3, (3, 2, 1), (2, 3, 4), (3, 2, 1)),
        (3, (1, 2), (2, 3, 4), (1, 2)),
        (2, (3, 2, 1), (2, 3), (3, 2, 1)),
        (3, (1,), (2, 3, 4), (1,)),
    ]:
        verify_any_tile(any_dims(rank), reps, np_shape, np_reps)
@tvm.testing.uses_gpu
def test_any_shape_of():
    """shape_of on fully dynamic inputs: the whole shape and a single dim."""
    # Full shape vector.
    inp = relay.var("x", shape=any_dims(2), dtype="float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.shape_of(inp))
    data = np.random.uniform(size=(3, 4)).astype("float32")
    check_result([data], mod, np.array([3, 4]).astype("int64"))

    # A single dimension picked out of the shape vector.
    inp = relay.var("x", shape=any_dims(3), dtype="float32")
    dim = relay.take(relay.shape_of(inp), relay.const(1, "int32"))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], dim)
    data = np.random.uniform(size=(2, 3, 4)).astype("float32")
    check_result([data], mod, np.array(3).astype("int64"))
class TestAnyReduce:
    """Output-shape checks for reduction ops with symbolic input shapes."""

    # op, symbolic data shape, axis, exclude, keepdims, concrete input shape,
    # expected output shape
    config = {
        "argmax": (relay.argmax, any_dims(3), None, False, False, (3, 4, 5), ()),
        "argmin": (relay.argmin, any_dims(4), 1, False, True, (3, 4, 5, 6), (3, 1, 5, 6)),
        "all": (relay.all, any_dims(3), (1, 2), True, False, (3, 4, 5), (4, 5)),
        "max": (relay.max, any_dims(4), -1, True, True, (3, 4, 5, 6), (1, 1, 1, 6)),
        "min": (relay.min, any_dims(3), (0, 1), False, False, (4, 5, 6), (6,)),
        "prod": (relay.prod, any_dims(4), 2, True, True, (3, 4, 5, 6), (1, 1, 5, 1)),
        "mean": (relay.mean, any_dims(2), 0, False, False, (1, 2), (2,)),
        "variance": (relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6)),
    }

    (
        reduce_op,
        data_shape,
        axis,
        exclude,
        keepdims,
        static_data_shape,
        ref_out_shape,
    ) = tvm.testing.parameters(*config.values(), ids=config.keys())

    def test_any_reduce(
        self,
        target,
        dev,
        reduce_op,
        data_shape,
        axis,
        exclude,
        keepdims,
        static_data_shape,
        ref_out_shape,
    ):
        # Imported locally: pytest does not appear in this module's top-level
        # imports, so an unqualified reference here would raise a NameError.
        import pytest

        target = tvm.target.Target(target)
        if target.kind.name == "vulkan" and reduce_op == relay.all:
            pytest.xfail("Known failing test case for vulkan runtime")
        mod = tvm.IRModule()
        # relay.all reduces booleans; all other ops here reduce float data.
        dtype = "bool" if reduce_op == relay.all else "float32"
        data = relay.var("data", shape=data_shape, dtype=dtype)
        y = reduce_op(data, axis, keepdims, exclude)
        mod["main"] = relay.Function([data], y)
        data_np = np.random.uniform(size=static_data_shape).astype(dtype)
        check_result([data_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)])
def verify_any_layout_transform(
    data_shape, src_layout, dst_layout, static_data_shape, ref_out_shape
):
    """Check layout_transform output shapes under symbolic input shapes."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.layout_transform(inp, src_layout, dst_layout))
    data = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_layout_transform():
    """layout_transform across plain, packed, and batch-packed layouts."""
    cases = [
        (any_dims(4), "NCHW", "NHWC", (3, 4, 5, 6), (3, 5, 6, 4)),
        (any_dims(5), "NCHW16c", "NCHW2c", (1, 2, 8, 8, 16), (1, 16, 8, 8, 2)),
        (any_dims(5), "NCHW6n", "NHWC", (3, 4, 5, 6, 6), (18, 5, 6, 4)),
        (any_dims(4), "NCHW", "NCHW4c", (3, 4, 5, 6), (3, 1, 5, 6, 4)),
        ((16, 1), "CH", "C4cH", (16, 1), (4, 4, 1)),
    ]
    for shape, src, dst, np_shape, out_shape in cases:
        verify_any_layout_transform(shape, src, dst, np_shape, out_shape)
def test_bilayout_with_any():
    """BijectiveLayout shape mapping must accept relay.Any dimensions."""
    layout_map = tvm.tir.bijective_layout("NCHW", "NHWC")
    assert isinstance(layout_map, tvm.tir.BijectiveLayout)
    forward = layout_map.forward_shape((relay.Any(), 32, 7, relay.Any()))
    # The static channel dim must survive the NCHW -> NHWC mapping ...
    assert forward[3] == 32
    # ... and the round trip back.
    backward = layout_map.backward_shape(forward)
    assert backward[1] == 32
def verify_any_expand_dims(data_shape, axis, num_newaxis, static_data_shape, ref_out_shape):
    """Check expand_dims output shapes under symbolic input shapes."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.expand_dims(inp, axis=axis, num_newaxis=num_newaxis))
    data = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_expand_dims():
    """Insert two new axes at a positive and at a negative axis position."""
    for axis, out_shape in [(1, (1, 1, 1, 2, 3)), (-1, (1, 2, 3, 1, 1))]:
        verify_any_expand_dims(any_dims(3), axis, 2, (1, 2, 3), out_shape)
def verify_any_transpose(data_shape, axes, static_data_shape):
    """Check transpose with symbolic shapes against numpy."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.transpose(inp, axes=axes))
    data = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data], mod, np.transpose(data, axes))
@tvm.testing.uses_gpu
def test_any_transpose():
    """transpose with explicit, default (None), and negative axes."""
    cases = [
        (3, (1, 0, 2), (10, 3, 2)),
        (3, None, (2, 3, 4)),  # None reverses the axis order (numpy semantics)
        (6, (0, 1, 3, 2, 5, 4), (11, 12, 2, 1, 9, 17)),
        (2, (-1, 0), (3, 2)),  # negative axis indices
    ]
    for rank, axes, np_shape in cases:
        verify_any_transpose(any_dims(rank), axes, np_shape)
def verify_any_squeeze(data_shape, axis, static_data_shape):
    """Check squeeze with symbolic shapes against numpy."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], relay.squeeze(inp, axis=axis))
    data = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data], mod, np.squeeze(data, axis))
def verify_any_squeeze_sqrt(data_shape, axis, static_data_shape):
    """Like verify_any_squeeze, but the squeezed tensor feeds a sqrt so the
    result is consumed by a follow-up op."""
    dtype = "float32"
    inp = relay.var("data", shape=data_shape, dtype=dtype)
    body = relay.sqrt(relay.squeeze(inp, axis=axis))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([inp], body)
    data = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data], mod, np.sqrt(np.squeeze(data, axis)))
@tvm.testing.uses_gpu
def test_any_squeeze():
    """Squeeze unit axes while other dims stay symbolic."""
    squeeze_cases = [
        ((relay.Any(), relay.Any(), relay.Any()), (0,), (1, 9, 8)),
        ((1, relay.Any(), relay.Any()), (0,), (1, 9, 8)),
        ((1, relay.Any(), relay.Any(), 1, relay.Any(), relay.Any()), (0, 3), (1, 12, 2, 1, 9, 17)),
    ]
    for shape, axis, np_shape in squeeze_cases:
        verify_any_squeeze(shape, axis, np_shape)
    # Squeeze followed by a consumer op.
    sqrt_cases = [
        ((1, relay.Any(), 12, 32, 1), (-1,), (1, 100, 12, 32, 1)),
        ((relay.Any(), relay.Any(), relay.Any(), 1), (-1,), (1, 9, 8, 1)),
    ]
    for shape, axis, np_shape in sqrt_cases:
        verify_any_squeeze_sqrt(shape, axis, np_shape)
@tvm.testing.uses_gpu
def test_any_reshape_like():
    """reshape_like where both the data and the shape-providing tensor have a dynamic batch."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=(relay.Any(), 3, 10), dtype=dtype)
    shape_like = relay.var("data", shape=(relay.Any(), 5, 6), dtype=dtype)
    y = relay.reshape_like(data, shape_like)
    mod["main"] = relay.Function([data, shape_like], y)
    data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
    shape_like_np = np.random.uniform(size=(3, 5, 6)).astype(dtype)
    # The output must take the shape of shape_like, not of data.
    check_result([data_np, shape_like_np], mod, shape_like_np.shape, assert_shape=True)
def verify_any_conv2d(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    static_data_shape,
    ref_out_shape,
    data_layout="NCHW",
    kernel_layout="OIHW",
    use_cudnn=False,
    targets=None,
    disable_targets=None,
):
    """Check conv2d output shape for a dynamically-shaped input.

    With use_cudnn=True the test is redirected to the cudnn-backed CUDA
    target (only when the cudnn packed function is registered).
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    y = relay.nn.conv2d(
        data,
        kernel,
        strides,
        padding,
        dilation,
        # Kernel H/W position depends on the kernel layout.
        kernel_size=kernel_shape[2:4] if kernel_layout == "OIHW" else kernel_shape[0:2],
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    mod["main"] = relay.Function([data, kernel], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    # Override targets only when cudnn is both requested and available.
    if use_cudnn and tvm.get_global_func("tvm.contrib.cudnn.conv2d.forward", True):
        targets = [("cuda -libs=cudnn", tvm.cuda(0))]
    check_result(
        [data_np, kernel_np],
        mod,
        ref_out_shape,
        assert_shape=True,
        targets=targets,
        disable_targets=disable_targets,
    )
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d():
    """conv2d with a dynamic batch (and, for llvm only, dynamic H/W) in NCHW and NHWC."""
    # Dynamic batch, NCHW/OIHW.
    verify_any_conv2d(
        (relay.Any(), 64, 224, 224),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        (1, 64, 224, 224),
        (1, 64, 224, 224),
    )
    # Dilation 2 shrinks the spatial dims.
    verify_any_conv2d(
        (relay.Any(), 64, 224, 224),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (2, 2),
        (2, 64, 224, 224),
        (2, 64, 222, 222),
    )
    # Same as the first case but routed through cudnn when available.
    verify_any_conv2d(
        (relay.Any(), 64, 224, 224),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        (1, 64, 224, 224),
        (1, 64, 224, 224),
        use_cudnn=True,
    )
    # NHWC/HWIO layout variants.
    verify_any_conv2d(
        (relay.Any(), 224, 224, 64),
        (3, 3, 64, 64),
        (1, 1),
        (1, 1),
        (1, 1),
        (1, 224, 224, 64),
        (1, 224, 224, 64),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    verify_any_conv2d(
        (relay.Any(), 224, 224, 64),
        (3, 3, 64, 64),
        (1, 1),
        (1, 1),
        (2, 2),
        (2, 224, 224, 64),
        (2, 222, 222, 64),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    # Fully dynamic H/W is only supported on llvm (see TODO above).
    verify_any_conv2d(
        (relay.Any(), 64, relay.Any(), relay.Any()),
        (64, 64, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        (1, 64, 224, 224),
        (1, 64, 224, 224),
        targets=[("llvm", tvm.cpu(0))],
    )
    verify_any_conv2d(
        (relay.Any(), 64, relay.Any(), relay.Any()),
        (64, 64, 1, 1),
        (1, 1),
        (0, 0),
        (1, 1),
        (1, 64, 224, 224),
        (1, 64, 224, 224),
        targets=[("llvm", tvm.cpu(0))],
    )
class TestAnyConv2dNCHWc:
    """contrib_conv2d_nchwc with a dynamic batch in the blocked NCHW8c layout."""

    data_shape = tvm.testing.parameter((relay.Any(), 8, 224, 224, 8))
    kernel_shape = tvm.testing.parameter((8, 8, 3, 3, 8, 8))
    strides = tvm.testing.parameter((1, 1))
    padding = tvm.testing.parameter((1, 1))
    data_layout = tvm.testing.parameter("NCHW8c")
    kernel_layout = tvm.testing.parameter("OIHW8i8o")
    out_layout = tvm.testing.parameter("NCHW8c")
    # Dilation 1 keeps the spatial dims; dilation 2 shrinks them by 2.
    dilation, static_data_shape, ref_out_shape = tvm.testing.parameters(
        ((1, 1), (1, 8, 224, 224, 8), (1, 8, 224, 224, 8)),
        ((2, 2), (2, 8, 224, 224, 8), (2, 8, 222, 222, 8)),
    )
    @tvm.testing.known_failing_targets("cuda", "vulkan")
    def test_any_conv2d_NCHWc(
        self,
        target,
        dev,
        data_shape,
        kernel_shape,
        strides,
        padding,
        dilation,
        data_layout,
        kernel_layout,
        out_layout,
        static_data_shape,
        ref_out_shape,
    ):
        """Build the blocked conv2d and compare only the output shape."""
        mod = tvm.IRModule()
        dtype = "float32"
        data = relay.var("data", shape=data_shape, dtype=dtype)
        kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
        y = relay.nn.contrib_conv2d_nchwc(
            data,
            kernel,
            strides,
            padding,
            dilation,
            kernel_size=kernel_shape[2:4],
            # Unblocked output channels = outer O dim * inner o-block.
            channels=kernel_shape[0] * kernel_shape[-1],
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_layout=out_layout,
        )
        mod["main"] = relay.Function([data, kernel], y)
        data_np = np.random.uniform(size=static_data_shape).astype(dtype)
        kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
        check_result(
            [data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
        )
def verify_any_conv1d_transpose_ncw(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    groups,
    static_data_shape,
    ref_out_shape,
    output_padding,
):
    """Check conv1d_transpose (NCW) output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    y = relay.nn.conv1d_transpose(
        data,
        kernel,
        strides,
        padding,
        dilation,
        groups,
        kernel_size=kernel_shape[2:],
        output_padding=output_padding,
    )
    mod["main"] = relay.Function([data, kernel], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_conv1d_transpose_ncw():
    """conv1d_transpose with dynamic batch: stride 1, and stride 2 with output padding."""
    verify_any_conv1d_transpose_ncw(
        (relay.Any(), 64, 224),
        (64, 192, 3),
        (1,),
        (1,),
        (1,),
        1,
        (2, 64, 224),
        (2, 192, 224),
        (0, 0),
    )
    # Stride 2 doubles the output width (224 -> 448 with output_padding).
    verify_any_conv1d_transpose_ncw(
        (relay.Any(), 32, 224),
        (32, 64, 3),
        (2,),
        (1,),
        (1,),
        1,
        (1, 32, 224),
        (1, 64, 448),
        (1, 1),
    )
def verify_any_conv2d_transpose_nchw(
    data_shape,
    kernel_shape,
    strides,
    padding,
    dilation,
    groups,
    static_data_shape,
    ref_out_shape,
    output_padding,
):
    """Check conv2d_transpose (NCHW) output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype)
    y = relay.nn.conv2d_transpose(
        data,
        kernel,
        strides,
        padding,
        dilation,
        groups,
        kernel_size=kernel_shape[2:4],
        output_padding=output_padding,
    )
    mod["main"] = relay.Function([data, kernel], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
    check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True)
# TODO(@kevinthesun): Support dynamic input height and width.
@tvm.testing.uses_gpu
def test_any_conv2d_transpose_nchw():
    """conv2d_transpose with dynamic batch: stride 1, and stride 2 with output padding."""
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 64, 224, 224),
        (64, 192, 3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        1,
        (2, 64, 224, 224),
        (2, 192, 224, 224),
        (0, 0),
    )
    # Stride 2 doubles each spatial dim (224 -> 448 with output_padding).
    verify_any_conv2d_transpose_nchw(
        (relay.Any(), 32, 224, 224),
        (32, 64, 3, 3),
        (2, 2),
        (1, 1),
        (1, 1),
        1,
        (1, 32, 224, 224),
        (1, 64, 448, 448),
        (1, 1),
    )
def verify_any_pool2d(
    pool_type,
    data_shape,
    pool_size,
    strides,
    dilation,
    padding,
    layout,
    static_data_shape,
    ref_out_shape,
):
    """Check max/avg pool2d output shape for a dynamically-shaped input.

    pool_type selects between relay.nn.max_pool2d ("max") and avg_pool2d.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    pool_func = relay.nn.max_pool2d if pool_type == "max" else relay.nn.avg_pool2d
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = pool_func(data, pool_size, strides, dilation, padding, layout)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_pool2d():
    """pool2d with dynamic dims in NCHW, NHWC and the blocked NCHW4c layouts."""
    verify_any_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any()),
        (3, 3),
        (1, 1),
        (1, 1),
        (1, 1),
        "NCHW",
        (2, 3, 220, 220),
        (2, 3, 220, 220),
    )
    # Stride 2 halves the spatial dims.
    verify_any_pool2d(
        "avg",
        (relay.Any(), relay.Any(), relay.Any(), 4),
        (1, 1),
        (2, 2),
        (1, 1),
        (0, 0),
        "NHWC",
        (3, 220, 220, 4),
        (3, 110, 110, 4),
    )
    verify_any_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any(), 4),
        (3, 3),
        (2, 2),
        (1, 1),
        (1, 1),
        "NCHW4c",
        (2, 3, 220, 220, 4),
        (2, 3, 110, 110, 4),
    )
def verify_any_global_pool2d(pool_type, data_shape, layout, static_data_shape, ref_out_shape):
    """Check global max/avg pool2d output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    pool_func = relay.nn.global_max_pool2d if pool_type == "max" else relay.nn.global_avg_pool2d
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = pool_func(data, layout)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_global_pool2d():
    """Global pooling collapses the spatial dims to 1 in NCHW, NHWC and NCHW4c."""
    verify_any_global_pool2d(
        "max", (relay.Any(), 3, relay.Any(), relay.Any()), "NCHW", (2, 3, 220, 220), (2, 3, 1, 1)
    )
    verify_any_global_pool2d(
        "avg", (relay.Any(), relay.Any(), relay.Any(), 4), "NHWC", (3, 220, 220, 4), (3, 1, 1, 4)
    )
    verify_any_global_pool2d(
        "max",
        (relay.Any(), 3, relay.Any(), relay.Any(), 4),
        "NCHW4c",
        (2, 3, 220, 220, 4),
        (2, 3, 1, 1, 4),
    )
def verify_any_split(data_shape, indices_or_sections, axis, static_data_shape, ref_out_shape):
    """Check split on a dynamically-shaped input; compares each output's shape.

    Runs the VM executor directly (not check_result) because split returns a
    tuple of tensors.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.split(data, indices_or_sections, axis)
    mod["main"] = relay.Function([data], y.astuple())
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    for kind in ["vm"]:
        result = relay.create_executor(kind, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
            data_np
        )
        # Each element of the returned tuple is compared against its expected shape.
        for ret, ref_ret in zip(result, ref_out_shape):
            assert ret.numpy().shape == ref_ret, "Shape mismatch: expect %s but got %s." % (
                str(ref_ret),
                str(ret.numpy().shape),
            )
@tvm.testing.uses_gpu
def test_any_split():
    """split on dynamic dims with section counts and explicit index lists."""
    for shape, sections, axis, static_shape, expected in [
        ((relay.Any(), 4), 2, -1, (9, 4), [(9, 2), (9, 2)]),
        ((relay.Any(), 4), 2, 1, (9, 4), [(9, 2), (9, 2)]),
        ((relay.Any(), relay.Any()), 2, 1, (9, 4), [(9, 2), (9, 2)]),
        ((relay.Any(), 12), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)]),
        ((relay.Any(), relay.Any()), (1, 4, 8), 1, (7, 12), [(7, 1), (7, 3), (7, 4)]),
        ((relay.Any(), 12), (8,), 1, (7, 12), [(7, 8), (7, 4)]),
        ((relay.Any(), relay.Any()), (8,), 1, (7, 12), [(7, 8), (7, 4)]),
    ]:
        verify_any_split(shape, sections, axis, static_shape, expected)
@tvm.testing.uses_gpu
def test_any_batch_flatten():
    """batch_flatten collapses a dynamic rank-3 input to (batch, prod(rest))."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=any_dims(3), dtype=dtype)
    y = relay.nn.batch_flatten(data)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=(3, 3, 10)).astype(dtype)
    # (3, 3, 10) flattens to (3, 3*10).
    ref_out_shape = (3, 30)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
# TODO(tvm-team) Fix dense schedule
@tvm.testing.known_failing_targets("cuda", "vulkan")
class TestAnyDense:
    """nn.dense with dynamic data/weight shapes; also exercised through cublas."""

    (
        data_shape,
        weight_shape,
        units,
        static_data_shape,
        static_weight_shape,
        ref_out_shape,
    ) = tvm.testing.parameters(
        # Fully dynamic shapes with units inferred, and partially dynamic
        # weight with explicit units.
        (any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8)),
        (any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)),
    )
    @tvm.testing.known_failing_targets("cuda", "vulkan")
    def test_any_dense(
        self,
        target,
        dev,
        data_shape,
        weight_shape,
        units,
        static_data_shape,
        static_weight_shape,
        ref_out_shape,
    ):
        """Build dense with dynamic shapes and compare only the output shape."""
        mod = tvm.IRModule()
        dtype = "float32"
        data = relay.var("data", shape=data_shape, dtype=dtype)
        weight = relay.var("weight", shape=weight_shape, dtype=dtype)
        y = relay.nn.dense(data, weight, units)
        mod["main"] = relay.Function([data, weight], y)
        data_np = np.random.uniform(size=static_data_shape).astype(dtype)
        weight_np = np.random.uniform(size=static_weight_shape).astype(dtype)
        check_result(
            [data_np, weight_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]
        )
    @tvm.testing.parametrize_targets("cuda -libs=cublas")
    @tvm.testing.known_failing_targets("cuda", "vulkan")
    def test_any_dense_cublas(
        self,
        target,
        dev,
        data_shape,
        weight_shape,
        units,
        static_data_shape,
        static_weight_shape,
        ref_out_shape,
    ):
        """Re-run the same dense check, pinned to the cublas-backed CUDA target."""
        self.test_any_dense(
            target,
            dev,
            data_shape,
            weight_shape,
            units,
            static_data_shape,
            static_weight_shape,
            ref_out_shape,
        )
class TestAnyBatchMatmul:
    """batch_matmul where the batch dim (or all dims) of either operand is dynamic."""

    dtype = tvm.testing.parameter("float32")
    executor_kind = tvm.testing.parameter("vm", "debug")
    (x_shape, y_shape) = tvm.testing.parameters(
        ((1, 16, 32), (1, 32, 16)),
        ((5, 16, 32), (5, 32, 16)),
        ((5, 16, 32), (5, 32, 20)),
        ((30, 16, 32), (30, 32, 20)),
    )
    # any_x = tvm.testing.parameter("none", "batch")
    # any_y = tvm.testing.parameter("none", "batch", "all")
    # ("all", *) combinations for x are intentionally not exercised.
    any_x, any_y = tvm.testing.parameters(
        ("none", "batch"), ("none", "all"), ("batch", "none"), ("batch", "batch"), ("batch", "all")
    )
    transpose_x = tvm.testing.parameter(True, False)
    transpose_y = tvm.testing.parameter(True, False)
    @tvm.testing.fixture
    def x_var_shape(self, x_shape, any_x):
        """Relay shape for x: static, dynamic batch dim, or fully dynamic."""
        if any_x == "none":
            return x_shape
        elif any_x == "batch":
            return tuple(relay.Any() if i == 0 else size for i, size in enumerate(x_shape))
        elif any_x == "all":
            return tuple(relay.Any() for _ in x_shape)
    @tvm.testing.fixture
    def y_var_shape(self, y_shape, any_y):
        """Relay shape for y: static, dynamic batch dim, or fully dynamic."""
        if any_y == "none":
            return y_shape
        elif any_y == "batch":
            return tuple(relay.Any() if i == 0 else size for i, size in enumerate(y_shape))
        elif any_y == "all":
            return tuple(relay.Any() for _ in y_shape)
    @tvm.testing.known_failing_targets("cuda", "vulkan")
    def test_any_batch_matmul(
        self,
        target,
        dev,
        x_shape,
        y_shape,
        any_x,
        any_y,
        x_var_shape,
        y_var_shape,
        transpose_x,
        transpose_y,
        executor_kind,
        dtype,
    ):
        """Compare batch_matmul values against the topi numpy reference."""
        # The transpose flags swap the two inner dims of the corresponding input.
        if transpose_x:
            x_shape = (x_shape[0], x_shape[2], x_shape[1])
            x_var_shape = (x_var_shape[0], x_var_shape[2], x_var_shape[1])
        if transpose_y:
            y_shape = (y_shape[0], y_shape[2], y_shape[1])
            y_var_shape = (y_var_shape[0], y_var_shape[2], y_var_shape[1])
        x = relay.var("x", relay.TensorType(x_var_shape, dtype))
        y = relay.var("y", relay.TensorType(y_var_shape, dtype))
        z = relay.nn.batch_matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y)
        func = relay.Function([x, y], z)
        x_np = np.random.uniform(size=x_shape).astype(dtype)
        y_np = np.random.uniform(size=y_shape).astype(dtype)
        z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=transpose_x, trans_y=transpose_y)
        mod = tvm.ir.IRModule.from_expr(func)
        z = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate()(
            x_np, y_np
        )
        tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5)
def verify_any_pad(data_shape, pad_width, static_data_shape):
    """Check nn.pad on a dynamically-shaped input against np.pad values.

    Note: the stray @tvm.testing.uses_gpu marker was removed — it belongs on
    test_* entry points (test_any_pad already has it), and no other verify_*
    helper in this file carries it.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.pad(data, pad_width)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out = np.pad(data_np, pad_width)
    check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_pad():
    """nn.pad with dynamic rank-3 and rank-4 inputs and asymmetric pad widths."""
    verify_any_pad(any_dims(3), ((0, 0), (1, 1), (2, 2)), (1, 2, 3))
    verify_any_pad(any_dims(4), ((1, 0), (1, 3), (0, 2), (9, 0)), (13, 11, 3, 1))
def verify_any_dilate(data_shape, strides, static_data_shape, dilation_value=None):
    """Check nn.dilate on a dynamically-shaped input against a numpy reference.

    The reference fills an output of size (d-1)*stride+1 per dim with
    dilation_value (0 by default) and scatters the input at stride intervals.
    """
    assert len(data_shape) == len(strides)
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    if dilation_value is None:
        y = relay.nn.dilate(data, strides)
    else:
        y = relay.nn.dilate(data, strides, dilation_value)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_shape = tuple(
        (static_data_shape[i] - 1) * strides[i] + 1 for i in range(len(static_data_shape))
    )
    if dilation_value is None:
        dilation_value = 0.0
    ref_out = np.ones(shape=ref_shape, dtype=dtype)
    ref_out = dilation_value * ref_out
    # Place the original elements every `strides[i]` positions along dim i.
    ref_out[tuple(slice(None, None, strides[i]) for i in range(len(data_shape)))] = data_np
    check_result([data_np], mod, ref_out)
@tvm.testing.uses_gpu
def test_any_dilate():
    """dilate with various strides; the last case also sets a custom dilation value."""
    for strides, static_shape in [
        ((1,), (1,)),
        ((1,), (5,)),
        ((5,), (5,)),
        ((1, 1, 1), (1, 2, 3)),
        ((1, 1, 2), (1, 2, 3)),
        ((1, 1, 5), (1, 2, 3)),
        ((3, 7, 5), (1, 2, 3)),
        ((3, 7, 1, 5), (1, 2, 3, 4)),
    ]:
        verify_any_dilate(any_dims(len(strides)), strides, static_shape)
    verify_any_dilate(any_dims(4), (3, 7, 1, 5), (1, 2, 3, 4), 1.0)
def verify_any_softmax(data_shape, axis, static_data_shape, ref_out_shape):
    """Check softmax output shape on a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.softmax(data, axis)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_softmax():
    """softmax is shape-preserving: the static input shape is also the reference."""
    for rank, axis, shape in [(3, -1, (1, 2, 3)), (4, 2, (13, 11, 3, 1))]:
        verify_any_softmax(any_dims(rank), axis, shape, shape)
def verify_any_relu(data_shape, static_data_shape, ref_out_shape):
    """Check relu output shape on a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.relu(data)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_relu():
    """relu is shape-preserving: the static input shape is also the reference."""
    for rank, shape in [(3, (1, 2, 3)), (4, (13, 11, 3, 1))]:
        verify_any_relu(any_dims(rank), shape, shape)
def verify_any_prelu(data_shape, alpha, static_data_shape, ref_out_shape):
    """Check prelu output shape on a dynamically-shaped input.

    alpha is a scalar that gets wrapped into a 1-element constant tensor.
    """
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    alpha = relay.const(np.array([alpha]), dtype=dtype)
    y = relay.nn.prelu(data, alpha)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_prelu():
    """prelu is shape-preserving: the static input shape is also the reference."""
    for rank, alpha, shape in [(3, 1, (1, 2, 3)), (4, 2, (13, 11, 3, 1))]:
        verify_any_prelu(any_dims(rank), alpha, shape, shape)
def verify_any_leaky_relu(data_shape, alpha, static_data_shape, ref_out_shape):
    """Check leaky_relu output shape on a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.leaky_relu(data, alpha)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_leaky_relu():
    """leaky_relu is shape-preserving: the static input shape is also the reference."""
    for rank, alpha, shape in [(3, 0.1, (1, 2, 3)), (4, 0.2, (13, 11, 3, 1))]:
        verify_any_leaky_relu(any_dims(rank), alpha, shape, shape)
def verify_any_bias_add(data_shape, static_data_shape, ref_out_shape):
    """Check bias_add output shape on a dynamically-shaped input with a 1-element bias."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    bias = relay.const(np.random.randn(1), dtype=dtype)
    y = relay.nn.bias_add(data, bias)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_bias_add():
    """bias_add is shape-preserving: the static input shape is also the reference."""
    for rank, shape in [(3, (1, 2, 3)), (4, (13, 11, 3, 1))]:
        verify_any_bias_add(any_dims(rank), shape, shape)
def verify_any_topk(data_shape, kval, np_dshape, dtype, ret_type="indices", const_k=False):
    """Check topk indices on a dynamically-shaped input against np.argsort.

    k is passed either as a relay constant (const_k=True) or as a scalar
    runtime input. With ret_type="both" only the values half of the result
    is compared... the indices are dropped via out[0].
    """
    mod = tvm.IRModule()
    data = relay.var("data", shape=data_shape, dtype=dtype)
    np_data = np.random.uniform(size=np_dshape).astype(dtype)
    if const_k:
        k = relay.const(kval)
        args = [data]
        in_vals = [np_data]
    else:
        k = relay.var("k", shape=(), dtype="int32")
        args = [data, k]
        in_vals = [np_data, kval]
    out = relay.topk(data, k, ret_type=ret_type)
    if ret_type == "both":
        out = out[0]
    mod["main"] = relay.Function(args, out)
    # argsort of the negated data yields indices in descending-value order.
    # (Renamed from `sorted`, which shadowed the builtin.)
    sorted_indices = np.argsort(-np_data)
    if len(np_dshape) == 2:
        ref_out = sorted_indices[:, 0:kval]
    else:
        ref_out = sorted_indices[0:kval]
    check_result(in_vals, mod, ref_out)
@tvm.testing.uses_gpu
def test_any_topk():
    """topk on dynamic 1-D and 2-D inputs, incl. constant k and the k=0 empty case."""
    verify_any_topk(any_dims(1), 5, (10,), "float32")
    verify_any_topk(any_dims(2), 2, (6, 3), "int32")
    verify_any_topk(any_dims(2), 3, (6, 3), "float32", const_k=True)
    # k=0 on an empty input exercises the zero-size output path.
    verify_any_topk(any_dims(1), 0, (0,), "float32", ret_type="both")
def verify_any_get_valid_counts(num_anchor_real, dtype, targets=None):
    """Check vision.get_valid_counts with a dynamic anchor count.

    Builds a numpy reference producing the three outputs of the op:
    per-batch valid count, compacted boxes (invalid rows set to -1), and
    the original index of each kept box (invalid entries set to -1).
    """
    mod = tvm.IRModule()
    batch_size = 1
    num_anchor = relay.Any()
    data = relay.var("data", shape=(batch_size, num_anchor, 5), dtype=dtype)
    np_data = np.random.uniform(size=(batch_size, num_anchor_real, 5)).astype(dtype)
    np_out1 = np.zeros(shape=(batch_size,))
    np_out2 = np.zeros(shape=np_data.shape).astype(dtype)
    np_out3 = np.zeros(shape=(batch_size, num_anchor_real))
    score_threshold = 0.95
    for i in range(batch_size):
        np_out1[i] = 0
        inter_idx = 0
        for j in range(num_anchor_real):
            # Score lives in column 0 (score_index=0 below).
            score = np_data[i, j, 0]
            if score > score_threshold:
                # Keep this box: compact it to the next free slot.
                for k in range(5):
                    np_out2[i, inter_idx, k] = np_data[i, j, k]
                np_out1[i] += 1
                np_out3[i, inter_idx] = j
                inter_idx += 1
            if j >= np_out1[i]:
                # Slots past the valid count are filled with -1 markers.
                for k in range(5):
                    np_out2[i, j, k] = -1.0
                np_out3[i, j] = -1
    z = relay.vision.get_valid_counts(data, score_threshold, 0, score_index=0)
    mod["main"] = relay.Function([data], z.astuple())
    check_result([np_data], mod, [np_out1, np_out2, np_out3], targets=targets)
@tvm.testing.uses_gpu
def test_any_get_valid_counts():
    """get_valid_counts with 10 anchors, then with 0 anchors (skipping opencl)."""
    verify_any_get_valid_counts(10, "float32")
    # opencl seems to have issues with empty size buffer
    # Check failed: err_code == CL_SUCCESS == false: OpenCL Error,
    # code=-61: CL_INVALID_BUFFER_SIZE
    targets = []
    for tgt, dev in tvm.testing.enabled_targets():
        if "opencl" not in tgt:
            targets.append((tgt, dev))
    verify_any_get_valid_counts(0, "float32", targets=targets)
@tvm.testing.uses_gpu
def test_fused_ops():
    """Two fusable elementwise ops ((x+1)*2) over a fully dynamic 2-D input."""
    x = relay.var("x", shape=(relay.Any(), relay.Any()), dtype="float32")
    y0 = x + relay.const(1.0, "float32")
    y1 = y0 * relay.const(2.0, "float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y1)
    data = np.random.uniform(size=(5, 4)).astype("float32")
    check_result([data], mod, (data + 1) * 2)
@tvm.testing.uses_gpu
def test_arange_with_dynamic_shape():
    """arange whose stop value comes from shape_of a dynamically-shaped input."""
    # m, n, k = relay.ShapeVar('m'), relay.ShapeVar('n'), relay.ShapeVar('k')
    m, n, k = relay.Any(), relay.Any(), relay.Any()
    x = relay.var("x", shape=(m, n, k), dtype="float32")
    y0 = relay.shape_of(x)
    # Take dim 0 (=10 at runtime) and use it as the arange stop.
    y1 = relay.take(y0, relay.const(0, "int32"))
    y2 = relay.op.arange(y1, dtype="int32")
    y3 = y2 + relay.const(1, dtype="int32")
    data = np.random.rand(10, 5, 3).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y3)
    check_result([data], mod, np.array(range(10)).astype("int32") + 1)
def verify_any_random_strided_slice(
    begin_shape,
    end_shape,
    strides_shape,
    data_shape,
    slice_mode="end",
    const_attrs=False,
):
    """Randomize begin/end/strides and delegate to verify_any_strided_slice."""
    # Generate random numpy input data
    np_begin = np.random.randint(2, size=begin_shape, dtype="int32")
    np_end = np.random.randint(5, 10, size=end_shape, dtype="int32")
    # In "size" mode strides must be 1; otherwise allow 1 or 2.
    np_strides = np.random.randint(
        1, 2 if slice_mode == "size" else 3, size=strides_shape, dtype="int32"
    )
    verify_any_strided_slice(
        np_begin, np_end, np_strides, data_shape, slice_mode=slice_mode, const_attrs=const_attrs
    )
def verify_any_strided_slice(
    np_begin,
    np_end,
    np_strides,
    data_shape,
    axes=None,
    slice_mode="end",
    const_attrs=False,
):
    """Check strided_slice on a dynamically-shaped input against the topi reference.

    With const_attrs=True the begin/end/strides are baked in as constants;
    otherwise they are extra runtime inputs.
    """
    np_data = np.random.uniform(size=data_shape).astype("float32")
    # target numpy result
    ref_res = tvm.topi.testing.strided_slice_python(
        np_data, np_begin, np_end, np_strides, slice_mode, axes
    )
    # Relay Module
    mod = tvm.IRModule()
    data = relay.var("data", shape=any_dims(len(data_shape)), dtype="float32")
    if const_attrs:
        begin = relay.const(np_begin)
        end = relay.const(np_end)
        strides = relay.const(np_strides)
        args = [data]
        np_inputs = [np_data]
    else:
        begin = relay.var("begin", shape=np_begin.shape, dtype="int32")
        end = relay.var("end", shape=np_end.shape, dtype="int32")
        strides = relay.var("strides", shape=np_strides.shape, dtype="int32")
        args = [data, begin, end, strides]
        np_inputs = [np_data, np_begin, np_end, np_strides]
    y = relay.strided_slice(
        data, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
    )
    mod["main"] = relay.Function(args, y)
    check_result(np_inputs, mod, ref_res)
@tvm.testing.uses_gpu
def test_any_strided_slice():
    """strided_slice over dynamic shapes: random slices, size mode, constants,
    out-of-range clamping, negative strides, and explicit axes."""
    verify_any_random_strided_slice((2,), (2,), (2,), (15, 21))
    verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21))
    verify_any_random_strided_slice((3,), (3,), (3,), (23, 29, 41))
    verify_any_random_strided_slice((4,), (4,), (4,), (40, 50, 60, 70))
    verify_any_random_strided_slice((3,), (3,), (3,), (15, 17, 21), slice_mode="size")
    verify_any_random_strided_slice((2,), (2,), (2,), (15, 21), const_attrs=True)
    # Extreme begin/end values must clamp; dim 1 is traversed backwards.
    begin = np.array([0, 1000000]).astype("int32")
    end = np.array([1000000, -1000000]).astype("int32")
    strides = np.array([1, -1]).astype("int32")
    verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=False)
    verify_any_strided_slice(begin, end, strides, (15, 21), const_attrs=True)
    verify_any_strided_slice(begin, end, strides, (15, 17, 21), axes=[0, 2], const_attrs=True)
@tvm.testing.uses_gpu
def test_recursive_concat():
    """
    fn @concat_loop(%i: int32, %st: (any, 1)) -> (any, 1) {
      if (%i < 10) {
        let %i = reshape(cast(i, "float32"), newshape=(1, ))
        let %new_st = concatenate((st, i), axis=0)
        concat_loop(%i + 1, )
      } else {
        st
      }
    }
    """
    # Initial Values.
    i = relay.var("i", shape=(), dtype="int32")
    st = relay.var("st", shape=(relay.Any(), 1), dtype="int32")
    def _cond(i, st):
        # Loop while i < 10 (min reduces the comparison to a scalar bool).
        return relay.op.min(relay.op.less(i, int32(10)))
    def _body(i, st):
        # Append i as a new (1, 1) row; the accumulator grows each iteration,
        # hence the (Any, 1) annotation on st.
        i_vec = relay.op.reshape(i, (1, 1))
        ret = relay.op.concatenate([st, i_vec], axis=0)
        return i + int32(1), ret
    loop = while_loop(_cond, [i, st], _body)
    start = relay.var("start", shape=(), dtype="int32")
    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
    func = relay.Function([start], relay.TupleGetItem(body, 1))
    mod = tvm.IRModule()
    mod["main"] = func
    data = np.array(0.0, dtype="int32")
    # Seed row 0 plus the ten appended values 0..9 -> shape (11, 1).
    ref = np.array([0] + list(range(10))).reshape((11, 1)).astype("int32")
    check_result([data], mod, ref)
@tvm.testing.uses_gpu
def test_recursive_concat_with_wrong_annotation():
    """
    v0.0.1
    fn (%start: int32) {
    %7 = {
        let %while_loop = fn (%i: int32, %st: Tensor[(1, 1), int32]) {
        %0 = less(%i, 10)
        %1 = min(%0)
        if (%1) {
            %2 = add(%i, 1)
            %3 = reshape(%i, newshape=[1, 1])
            %4 = (%st, %3)
            /* The result of concat should be 1,1 but it is 2, 1. */
            %5 = concatenate(%4)
            %while_loop(%2, %5)
        } else {
            (%i, %st)
        }
        }
        %6 = reshape(0, newshape=[1, 1])
        %while_loop(%start, %6)
    }
    %7.1
    }
    """
    # Initial Values.
    # st is deliberately mis-annotated as static (1, 1) even though the loop
    # body grows it; the type checker must report the mismatch below.
    i = relay.var("i", shape=(), dtype="int32")
    st = relay.var("st", shape=(1, 1), dtype="int32")
    def _cond(i, st):
        return relay.op.min(relay.op.less(i, int32(10)))
    def _body(i, st):
        i_vec = relay.op.reshape(i, (1, 1))
        ret = relay.op.concatenate([st, i_vec], axis=0)
        return i + int32(1), ret
    loop = while_loop(_cond, [i, st], _body)
    start = relay.var("start", shape=(), dtype="int32")
    body = loop(start, relay.op.reshape(relay.const(0), newshape=(1, 1)))
    func = relay.Function([start], relay.TupleGetItem(body, 1))
    with DiagnosticTesting() as diagnostics:
        diagnostics.assert_message(
            "The Relay type checker is unable to show the following types match:\n"
            "  Tensor[(2, 1), int32]\n"
            "  Tensor[(1, 1), int32]\n"
            "In particular:\n"
            "  dimension 0 conflicts: 2 does not match 1."
        )
        func = infer_type(func)
@tvm.testing.uses_gpu
def test_tuple_get_item():
    """TupleGetItem on the tuple produced by split over a dynamic input."""
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    indices_or_sections = 2
    axis = 1
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.split(data, indices_or_sections, axis)
    # Keep only the first half of the split.
    y = relay.expr.TupleGetItem(y.astuple(), 0)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 2)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_mixed_input_type():
    """A function mixing a nested-tuple input with a plain tensor input (VM only)."""
    mod = tvm.IRModule()
    dtype = "float32"
    static_data_shape = (9, 4)
    data_shape = (relay.Any(), 4)
    tensor_type = relay.TensorType(data_shape, dtype)
    tuple_type = relay.TupleType([tensor_type, tensor_type])
    # d0 is ((tensor, tensor), tensor); d1 is a plain tensor.
    data0 = relay.var("d0", type_annotation=relay.TupleType([tuple_type, tensor_type]))
    data1 = relay.var("d1", shape=(relay.Any(), 4), dtype=dtype)
    data_tuple = relay.expr.TupleWrapper(data0, 2)
    nested_data_tuple = relay.expr.TupleWrapper(data_tuple[0], 2)
    y = nested_data_tuple[1] * data_tuple[1] + data1
    mod["main"] = relay.Function([data0, data1], y)
    data_np0 = np.random.uniform(size=static_data_shape).astype(dtype)
    data_np1 = np.random.uniform(size=static_data_shape).astype(dtype)
    ref_out_shape = (9, 4)
    # Nested python lists mirror the nested tuple type; only the VM supports this.
    check_result(
        [[[data_np0, data_np0], data_np0], data_np1],
        mod,
        ref_out_shape,
        assert_shape=True,
        only_vm=True,
    )
def verify_any_crop_and_resize(
    data_shape,
    boxes_shape,
    box_indices_shape,
    crop_size,
    layout,
    static_boxes,
    static_box_indices_shape,
    ref_out_shape,
):
    """Check image.crop_and_resize output shape with a dynamic number of boxes."""
    mod = tvm.IRModule()
    dtype = "float32"
    indices_dtype = "int32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    boxes = relay.var("boxes", shape=boxes_shape, dtype=dtype)
    box_indices = relay.var("box_indices", shape=box_indices_shape, dtype=indices_dtype)
    y = relay.image.crop_and_resize(data, boxes, box_indices, crop_size, layout)
    mod["main"] = relay.Function([data, boxes, box_indices], y)
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    boxes_np = np.random.uniform(size=static_boxes).astype(dtype)
    box_indices_np = np.random.uniform(size=static_box_indices_shape).astype(indices_dtype)
    check_result([data_np, boxes_np, box_indices_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_crop_and_resize():
    """crop_and_resize with 128 runtime boxes in NHWC and NCHW layouts."""
    verify_any_crop_and_resize(
        data_shape=(1, 234, 234, 256),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NHWC",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 14, 14, 256),
    )
    verify_any_crop_and_resize(
        data_shape=(1, 256, 234, 234),
        boxes_shape=(relay.Any(), 4),
        box_indices_shape=(relay.Any(),),
        crop_size=(14, 14),
        layout="NCHW",
        static_boxes=(128, 4),
        static_box_indices_shape=(128,),
        ref_out_shape=(128, 256, 14, 14),
    )
def verify_any_mirror_pad(data_shape, pad_width, static_data_shape, ref_out_shape):
    """Check nn.mirror_pad output shape for a dynamically-shaped input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    y = relay.nn.mirror_pad(data, pad_width)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_mirror_pad():
    """mirror_pad of 1 pixel on both spatial dims: 232 -> 234."""
    verify_any_mirror_pad(
        data_shape=(1, 256, 232, 232),
        pad_width=((0, 0), (0, 0), (1, 1), (1, 1)),
        static_data_shape=(1, 256, 232, 232),
        ref_out_shape=(1, 256, 234, 234),
    )
def verify_any_ndarray_size(data_np_shape):
    """Check ndarray_size (total element count) on a fully dynamic input."""
    v = relay.var("v", shape=any_dims(len(data_np_shape)), dtype="float32")
    n = relay.ndarray_size(v, dtype="int32")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([v], n)
    np_data = np.zeros(data_np_shape, dtype="float32")
    ref_res = np.size(np_data)
    check_result([np_data], mod, ref_res)
@tvm.testing.uses_gpu
def test_any_ndarray_size():
    """ndarray_size over inputs of rank 1, 2 and 4."""
    for shape in [(2,), (2, 2), (1, 2, 3, 4)]:
        verify_any_ndarray_size(shape)
def verify_any_resize2d(data_shape, scale, layout, static_data_shape, ref_out_shape):
    """Check image.resize2d output shape when scaling a dynamic-batch input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    # H/W position in the shape depends on the layout.
    if layout == "NHWC":
        size = (data_shape[1] * scale, data_shape[2] * scale)
    else:
        size = (data_shape[2] * scale, data_shape[3] * scale)
    y = relay.image.resize2d(data, size, None, layout)
    mod["main"] = relay.Function([data], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_resize():
    """resize2d with scale 2 (NHWC) and scale 3 (NCHW) on a dynamic batch."""
    verify_any_resize2d(
        data_shape=(relay.Any(), 4, 4, 4),
        scale=2,
        layout="NHWC",
        static_data_shape=(1, 4, 4, 4),
        ref_out_shape=(1, 8, 8, 4),
    )
    verify_any_resize2d(
        data_shape=(relay.Any(), 8, 17, 20),
        scale=3,
        layout="NCHW",
        static_data_shape=(2, 8, 17, 20),
        ref_out_shape=(2, 8, 51, 60),
    )
def verify_any_grid_sample(data_shape, grid_shape, static_data_shape, ref_out_shape):
    """Check image.grid_sample output shape for a dynamic-batch input."""
    mod = tvm.IRModule()
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    grid = relay.var("grid", shape=grid_shape, dtype=dtype)
    y = relay.image.grid_sample(data, grid)
    mod["main"] = relay.Function([data, grid], y)
    data_np = np.random.uniform(size=static_data_shape).astype(dtype)
    grid_np = np.random.uniform(size=grid_shape).astype(dtype)
    check_result([data_np, grid_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_grid_sample():
    """grid_sample with a dynamic batch; the output takes the grid's spatial dims."""
    verify_any_grid_sample(
        data_shape=(relay.Any(), 4, 16, 32),
        grid_shape=(4, 2, 8, 8),
        static_data_shape=(4, 4, 16, 32),
        ref_out_shape=(4, 4, 8, 8),
    )
    verify_any_grid_sample(
        data_shape=(relay.Any(), 4, 16, 32),
        grid_shape=(4, 2, 32, 32),
        static_data_shape=(4, 4, 16, 32),
        ref_out_shape=(4, 4, 32, 32),
    )
def verify_any_affine_grid(num_batch, static_num_batch, target_shape, ref_out_shape):
    """Check image.affine_grid when the batch dimension is dynamic."""
    dtype = "float32"
    # Input is a batch of 2x3 affine transform matrices.
    data = relay.var("data", shape=(num_batch, 2, 3), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data], relay.image.affine_grid(data, target_shape))
    data_np = np.random.uniform(size=(static_num_batch, 2, 3)).astype(dtype)
    check_result([data_np], mod, ref_out_shape, assert_shape=True)
@tvm.testing.uses_gpu
def test_any_affine_grid():
    """image.affine_grid with a dynamic batch dimension."""
    verify_any_affine_grid(
        num_batch=relay.Any(),
        static_num_batch=1,
        target_shape=(16, 32),
        ref_out_shape=(1, 2, 16, 32),
    )
    verify_any_affine_grid(
        num_batch=relay.Any(),
        static_num_batch=8,
        target_shape=(32, 32),
        ref_out_shape=(8, 2, 32, 32),
    )
def test_any_consecutive_broadcast():
    """Chained broadcasting binary ops over fully dynamic 2-D inputs."""
    dtype = "float32"
    data0 = relay.var("data0", shape=any_dims(2), dtype=dtype)
    data1 = relay.var("data1", shape=any_dims(2), dtype=dtype)
    data2 = relay.var("data2", shape=any_dims(2), dtype=dtype)
    data3 = relay.var("data3", shape=any_dims(2), dtype=dtype)
    out0 = data0 + data1
    out1 = data0 * data1
    out2 = out0 - out1
    out3 = data2 + data3
    out4 = data2 * data3
    out5 = out3 - out4
    out6 = out2 * out5
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data0, data1, data2, data3], out6)
    # (1, 4) vs (2, 4) operands force broadcasting at each binary op.
    np_data0 = np.random.uniform(size=(1, 4)).astype(dtype)
    np_data1 = np.random.uniform(size=(2, 4)).astype(dtype)
    np_data2 = np.random.uniform(size=(1, 4)).astype(dtype)
    np_data3 = np.random.uniform(size=(2, 4)).astype(dtype)
    ref_res = ((np_data0 + np_data1) - (np_data0 * np_data1)) * (
        (np_data2 + np_data3) - (np_data2 * np_data3)
    )
    check_result([np_data0, np_data1, np_data2, np_data3], mod, ref_res)
def test_reshape_concat():
    """Concatenate reshaped dynamic tensors, via reshape and reshape_like."""
    dtype = "float32"
    d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
    d1 = relay.var("d1", shape=any_dims(3), dtype=dtype)
    out = relay.op.concatenate([relay.op.reshape(d0, [-1]), relay.op.reshape(d1, [-1])], axis=0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d0, d1], out)
    np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
    np_data1 = np.random.uniform(size=(2, 5, 2)).astype(dtype)
    ref_res = np.concatenate([np.reshape(np_data0, [-1]), np.reshape(np_data1, [-1])], axis=0)
    check_result([np_data0, np_data1], mod, ref_res)
    # Second case: reshape_like against separately supplied shape-source tensors.
    d0 = relay.var("d0", shape=any_dims(2), dtype=dtype)
    d1 = relay.var("d1", shape=any_dims(2), dtype=dtype)
    s0 = relay.var("s0", shape=any_dims(3), dtype=dtype)
    s1 = relay.var("s1", shape=any_dims(3), dtype=dtype)
    out = relay.op.concatenate(
        [relay.op.reshape_like(d0, s0), relay.op.reshape_like(d1, s1)], axis=0
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d0, d1, s0, s1], out)
    np_data0 = np.random.uniform(size=(4, 5)).astype(dtype)
    np_data1 = np.random.uniform(size=(8, 5)).astype(dtype)
    np_shape_like0 = np.random.uniform(size=(2, 2, 5)).astype(dtype)
    np_shape_like1 = np.random.uniform(size=(4, 2, 5)).astype(dtype)
    ref_res = np.concatenate(
        [np.reshape(np_data0, np_shape_like0.shape), np.reshape(np_data1, np_shape_like1.shape)],
        axis=0,
    )
    check_result([np_data0, np_data1, np_shape_like0, np_shape_like1], mod, ref_res)
def test_any_adv_index():
    """Advanced indexing with broadcastable, dynamic-shaped index tensors.

    Fix: removed a leftover debug ``print(ref_res.shape)`` that polluted
    test output; the NumPy reference result is still checked as before.
    """
    data = relay.var("data", shape=(5, relay.Any(), relay.Any()), dtype="float32")
    index0 = relay.var("index0", shape=(1, relay.Any()), dtype="int64")
    index1 = relay.var("index1", shape=(relay.Any(), 1), dtype="int64")
    out = relay.adv_index([data, index0, index1])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data, index0, index1], out)
    np_data_shape = (5, 5, 10)
    np_index0_shape = (1, 4)
    np_index1_shape = (4, 1)
    np_data = np.random.uniform(size=np_data_shape).astype("float32")
    # Index values must stay within the first two data dims (both of size 5).
    np_index0 = np.random.uniform(0, np_data_shape[0], size=np_index0_shape).astype("int64")
    np_index1 = np.random.uniform(0, np_data_shape[0], size=np_index1_shape).astype("int64")
    # NumPy advanced indexing serves as the reference implementation.
    ref_res = np_data[tuple([np_index0, np_index1])]
    check_result([np_data, np_index0, np_index1], mod, ref_res)
def verify_any_repeat(data_shape, np_dshape, repeats, axis):
    """Check relay.repeat against np.repeat for a dynamic-shaped input."""
    dtype = "float32"
    data = relay.var("data", shape=data_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data], relay.repeat(data, repeats, axis))
    np_data = np.random.uniform(size=np_dshape).astype(dtype)
    # np.repeat provides the reference result.
    check_result([np_data], mod, np.repeat(np_data, repeats, axis))
@tvm.testing.uses_gpu
def test_any_repeat():
    """relay.repeat over 1-D/2-D/4-D dynamic inputs and several axes."""
    verify_any_repeat(any_dims(2), (1, 2), 2, 0)
    verify_any_repeat(any_dims(1), (3,), 3, -1)
    verify_any_repeat(any_dims(4), (2, 1, 1, 4), 4, 2)
def verify_any_stack(data_shape, np_dshape, num_data, axis):
    """Check relay.stack of several dynamic-shaped tensors against np.stack."""
    dtype = "float32"
    inputs = [
        relay.var("data{}".format(i), shape=data_shape, dtype=dtype) for i in range(num_data)
    ]
    mod = tvm.IRModule()
    mod["main"] = relay.Function(inputs, relay.stack(inputs, axis))
    np_inputs = [np.random.uniform(size=np_dshape).astype(dtype) for _ in range(num_data)]
    check_result(np_inputs, mod, np.stack(np_inputs, axis))
@tvm.testing.uses_gpu
def test_any_stack():
    """relay.stack over dynamic inputs with varying rank, count and axis."""
    verify_any_stack(any_dims(2), (1, 2), 3, 0)
    verify_any_stack(any_dims(1), (3,), 4, -1)
    verify_any_stack(any_dims(4), (2, 1, 1, 4), 2, 2)
def verify_any_where(
    cond_shape, x_shape, y_shape, cond_np_shape, x_np_shape, y_np_shape, y_np_shape_invalid=None
):
    """Check relay.where with dynamic shapes against np.where.

    When ``y_np_shape_invalid`` is given, additionally run with a
    non-broadcastable ``y`` and require that a TVMError mentioning
    "Invalid broadcast shapes" is actually raised.

    Fix: the original try/except passed silently if no exception was
    raised at all, so the invalid-broadcast check could never fail.
    """
    dtype = "float32"
    cond = relay.var("cond", shape=cond_shape, dtype="bool")
    x = relay.var("x", shape=x_shape, dtype=dtype)
    y = relay.var("y", shape=y_shape, dtype=dtype)
    z = relay.where(cond, x, y)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([cond, x, y], z)
    cond_np = np.random.randn(*cond_np_shape) > 0
    x_np = np.random.randn(*x_np_shape).astype(dtype)
    y_np = np.random.randn(*y_np_shape).astype(dtype)
    expected = np.where(cond_np, x_np, y_np)
    check_result([cond_np, x_np, y_np], mod, expected)
    # verify invalid broadcasting check
    if y_np_shape_invalid:
        y_np_bad = np.random.randn(*y_np_shape_invalid).astype(dtype)
        raised = False
        try:
            check_result([cond_np, x_np, y_np_bad], mod, expected)
        except tvm.error.TVMError as e:
            error_msg = str(e).split("\n")[-1]
            assert "Invalid broadcast shapes" in error_msg
            raised = True
        # If no error was raised, the runtime broadcast check is broken.
        assert raised, "expected TVMError for invalid broadcast shapes"
@tvm.testing.uses_gpu
def test_any_where():
    """relay.where across static/dynamic/broadcast shape combinations."""
    verify_any_where(any_dims(1), (5,), (5,), (5,), (5,), (5,))
    verify_any_where(any_dims(1), any_dims(1), (5,), (5,), (5,), (5,))
    verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (5,), (5,))
    verify_any_where((5,), any_dims(1), any_dims(1), (5,), (5,), (5,))
    # where with broadcast
    verify_any_where(any_dims(1), any_dims(1), any_dims(1), (5,), (1,), (5,))
    verify_any_where(any_dims(1), any_dims(2), any_dims(2), (5,), (5, 5), (5, 5))
    verify_any_where(any_dims(1), any_dims(1), any_dims(2), (5,), (5,), (5, 5))
    verify_any_where(
        any_dims(2), any_dims(2), any_dims(2), (3, 4), (3, 1), (1, 4), y_np_shape_invalid=(2, 4)
    )
    # Test scalar where in a dynamically shaped graph
    x = relay.var("x", shape=any_dims(1), dtype="int64")
    y = relay.var("y", shape=any_dims(2), dtype="float32")
    left = relay.take(x, relay.const(1, dtype="int32")) + relay.const(4, "int64")
    right = relay.const(4, "int64")
    # Condition is constant False, so the scalar `right` (4) is always chosen.
    where = relay.where(relay.const(False, "bool"), left, right)
    z = relay.take(y, where, axis=1)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.randn(2).astype("int64")
    y_np = np.random.randn(2, 6).astype("float32")
    expected = y_np[:, 4]
    check_result([x_np, y_np], mod, expected)
@tvm.testing.uses_gpu
def test_non_max_suppression():
    """vision.non_max_suppression with return_indices on a dynamic box count."""
    x0 = relay.var("x0", relay.ty.TensorType((1, relay.Any(), 6), "float32"))
    x1 = relay.var("x1", relay.ty.TensorType((1,), "int32"))
    x2 = relay.var("x2", relay.ty.TensorType((1, relay.Any()), "int32"))
    x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
    z = relay.vision.non_max_suppression(
        x0,
        x1,
        x2,
        x3,
        iou_threshold=0.5,
        force_suppress=True,
        top_k=2,
        return_indices=True,
        invalid_to_bottom=False,
    )
    z = z.astuple()
    func = relay.Function([x0, x1, x2, x3], z)
    mod = tvm.IRModule()
    mod["main"] = func
    # Each box row is (class_id, score, x1, y1, x2, y2).
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45],
                [1, 0.7, 30, 60, 50, 80],
                [0, 0.4, 4, 21, 19, 40],
                [2, 0.9, 35, 61, 52, 79],
                [1, 0.5, 100, 60, 70, 110],
            ]
        ]
    ).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
    np_max_output_size = -1
    np_indices_result = np.array([[4, 0, -1, -1, -1]])
    np_valid_box_count = np.array([[2]]).astype("int32")
    check_result(
        [np_data, np_valid_count, np_indices, np_max_output_size],
        mod,
        [np_indices_result, np_valid_box_count],
        only_vm=False,
    )
    # Degenerate case: zero candidate boxes.
    np_data = np.zeros((1, 0, 6)).astype("float32")
    np_valid_count = np.array([0]).astype("int32")
    np_indices = np.zeros((1, 0)).astype("int32")
    np_max_output_size = -1
    np_indices_result = np.zeros((1, 0))
    np_valid_box_count = np.array([[0]]).astype("int32")
    check_result(
        [np_data, np_valid_count, np_indices, np_max_output_size],
        mod,
        [np_indices_result, np_valid_box_count],
        only_vm=False,
    )
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression():
    """all_class_non_max_suppression in both onnx and tensorflow output formats."""
    def verify_all_class_non_max_suppression(
        boxes_np,
        scores_np,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected,
        output_format="onnx",
    ):
        # Build the NMS graph with a dynamic number of boxes.
        batch_size = boxes_np.shape[0]
        num_classes = scores_np.shape[1]
        num_boxes = relay.Any()
        boxes = relay.var("boxes", relay.ty.TensorType((batch_size, num_boxes, 4), "float32"))
        scores = relay.var(
            "scores", relay.ty.TensorType((batch_size, num_classes, num_boxes), "float32")
        )
        nms_out = relay.vision.all_class_non_max_suppression(
            boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_format
        )
        if output_format == "onnx":
            # Slice the padded output down to the reported number of valid rows.
            three = relay.const(np.array([3]), dtype="int64")
            begin = relay.const(np.array([0, 0]), dtype="int64")
            end = relay.op.concatenate([nms_out[1], three], axis=0)
            strides = relay.const(np.array([1, 1]), dtype="int64")
            out = relay.op.strided_slice(nms_out[0], begin, end, strides)
            mod = tvm.IRModule()
            mod["main"] = relay.Function([boxes, scores], out)
            check_result([boxes_np, scores_np], mod, [expected])
        else:
            # tensorflow format returns the whole (padded) tuple.
            out = nms_out.tuple_value
            mod = tvm.IRModule()
            mod["main"] = relay.Function([boxes, scores], out)
            check_result([boxes_np, scores_np], mod, expected)
    boxes = np.array(
        [
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.5, 0.5, 0.4, 0.4],
                [0.0, 0.0, 0.5, 0.5],
                [0.5, 0.5, 0.9, 0.9],
                [0.5, 0.5, 1.0, 1.0],
            ],
        ]
    ).astype("float32")
    scores = np.array(
        [
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.8, 0.2, 0.6, 0.3, 0.9]],
        ]
    ).astype("float32")
    max_output_boxes_per_class = 2
    iou_threshold = 0.8
    score_threshold = 0.4
    expected = np.array([[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 0]])
    verify_all_class_non_max_suppression(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
    )
    expected = [
        np.array(
            [[[0, 4], [0, 2], [1, 4], [1, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]]
        ),
        np.array(
            [
                [
                    0.9,
                    0.6,
                    0.9,
                    0.8,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        ),
        np.array([4]),
    ]
    verify_all_class_non_max_suppression(
        boxes,
        scores,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected,
        output_format="tensorflow",
    )
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
                [0.0, 0.1, 0.9, 1.2],
            ]
        ]
    ).astype(np.float32)
    scores = np.array([[[0.2, 0.3], [0.3, 0.2]]]).astype(np.float32)
    iou_threshold = 0.3
    score_threshold = 0.15
    expected = np.array([[0, 0, 1], [0, 1, 0]])
    verify_all_class_non_max_suppression(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
    )
    # zero box detection case
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
            ]
        ]
    ).astype(np.float32)
    scores = np.array([[[0.2]]]).astype(np.float32)
    score_threshold = 0.4
    expected = np.zeros((0, 3))
    verify_all_class_non_max_suppression(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
    )
@tvm.testing.uses_gpu
def test_gather_nd():
    """gather_nd with dynamic data/indices shapes and various batch_dims."""
    def verify_gather_nd(data_shape, indices_shape, data_shape_np, indices_shape_np, batch_dims=0):
        x = relay.var("x", relay.TensorType(data_shape, "float32"))
        y = relay.var("y", relay.TensorType(indices_shape, "int32"))
        # index_rank must be given explicitly because indices_shape is dynamic.
        z = relay.gather_nd(x, y, batch_dims=batch_dims, index_rank=indices_shape[0])
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x, y], z)
        data_np = np.random.uniform(size=data_shape_np).astype("float32")
        indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
        ref_res = ref_funcs.gather_nd(data_np, indices_np, batch_dims)
        check_result([data_np, indices_np], mod, [ref_res])
    verify_gather_nd((2, 2), (2, relay.Any()), (2, 2), (2, 3))
    verify_gather_nd((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3))
    verify_gather_nd((relay.Any(), 2), (1, relay.Any()), (10, 2), (1, 10), 1)
    verify_gather_nd(
        (relay.Any(), 2, 2, 3, 4), (3, relay.Any(), relay.Any()), (3, 2, 2, 3, 4), (3, 3, 2), 2
    )
@tvm.testing.uses_gpu
def test_scatter_nd():
    """scatter_nd in "add" mode with dynamic indices/updates shapes."""
    def verify_scatter_nd(data_np, indices_np, updates_np, ref_res):
        indices_shape = (2, relay.Any())
        updates_shape = (relay.Any(),)
        data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
        indices = relay.var("indices", relay.TensorType(indices_shape, str(indices_np.dtype)))
        updates = relay.var("updates", relay.TensorType(updates_shape, str(updates_np.dtype)))
        out = relay.op.scatter_nd(data, indices, updates, "add")
        mod = tvm.IRModule()
        mod["main"] = relay.Function([data, indices, updates], out)
        check_result([data_np, indices_np, updates_np], mod, [ref_res])
    data = np.zeros((2, 2)).astype("int64")
    indices = np.array([[1, 1, 0], [0, 1, 0]])
    updates = np.array([2, 3, 0])
    out = np.array([[0, 0], [2, 3]])
    verify_scatter_nd(data, indices, updates, out)
@tvm.testing.uses_gpu
def test_scatter_nd_any_updates():
    """scatter_nd where the updates tensor is 2-D with a dynamic dimension."""
    def verify_scatter_nd_any_updates(data_np, indices_np, updates_np, ref_res):
        indices_shape = (2, relay.Any())
        updates_shape = (2, relay.Any())
        data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
        indices = relay.var("indices", relay.TensorType(indices_shape, str(indices_np.dtype)))
        updates = relay.var("updates", relay.TensorType(updates_shape, str(updates_np.dtype)))
        out = relay.op.scatter_nd(data, indices, updates, "add")
        mod = tvm.IRModule()
        mod["main"] = relay.Function([data, indices, updates], out)
        check_result([data_np, indices_np, updates_np], mod, [ref_res], only_vm=True)
    data = np.zeros((3, 3)).astype("int64")
    indices = np.array([[1, 1], [0, 1]])
    updates = np.array([[2, 2], [1, 1]])
    out = np.array([[0, 0, 0], [0, 0, 0], [2, 2, 1]])
    verify_scatter_nd_any_updates(data, indices, updates, out)
@tvm.testing.uses_gpu
def test_gather():
    """relay.gather with dynamic data/indices shapes, checked against topi reference."""
    def verify_gather(data_shape, indices_shape, data_shape_np, indices_shape_np, axis):
        x = relay.var("x", relay.TensorType(data_shape, "float32"))
        y = relay.var("y", relay.TensorType(indices_shape, "int32"))
        z = relay.gather(x, axis, y)
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x, y], z)
        data_np = np.random.uniform(size=data_shape_np).astype("float32")
        indices_np = np.random.randint(low=0, high=2, size=indices_shape_np, dtype="int32")
        ref_res = tvm.topi.testing.gather_python(data_np, axis, indices_np)
        check_result([data_np, indices_np], mod, [ref_res])
    verify_gather((relay.Any(),), (relay.Any(),), (10,), (10,), 0)
    verify_gather((2, 2), (2, relay.Any()), (2, 2), (2, 3), 1)
    verify_gather((relay.Any(), 2), (2, relay.Any()), (2, 2), (2, 3), 1)
    verify_gather((relay.Any(), relay.Any()), (relay.Any(), relay.Any()), (2, 3), (1, 3), 0)
@tvm.testing.uses_gpu
def test_searchsorted():
    """relay.searchsorted with fully dynamic sorted-sequence and values shapes."""
    def verify_searchsorted(
        sorted_sequence_shape, values_shape, sorted_sequence_shape_np, values_shape_np
    ):
        x = relay.var("x", relay.TensorType(sorted_sequence_shape, "float32"))
        y = relay.var("y", relay.TensorType(values_shape, "float32"))
        z = relay.searchsorted(x, y)
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x, y], z)
        # The sequence must be sorted along the last axis for searchsorted.
        x_np = np.sort(np.random.uniform(size=sorted_sequence_shape_np).astype("float32"), axis=-1)
        y_np = np.random.uniform(size=values_shape_np).astype("float32")
        ref_res = searchsorted_ref(x_np, y_np, False, "int32")
        check_result([x_np, y_np], mod, [ref_res])
    for shape_np, values_shape_np in zip([(8, 9, 10), (10,), (11,)], [(8, 9, 20), (5,), (8, 9, 7)]):
        sorted_sequence_shape = (relay.Any(),) * len(shape_np)
        values_shape = (relay.Any(),) * len(values_shape_np)
        verify_searchsorted(
            sorted_sequence_shape,
            values_shape,
            shape_np,
            values_shape_np,
        )
if __name__ == "__main__":
    # Dispatch through pytest so target/device parametrization is honored.
    tvm.testing.main()
| 77,491 | 33.780969 | 100 | py |
tvm | tvm-main/tests/python/relay/test_param_dict.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
from tvm import te, runtime
import json
import base64
from tvm._ffi.base import py_str
from tvm.relay.op import add
from tvm import relay
from tvm import rpc
from tvm.contrib import utils, graph_executor
def test_save_load():
    """Round-trip a param dict through save_param_dict / load_param_dict."""
    arrays = {
        "x": np.ones((10, 2)).astype("float32"),
        "y": np.ones((1, 2, 3)).astype("float32"),
    }
    serialized = runtime.save_param_dict(arrays)
    assert isinstance(serialized, bytearray)
    restored = relay.load_param_dict(serialized)
    assert len(restored) == 2
    # Every entry must survive serialization unchanged.
    np.testing.assert_equal(restored["x"].numpy(), arrays["x"])
    np.testing.assert_equal(restored["y"].numpy(), arrays["y"])
def test_ndarray_reflection():
    """Check serialization when two param-dict keys alias the same NDArray."""
    source = tvm.nd.array(np.random.uniform(size=(10, 2)).astype("float32"))
    params = {"x": source, "y": source}
    assert params["x"].same_as(params["y"])
    # Serialize then deserialize the aliased dict.
    restored = relay.load_param_dict(runtime.save_param_dict(params))
    # The data survives the round trip ...
    np.testing.assert_equal(restored["x"].numpy(), source.numpy())
    # ... and both keys still hold identical contents.
    np.testing.assert_equal(restored["x"].numpy(), restored["y"].numpy())
def test_bigendian_rpc_param():
    """Test big endian rpc when there is a PowerPC RPC server available"""
    # Opt-in test: skipped unless TVM_POWERPC_TEST_HOST points at a live server.
    host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
    port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
    if host is None:
        return
    def verify_graph_executor(remote, target, shape, dtype):
        # Build "x + 1" with x bound as a parameter, then run it on the remote.
        x = relay.var("x")
        y = relay.const(1)
        z = relay.add(x, y)
        func = relay.Function([x], z)
        x_in = np.ones(shape).astype(dtype)
        params = {"x": x_in}
        graph, lib, params = relay.build(func, target=target, params=params)
        temp = utils.tempdir()
        path_dso = temp.relpath("dev_lib.o")
        lib.save(path_dso)
        remote.upload(path_dso)
        lib = remote.load_module("dev_lib.o")
        dev = remote.cpu(0)
        mod = graph_executor.create(graph, lib, dev)
        # Params are serialized on a little-endian host and loaded big-endian.
        mod.load_params(runtime.save_param_dict(params))
        mod.run()
        out = mod.get_output(0, tvm.nd.empty(shape, dtype=dtype, device=dev))
        tvm.testing.assert_allclose(x_in + 1, out.numpy())
    print("Test RPC connection to PowerPC...")
    remote = rpc.connect(host, port)
    target = "llvm -mtriple=powerpc-linux-gnu"
    for dtype in ["float32", "float64", "int32", "int8"]:
        verify_graph_executor(remote, target, (10,), dtype)
if __name__ == "__main__":
    # Run the tests directly when invoked as a script.
    test_save_load()
    test_ndarray_reflection()
    test_bigendian_rpc_param()
| 3,626 | 36.78125 | 89 | py |
tvm | tvm-main/tests/python/relay/test_op_fast_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import scipy
from scipy import special
import tvm
import tvm.testing
import tvm.relay as relay
from tvm import topi
from tvm import te
from tvm.contrib import graph_executor
from tvm.topi import testing
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fastmath(target, dev):
    """Check the FastMath pass rewrites ops to fast_* variants that stay accurate."""
    def test_apply(relay_op, name, f_numpy, low, high, step, dtype="float32"):
        """Compile a one-op graph with FastMath and compare with f_numpy on [low, high)."""
        a_np = np.arange(low, high, step).astype(dtype).reshape((1, -1))
        b_np = f_numpy(a_np)
        x = relay.var("x", shape=a_np.shape, dtype="float32")
        y = relay_op(x)
        func = relay.Function([x], y)
        mod = tvm.IRModule.from_expr(func)
        with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
            graph, lib, params = relay.build(mod, target=target, params=None)
        # Check that the op related to fast math have been convered to function in lib
        func_name = "tvmgen_default_fused_" + name
        # When there're multiple targets in tvm.testing.parametrize_targets, the function
        # built will have a "_1" in function name
        assert func_name in graph
        m = graph_executor.create(graph, lib, dev)
        # Set inputs
        m.set_input("x", tvm.nd.array(a_np, dev))
        m.set_input(**params)
        # Execute
        m.run()
        # Get outputs
        tvm_output = m.get_output(0)
        tvm.testing.assert_allclose(tvm_output.numpy(), b_np, rtol=1e-5, atol=1e-5)
    test_apply(relay.exp, "fast_exp", np.exp, low=-88, high=88, step=0.01)
    test_apply(relay.erf, "fast_erf", scipy.special.erf, low=-10, high=10, step=0.01)
    test_apply(relay.tanh, "fast_tanh", np.tanh, low=-10, high=10, step=0.01)
    test_apply(
        relay.nn.fast_softmax,
        "nn_fast_softmax",
        tvm.topi.testing.softmax_python,
        low=-10,
        high=10,
        step=0.01,
    )
if __name__ == "__main__":
    # test_fastmath is parametrized by the (target, dev) pytest fixtures, so it
    # cannot be called directly without arguments; dispatch through pytest via
    # tvm.testing.main() instead (tvm.testing is imported above).
    tvm.testing.main()
| 2,705 | 35.567568 | 89 | py |
tvm | tvm-main/tests/python/relay/test_pass_convert_op_layout.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test alter op layout pass"""
import pytest
import tvm
from tvm import relay, te
from tvm.relay import analysis, transform
from tvm.relay.op import op as reg
from tvm.relay.op import register_alter_op_layout
from tvm.relay.quantize._annotate import (
attach_simulated_quantize,
QAnnotateKind,
)
from tvm.relay.transform.infer_layout_utils import InferCorrectLayoutOutput
def run_opt_pass(expr, passes):
    """Apply one pass or a list of passes to *expr*.

    Returns the transformed "main" function when *expr* was a relay.Function,
    otherwise the transformed function's body.
    """
    if not isinstance(passes, list):
        passes = [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(passes)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    main = mod["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def test_no_convert_layout():
    """ConvertLayout is a no-op when conv2d is already in the desired layout."""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y
    def expected():
        return before()
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_binary_no_convert_layout():
    """ConvertLayout with an empty layout map leaves qnn.add untouched."""
    def before():
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(1, 2))
        return relay.Function(
            [x, y],
            relay.qnn.op.add(
                x,
                y,
                lhs_scale=relay.const(0.0156863, "float32"),
                lhs_zero_point=relay.const(127, "int32"),
                rhs_scale=relay.const(0.0117647, "float32"),
                rhs_zero_point=relay.const(85, "int32"),
                output_scale=relay.const(0.0235294, "float32"),
                output_zero_point=relay.const(128, "int32"),
            ),
        )
    def expected():
        return before()
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_layout():
    """NHWC/HWIO conv2d is rewritten to NCHW/OIHW with layout_transforms inserted."""
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_nhwc_convert_layout():
    """NCHW/OIHW conv2d is rewritten to NHWC/HWIO with layout_transforms inserted."""
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y
    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight = relay.layout_transform(weight, "OIHW", "HWIO")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_transpose_convert_layout():
    """conv2d_transpose in NHWC/HWIO is rewritten to NCHW/IOHW."""
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d_transpose(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "IOHW")
        y = relay.nn.conv2d_transpose(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d_transpose": ["NCHW", "IOHW"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bias_pool_convert_layout():
    """A conv2d + bias_add + pool NHWC chain is fully converted to NCHW."""
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.bias_add(y, bias, axis=3)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        # bias_add becomes broadcast add after its operand is layout-transformed.
        bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        bias = relay.layout_transform(bias, "NHWC", "NCHW")
        y = relay.add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.cast(y, "int32")
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_bias_pool_uses_specified_convert_layout():
    """conv2d converts to NCHW while max_pool2d keeps NHWC per the explicit mapping."""
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.bias_add(y, bias, axis=3)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC")
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        bias = relay.layout_transform(bias, "NHWC", "NCHW")
        y = relay.add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        # Pool stays NHWC, so a transform back is inserted before it.
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC", out_layout="NHWC")
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y
    a = before()
    a = run_opt_pass(
        a,
        transform.ConvertLayout({"nn.conv2d": ["NCHW", "OIHW"], "nn.max_pool2d": ["NHWC"]}),
    )
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_conv_concat_convert_layout():
    """Two chained NHWC conv2ds feeding a concatenate are converted to NCHW together."""
    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y1 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        ret = relay.concatenate([y, y1], axis=3)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y
    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(y, weight1, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y1 = relay.nn.conv2d(y, weight2, channels=64, kernel_size=(3, 3), padding=(1, 1))
        # Concat axis moves from 3 (NHWC channels) to 1 (NCHW channels).
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y
    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_deformable_conv_bias_pool_convert_layout():
    """deformable_conv2d + bias_add + relu + max_pool converts in both
    directions (NHWC<->NCHW); the offset tensor is transformed like data."""

    def before(N, CI, H, W, CO, KH, KW, layout):
        """Build the graph in `layout`; `offset` is left shapeless so its
        layout is taken from the deformable conv."""
        if layout == "NCHW":
            data_shape = (N, CI, H, W)
            weight_shape = (CO, CI, KH, KW)
            kernel_layout = "OIHW"
        else:
            data_shape = (N, H, W, CI)
            weight_shape = (KH, KW, CI, CO)
            kernel_layout = "HWIO"
        bias_shape = (CO,)
        data = relay.var("data", shape=data_shape, dtype="float32")
        offset = relay.var("offset")
        weight = relay.var("weight", shape=weight_shape, dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")
        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout,
            kernel_layout=kernel_layout,
        )
        # bias sits on the channel axis: last for NHWC, 1 for NCHW.
        y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout):
        """Reference graph: everything runs in dst_layout, with transforms on
        data/offset/weight at the entry and one transform back at the exit.
        OH/OW are the conv output spatial dims (needed for the offset shape)."""
        layout_map = {"src": {}, "dst": {}}
        if src_layout == "NCHW":
            nchw = layout_map["src"]
            nhwc = layout_map["dst"]
        else:
            nchw = layout_map["dst"]
            nhwc = layout_map["src"]
        nchw["data_layout"] = "NCHW"
        nchw["data_shape"] = (N, CI, H, W)
        nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
        nchw["weight_shape"] = (CO, CI, KH, KW)
        nchw["kernel_layout"] = "OIHW"
        nhwc["data_layout"] = "NHWC"
        nhwc["data_shape"] = (N, H, W, CI)
        nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
        nhwc["weight_shape"] = (KH, KW, CI, CO)
        nhwc["kernel_layout"] = "HWIO"
        bias_shape = (CO,)
        data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
        offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
        weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")
        data = relay.layout_transform(
            data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        offset = relay.layout_transform(
            offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        weight = relay.layout_transform(
            weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
        )
        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout_map["dst"]["data_layout"],
            kernel_layout=layout_map["dst"]["kernel_layout"],
        )
        # bias_add is legalized to expand_dims + add; the broadcast shape
        # depends on the source layout before being transformed to dst.
        if layout_map["src"]["data_layout"] == "NHWC":
            bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        else:
            bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
            bias = relay.expand_dims(bias, axis=0)
        bias = relay.layout_transform(
            bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        y = relay.add(y, bias)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout_map["dst"]["data_layout"])
        y = relay.cast(y, "int32")
        y = relay.layout_transform(
            y, layout_map["dst"]["data_layout"], layout_map["src"]["data_layout"]
        )
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # NHWC -> NCHW
    a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW"), transform.InferType()
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # NCHW -> NHWC
    a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
    a = run_opt_pass(a, transform.ConvertLayout({"nn.deformable_conv2d": ["NHWC", "default"]}))
    b = run_opt_pass(
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC"), transform.InferType()
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_deformable_conv_bias_pool_uses_specified_convert_layout():
    """Same graph as test_deformable_conv_bias_pool_convert_layout, but
    nn.max_pool2d is given its own desired layout in the ConvertLayout map, so
    an extra transform may be inserted before the pool."""

    def before(N, CI, H, W, CO, KH, KW, layout):
        """Build the graph in `layout`; `offset` is left shapeless so its
        layout is taken from the deformable conv."""
        if layout == "NCHW":
            data_shape = (N, CI, H, W)
            weight_shape = (CO, CI, KH, KW)
            kernel_layout = "OIHW"
        else:
            data_shape = (N, H, W, CI)
            weight_shape = (KH, KW, CI, CO)
            kernel_layout = "HWIO"
        bias_shape = (CO,)
        data = relay.var("data", shape=data_shape, dtype="float32")
        offset = relay.var("offset")
        weight = relay.var("weight", shape=weight_shape, dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")
        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout,
            kernel_layout=kernel_layout,
        )
        y = relay.nn.bias_add(y, bias, axis=-1 if layout == "NHWC" else 1)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout=layout)
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected(N, CI, H, W, CO, KH, KW, OH, OW, src_layout, dst_layout, max_pool_layout=None):
        """Reference graph. `max_pool_layout` is the layout requested for the
        pool; when it differs from the conv's dst layout, a transform is
        inserted right before max_pool2d instead of after the cast."""
        layout_map = {"src": {}, "dst": {}}
        if src_layout == "NCHW":
            nchw = layout_map["src"]
            nhwc = layout_map["dst"]
        else:
            nchw = layout_map["dst"]
            nhwc = layout_map["src"]
        nchw["data_layout"] = "NCHW"
        nchw["data_shape"] = (N, CI, H, W)
        nchw["offset_shape"] = (N, KH * KW * 2, OH, OW)
        nchw["weight_shape"] = (CO, CI, KH, KW)
        nchw["kernel_layout"] = "OIHW"
        nhwc["data_layout"] = "NHWC"
        nhwc["data_shape"] = (N, H, W, CI)
        nhwc["offset_shape"] = (N, OH, OW, KH * KW * 2)
        nhwc["weight_shape"] = (KH, KW, CI, CO)
        nhwc["kernel_layout"] = "HWIO"
        bias_shape = (CO,)
        data = relay.var("data", shape=layout_map["src"]["data_shape"], dtype="float32")
        offset = relay.var("offset", shape=layout_map["src"]["offset_shape"], dtype="float32")
        weight = relay.var("weight", shape=layout_map["src"]["weight_shape"], dtype="float32")
        bias = relay.var("bias", shape=bias_shape, dtype="float32")
        data = relay.layout_transform(
            data, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        offset = relay.layout_transform(
            offset, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        weight = relay.layout_transform(
            weight, layout_map["src"]["kernel_layout"], layout_map["dst"]["kernel_layout"]
        )
        y = relay.nn.deformable_conv2d(
            data,
            offset,
            weight,
            kernel_size=(KH, KW),
            channels=CO,
            data_layout=layout_map["dst"]["data_layout"],
            kernel_layout=layout_map["dst"]["kernel_layout"],
        )
        # bias_add is legalized to expand_dims + add in the source layout,
        # then transformed to the destination layout.
        if layout_map["src"]["data_layout"] == "NHWC":
            bias = relay.expand_dims(bias, axis=0, num_newaxis=3)
        else:
            bias = relay.expand_dims(bias, axis=1, num_newaxis=2)
            bias = relay.expand_dims(bias, axis=0)
        bias = relay.layout_transform(
            bias, layout_map["src"]["data_layout"], layout_map["dst"]["data_layout"]
        )
        y = relay.add(y, bias)
        y = relay.nn.relu(y)
        if max_pool_layout != layout_map["dst"]["data_layout"]:
            y = relay.layout_transform(y, layout_map["dst"]["data_layout"], max_pool_layout)
        y = relay.nn.max_pool2d(
            y, pool_size=(2, 2), layout=max_pool_layout, out_layout=max_pool_layout
        )
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # NHWC -> NCHW
    a = before(1, 3, 224, 224, 32, 3, 3, "NHWC")
    a = run_opt_pass(
        a,
        transform.ConvertLayout(
            {"nn.deformable_conv2d": ["NCHW", "default"], "nn.max_pool2d": ["NHWC"]}
        ),
    )
    # - in the before() func, its last argument "NHWC" is also the layout of max_pool
    b = run_opt_pass(
        # max_pool has its own layout argument
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NHWC", "NCHW", max_pool_layout="NHWC"),
        transform.InferType(),
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)

    # NCHW -> NHWC
    a = before(1, 3, 224, 224, 32, 3, 3, "NCHW")
    a = run_opt_pass(
        a,
        transform.ConvertLayout(
            {"nn.deformable_conv2d": ["NHWC", "default"], "nn.max_pool2d": ["NCHW"]}
        ),
    )
    # - in the before() func, its last argument "NCHW" is also the layout of max_pool
    b = run_opt_pass(
        # max_pool has its own layout argument
        expected(1, 3, 224, 224, 32, 3, 3, 222, 222, "NCHW", "NHWC", max_pool_layout="NCHW"),
        transform.InferType(),
    )
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_dual_path_convert_layout():
    """Two consumers of one converted conv output (a second conv and a
    batch_flatten) should each get their own NCHW->NHWC transform."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        w1 = relay.var("weight1", shape=(3, 3, 64, 32))
        w2 = relay.var("weight2", shape=(3, 3, 32, 32))
        trunk = relay.nn.conv2d(
            data,
            w1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        trunk = relay.nn.relu(trunk)
        path_a = relay.nn.conv2d(
            trunk,
            w2,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        path_a = relay.nn.relu(path_a)
        path_b = relay.nn.batch_flatten(trunk)
        bundle = relay.Tuple([path_a, path_b])
        return relay.Function(analysis.free_vars(bundle), bundle)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        w1 = relay.var("weight1", shape=(3, 3, 64, 32))
        w2 = relay.var("weight2", shape=(3, 3, 32, 32))
        w1 = relay.layout_transform(w1, "HWIO", "OIHW")
        w2 = relay.layout_transform(w2, "HWIO", "OIHW")
        trunk = relay.layout_transform(data, "NHWC", "NCHW")
        trunk = relay.nn.conv2d(trunk, w1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        trunk = relay.nn.relu(trunk)
        # Each branch converts back to NHWC independently.
        path_a = relay.nn.conv2d(trunk, w2, channels=32, kernel_size=(3, 3), padding=(1, 1))
        path_a = relay.nn.relu(path_a)
        path_a = relay.layout_transform(path_a, "NCHW", "NHWC")
        path_b = relay.layout_transform(trunk, "NCHW", "NHWC")
        path_b = relay.nn.batch_flatten(path_b)
        bundle = relay.Tuple([path_a, path_b])
        return relay.Function(analysis.free_vars(bundle), bundle)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_bn_convert_layout():
    """conv2d + batch_norm in NHWC must yield exactly one NCHW->NHWC
    layout_transform after conversion (the transform follows the bn)."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight1", shape=(3, 3, 64, 32))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        gamma = relay.var("gamma")
        beta = relay.var("beta")
        mean = relay.var("mean")
        variance = relay.var("variance")
        out, _, _ = relay.nn.batch_norm(out, gamma, beta, mean, variance, axis=3)
        return relay.Function(analysis.free_vars(out), out)

    converted = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))

    # Count NCHW -> NHWC transforms in the converted graph; exactly one expected.
    matches = []

    def record(node):
        matches.append(
            isinstance(node, tvm.relay.expr.Call)
            and node.op.name == "layout_transform"
            and node.attrs.src_layout == "NCHW"
            and node.attrs.dst_layout == "NHWC"
        )

    relay.analysis.post_order_visit(converted, record)
    assert sum(matches) == 1
def test_slice_like_convert_layout():
    """slice_like axes must follow the layout conversion: NHWC spatial axes
    [1, 2] map to NCHW [2, 3] and vice versa; inputs without layout info
    block the conversion and keep the original axes."""

    def verify_slice_like(after, expected_axes):
        # Verify if the slice_like after the convert layout has the expected axes.
        has_expected = list()
        checker = lambda x: has_expected.append(
            isinstance(x, tvm.relay.expr.Call)
            and x.op.name == "slice_like"
            and str(x.attrs.axes) == str(expected_axes)
        )
        relay.analysis.post_order_visit(after, checker)
        assert any(has_expected)

    # NHWC conv converted to NCHW: spatial axes [1, 2] become [2, 3].
    def func_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.slice_like(y, y, axes=[1, 2])
        return relay.Function(analysis.free_vars(out), out)

    after = run_opt_pass(func_nhwc(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    verify_slice_like(after, [2, 3])

    # NCHW conv converted to NHWC: spatial axes [2, 3] become [1, 2].
    def func_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        out = relay.slice_like(y, y, axes=[2, 3])
        return relay.Function(analysis.free_vars(out), out)

    after = run_opt_pass(func_nchw(), transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
    verify_slice_like(after, [1, 2])

    # Second slice_like input is a free var with no layout: axes stay [1, 2].
    def func_vars():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        # z has no layout information so convert layout won't happen.
        z = relay.var("y", shape=(1, 56, 56, 32))
        out = relay.slice_like(y, z, axes=[1, 2])
        return relay.Function(analysis.free_vars(out), out)

    after = run_opt_pass(func_vars(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    verify_slice_like(after, [1, 2])
def test_transpose_convert_layout():
    """transpose axes must be rewritten when the producer's layout converts,
    and the total number of inserted layout_transform ops is checked."""

    def verify_transpose(after, expected_axes, expected_transform_cnt):
        # Verify if the transpose after the convert layout has the expected axes.
        has_expected = list()
        checker = lambda x: has_expected.append(
            isinstance(x, tvm.relay.expr.Call)
            and x.op.name == "transpose"
            and str(x.attrs.axes) == str(expected_axes)
        )
        relay.analysis.post_order_visit(after, checker)
        assert any(has_expected), after

        # Also count every layout_transform in the graph.
        is_transform = list()
        checker = lambda x: is_transform.append(
            1 if isinstance(x, tvm.relay.expr.Call) and x.op.name == "layout_transform" else 0
        )
        relay.analysis.post_order_visit(after, checker)
        assert (
            sum(is_transform) == expected_transform_cnt
        ), "Expected %s layout_transform, but get\n%s" % (expected_transform_cnt, after)

    # NHWC -> NCHW: the explicit [0, 3, 1, 2] transpose becomes the identity.
    def nhwc_to_nchw():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        z = relay.var("z", shape=(56, 56, 32))
        out = relay.add(y, z)
        out = relay.transpose(out, axes=[0, 3, 1, 2])
        out = relay.nn.batch_flatten(out)
        func = relay.Function(analysis.free_vars(out), out)
        return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))

    verify_transpose(nhwc_to_nchw(), [0, 1, 2, 3], 3)

    # NCHW -> NHWC, with a negative axis in the original transpose.
    def nchw_to_nhwc():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        z = relay.var("z", shape=(32, 56, 56))
        out = relay.add(y, z)
        out = relay.transpose(out, axes=[0, 2, -1, 1])  # Also test a negative axis.
        out = relay.nn.batch_flatten(out)
        func = relay.Function(analysis.free_vars(out), out)
        return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))

    verify_transpose(nchw_to_nhwc(), [0, 1, 2, 3], 3)

    # transpose with no axes defaults to the reversed axes; those get remapped too.
    def default_axes():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(32, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        z = relay.var("z", shape=(32, 56, 56))
        out = relay.add(y, z)
        out = relay.transpose(out)  # No axes provided, will use the reversed axes.
        func = relay.Function(analysis.free_vars(out), out)
        return run_opt_pass(func, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))

    verify_transpose(default_axes(), [2, 1, 3, 0], 3)
def test_resnet_convert_layout():
    """A residual-style block (two NHWC convs sharing an input, summed, then
    globally pooled) converts fully to NCHW with one transform at each edge."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        w3x3 = relay.var("weight1", shape=(3, 3, 64, 32))
        w1x1 = relay.var("weight2", shape=(1, 1, 64, 32))
        main_path = relay.nn.conv2d(
            data,
            w3x3,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        main_path = relay.nn.relu(main_path)
        shortcut = relay.nn.conv2d(
            data, w1x1, channels=32, kernel_size=(1, 1), data_layout="NHWC", kernel_layout="HWIO"
        )
        shortcut = relay.nn.relu(shortcut)
        merged = main_path + shortcut
        merged = relay.nn.global_max_pool2d(merged, layout="NHWC")
        return relay.Function(analysis.free_vars(merged), merged)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        w3x3 = relay.var("weight1", shape=(3, 3, 64, 32))
        w1x1 = relay.var("weight2", shape=(1, 1, 64, 32))
        w3x3 = relay.layout_transform(w3x3, "HWIO", "OIHW")
        w1x1 = relay.layout_transform(w1x1, "HWIO", "OIHW")
        data = relay.layout_transform(data, "NHWC", "NCHW")
        main_path = relay.nn.conv2d(data, w3x3, channels=32, kernel_size=(3, 3), padding=(1, 1))
        main_path = relay.nn.relu(main_path)
        shortcut = relay.nn.conv2d(data, w1x1, channels=32, kernel_size=(1, 1))
        shortcut = relay.nn.relu(shortcut)
        merged = main_path + shortcut
        merged = relay.nn.global_max_pool2d(merged)
        merged = relay.layout_transform(merged, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(merged), merged)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_resnet_pool_uses_specified_convert_layout():
    """Like test_resnet_convert_layout, but nn.global_max_pool2d is pinned to
    NHWC in the ConvertLayout map, so the transform back to NHWC moves in
    front of the pool instead of after it."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
        weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(
            x, weight2, channels=32, kernel_size=(1, 1), data_layout="NHWC", kernel_layout="HWIO"
        )
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y, layout="NHWC")
        return relay.Function(analysis.free_vars(y), y)

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
        weight2 = relay.var("weight2", shape=(1, 1, 64, 32))
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
        y2 = relay.nn.relu(y2)
        y = y + y2
        # The pool keeps its requested NHWC layout, so the transform precedes it.
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.nn.global_max_pool2d(y, layout="NHWC", out_layout="NHWC")
        return relay.Function(analysis.free_vars(y), y)

    a = before()
    a = run_opt_pass(
        a,
        transform.ConvertLayout(
            {"nn.conv2d": ["NCHW", "default"], "nn.global_max_pool2d": ["NHWC"]}
        ),
    )
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_scalar_convert_layout():
    """A scalar constant added after a converted conv broadcasts in either
    layout, so the transform back to NHWC stays after the add."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.add(out, relay.const(1, "float32"))
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        data = relay.layout_transform(data, "NHWC", "NCHW")
        kernel = relay.layout_transform(kernel, "HWIO", "OIHW")
        out = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        out = relay.add(out, relay.const(1.0, "float32"))
        out = relay.layout_transform(out, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_conv_ln_convert_layout():
    """Check that layout transforms are propagated through layer_norm: its
    channel axis moves from 3 (NHWC) to 1 (NCHW)."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        out = relay.nn.layer_norm(out, gamma, beta, axis=3)
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        data = relay.layout_transform(data, "NHWC", "NCHW")
        kernel = relay.layout_transform(kernel, "HWIO", "OIHW")
        out = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        out = relay.nn.layer_norm(out, gamma, beta, axis=1)
        out = relay.layout_transform(out, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_conv_InstanceNorm_convert_layout():
    """Check that layout transforms are propagated through instance_norm: its
    channel axis moves from 3 (NHWC) to 1 (NCHW)."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        out = relay.nn.instance_norm(out, gamma, beta, axis=3)
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        data = relay.layout_transform(data, "NHWC", "NCHW")
        kernel = relay.layout_transform(kernel, "HWIO", "OIHW")
        out = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        out = relay.nn.instance_norm(out, gamma, beta, axis=1)
        out = relay.layout_transform(out, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_conv_bn_convert_layout():
    """Check that layout transforms are propagated through batch_norm: its
    channel axis moves from 3 (NHWC) to 1 (NCHW) and the transform back to
    NHWC follows the relu on the normalized output."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        moving_mean = relay.var("moving_mean", relay.TensorType((64,), "float32"))
        moving_var = relay.var("moving_var", relay.TensorType((64,), "float32"))
        out = relay.nn.batch_norm(out, gamma, beta, moving_mean, moving_var, axis=3)
        out = relay.nn.relu(out[0])
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        data = relay.layout_transform(data, "NHWC", "NCHW")
        kernel = relay.layout_transform(kernel, "HWIO", "OIHW")
        out = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        beta = relay.var("beta", relay.TensorType((64,), "float32"))
        gamma = relay.var("gamma", relay.TensorType((64,), "float32"))
        moving_mean = relay.var("moving_mean", relay.TensorType((64,), "float32"))
        moving_var = relay.var("moving_var", relay.TensorType((64,), "float32"))
        out = relay.nn.batch_norm(out, gamma, beta, moving_mean, moving_var, axis=1)
        out = relay.nn.relu(out[0])
        out = relay.layout_transform(out, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_qnn_conv_requantize_convert_layout():
    """qnn.conv2d + qnn.requantize converts NHWC -> NCHW; requantize picks up
    axis=1 (the NCHW channel axis) in the converted graph."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        # After conversion, requantize carries the NCHW channel axis.
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            axis=1,
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_concat_convert_layout():
    """qnn.conv2d feeding qnn.concatenate converts NHWC -> NCHW with the
    concat axis rewritten from 3 to 1.

    NOTE(review): `y1 = relay.cast(y, "int8")` re-casts the FIRST conv's
    output, so the second qnn.conv2d (and weight2) is dead code and both
    concatenate inputs are the same cast expression.  before() and expected()
    share the pattern, so the test still passes; confirm whether this is
    intentional before "fixing" it — making the second conv live would feed
    it an int32 tensor.
    """

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y, "int8")
        ret = relay.qnn.op.concatenate(
            [y, y1],
            [relay.const(1, "float32"), relay.const(1, "float32")],
            [relay.const(1, "int32"), relay.const(1, "int32")],
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            axis=3,
        )
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.qnn.op.conv2d(
            y,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y, "int8")
        # Concat axis moves from 3 (NHWC) to 1 (NCHW).
        ret = relay.qnn.op.concatenate(
            [y, y1],
            [relay.const(1, "float32"), relay.const(1, "float32")],
            [relay.const(1, "int32"), relay.const(1, "int32")],
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            axis=1,
        )
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_add_convert_layout():
    """qnn.conv2d feeding qnn.add converts NHWC -> NCHW with a single
    transform back to NHWC after the add.

    NOTE(review): `y1 = relay.cast(y, "int8")` re-casts the FIRST conv's
    output, so the second qnn.conv2d (and weight2) is dead code and qnn.add
    receives the same cast expression twice.  before() and expected() share
    the pattern, so the test still passes; confirm intent before changing it.
    """

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y, "int8")
        ret = relay.qnn.op.add(
            y,
            y1,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
        )
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64), dtype="int8")
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64), dtype="int8")
        weight1 = relay.layout_transform(weight1, "HWIO", "OIHW")
        weight2 = relay.layout_transform(weight2, "HWIO", "OIHW")
        y = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.qnn.op.conv2d(
            y,
            weight1,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y1 = relay.qnn.op.conv2d(
            y,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.cast(y, "int8")
        y1 = relay.cast(y, "int8")
        ret = relay.qnn.op.add(
            y,
            y1,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
        )
        ret = relay.layout_transform(ret, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_nhwc_convert_layout():
    """qnn.conv2d converts in the opposite direction too: NCHW/OIHW source
    graph to NHWC/HWIO, with transforms on data, weight, and the output."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
        weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
        weight = relay.var("weight", shape=(64, 64, 3, 3), dtype="int8")
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight = relay.layout_transform(weight, "OIHW", "HWIO")
        y = relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d": ["NHWC", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_conv_transpose_requantize_convert_layout():
    """qnn.conv2d_transpose + requantize converts NHWC -> NCHW; note the
    transposed conv's kernel converts HWIO -> IOHW (not OIHW), and requantize
    picks up axis=1."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        y = relay.qnn.op.conv2d_transpose(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        # conv2d_transpose's default NCHW kernel layout is IOHW.
        weight = relay.layout_transform(weight, "HWIO", "IOHW")
        y = relay.qnn.op.conv2d_transpose(
            x,
            weight,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            out_dtype="int32",
        )
        y = relay.qnn.op.requantize(
            y,
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "int32"),
            axis=1,
            out_dtype="int32",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"qnn.conv2d_transpose": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_convert_kernel_layout():
    """Check that convolution kernel layout is correctly transformed."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        return relay.Function(analysis.free_vars(conv), conv)

    def expected():
        # Only the kernel layout changes; data stays NHWC, so a single
        # layout_transform on the weight is expected.
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.layout_transform(
            relay.var("weight", shape=(3, 3, 64, 64)), "HWIO", "OHWI"
        )
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="OHWI",
        )
        return relay.Function(analysis.free_vars(conv), conv)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_conv_roi_align_convert_layout():
    """ConvertLayout moves both nn.conv2d and vision.roi_align to NHWC."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        rois = relay.var("rois", shape=(32, 5))
        y = relay.vision.roi_align(
            y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
        )
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        rois = relay.var("rois", shape=(32, 5))
        # roi_align follows the conv in NHWC; only the final output goes back.
        y = relay.vision.roi_align(
            y, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NHWC"
        )
        ret = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    desired_layouts = {
        "nn.conv2d": ["NHWC", "HWIO"],
        "vision.roi_align": ["NHWC", "default"],
    }
    a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_strided_slice_convert_layout():
    """strided_slice begin/end/strides are remapped when the layout changes."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = relay.nn.relu(y)
        # Partial begin/end lists: missing dims are implicitly full-range.
        y = relay.strided_slice(y, begin=[0, 1], end=[1, -1, 10], strides=[1, 1, 2, 1])
        y = relay.Function([x, weight], y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight = relay.layout_transform(weight, "OIHW", "HWIO")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        # Slice parameters expanded to full rank and permuted NCHW -> NHWC.
        y = relay.strided_slice(y, begin=[0, 0, 0, 1], end=[1, 10, 56, -1], strides=[1, 2, 1, 1])
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(relay.analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_split_convert_layout():
    """split axis is remapped when ConvertLayout changes the conv layout.

    Covers negative axis, positive axis, section indices, and a blocked
    (NCHW4c) target layout where split indices are divided by the block size.
    """

    def _test_conv_split_convert_layout1():
        # Negative split axis (-1 == channel dim in NHWC) becomes axis 1 in NCHW.
        def before():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=2, axis=-1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        def expected():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=2, axis=1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            a = relay.layout_transform(a, "NCHW", "NHWC")
            b = relay.layout_transform(b, "NCHW", "NHWC")
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_split_convert_layout2():
        # Positive split axis 3 (NHWC channels) becomes axis 1 in NCHW.
        def before():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=2, axis=3).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        def expected():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=2, axis=1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            a = relay.layout_transform(a, "NCHW", "NHWC")
            b = relay.layout_transform(b, "NCHW", "NHWC")
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_split_convert_layout3():
        # Split by explicit section indices (5, 10) yields three outputs.
        def before():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=(5, 10), axis=-1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            c = relay.TupleGetItem(y, 2)
            out = relay.Tuple([a, b, c])
            return relay.Function(analysis.free_vars(out), out)

        def expected():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=(5, 10), axis=1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            c = relay.TupleGetItem(y, 2)
            a = relay.layout_transform(a, "NCHW", "NHWC")
            b = relay.layout_transform(b, "NCHW", "NHWC")
            c = relay.layout_transform(c, "NCHW", "NHWC")
            out = relay.Tuple([a, b, c])
            return relay.Function(analysis.free_vars(out), out)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_split_convert_layout_blocking():
        # Blocked layout NCHW4c: split index 256 becomes 256/4 = 64.
        def before():
            x = relay.var("x", shape=(1, 512, 38, 38))
            weight = relay.var("weight", shape=(512, 512, 3, 3))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                data_layout="NCHW",
                kernel_layout="OIHW",
            )
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=[256], axis=1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        def expected():
            x = relay.var("x", shape=(1, 512, 38, 38))
            weight = relay.var("weight", shape=(512, 512, 3, 3))
            weight = relay.layout_transform(weight, "OIHW", "OIHW4o")
            x = relay.layout_transform(x, "NCHW", "NCHW4c")
            y = relay.op.nn.contrib_conv2d_nchwc(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                padding=(0, 0),
                data_layout="NCHW4c",
                kernel_layout="OIHW4o",
            )
            y = relay.nn.relu(y)
            y = relay.op.split(y, indices_or_sections=[64], axis=1).astuple()
            a = relay.TupleGetItem(y, 0)
            b = relay.TupleGetItem(y, 1)
            a = relay.layout_transform(a, "NCHW4c", "NCHW")
            b = relay.layout_transform(b, "NCHW4c", "NCHW")
            out = relay.Tuple([a, b])
            return relay.Function(analysis.free_vars(out), out)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW4c", "OIHW4o"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    _test_conv_split_convert_layout1()
    _test_conv_split_convert_layout2()
    _test_conv_split_convert_layout3()
    _test_conv_split_convert_layout_blocking()
def test_conv_strided_slice_axes_convert_layout():
    """strided_slice with explicit axes has those axes remapped NHWC -> NCHW."""

    def before():
        x = relay.var("x", shape=(1, 28, 28, 32))
        weight = relay.var("weight", shape=(3, 3, 32, 32))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        # Slice along batch (0) and channel (3) only.
        y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1], axes=[0, 3])
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 28, 28, 32))
        weight = relay.var("weight", shape=(3, 3, 32, 32))
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        # Channel axis 3 (NHWC) becomes axis 1 (NCHW); begin/end unchanged.
        y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1], axes=[0, 1])
        y = relay.layout_transform(y, "NCHW", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_topk_convert_layout():
    """topk axis is remapped (NHWC axis 2 -> NCHW axis 3) by ConvertLayout."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.topk(y, k=2, axis=2)
        # topk returns a TupleWrapper (values, indices); flatten it for the body.
        if isinstance(y, relay.expr.TupleWrapper):
            y = y.astuple()
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        weight = relay.layout_transform(weight, "HWIO", "OIHW")
        x = relay.layout_transform(x, "NHWC", "NCHW")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.topk(y, k=2, axis=3).astuple()
        a = relay.TupleGetItem(y, 0)
        b = relay.TupleGetItem(y, 1)
        # Both tuple fields are transformed back to NHWC individually.
        a = relay.layout_transform(a, "NCHW", "NHWC")
        b = relay.layout_transform(b, "NCHW", "NHWC")
        out = relay.Tuple([a, b])
        return relay.Function(analysis.free_vars(out), out)

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_roi_pool_convert_layout():
    """ConvertLayout moves both nn.conv2d and vision.roi_pool to NHWC."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        rois = relay.var("rois", shape=(32, 5))
        y = relay.vision.roi_pool(
            y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NCHW"
        )
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight1 = relay.layout_transform(weight1, "OIHW", "HWIO")
        y = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        rois = relay.var("rois", shape=(32, 5))
        # roi_pool stays in NHWC; only the final output is transformed back.
        y = relay.vision.roi_pool(
            y, rois, pooled_size=(14, 14), spatial_scale=0.0625, layout="NHWC"
        )
        ret = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    a = before()
    desired_layouts = {
        "nn.conv2d": ["NHWC", "HWIO"],
        "vision.roi_pool": ["NHWC", "default"],
    }
    a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_default_keyword():
    """Check that the default keyword selects correct TVM default layout."""

    def before():
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight", shape=(64, 3, 3, 64))
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        return relay.Function(analysis.free_vars(conv), conv)

    def expected():
        # "default" for NCHW data resolves the kernel layout to OIHW, so
        # only the weight is transformed.
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.layout_transform(
            relay.var("weight", shape=(64, 3, 3, 64)), "OHWI", "OIHW"
        )
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        return relay.Function(analysis.free_vars(conv), conv)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_different_ops_convert_layout():
    """Check convert layout correctly supports converting the layout of
    different ops in the same graph.
    """

    def before():
        # conv2d -> qnn.conv2d -> conv2d_transpose, all NCHW/OHWI initially.
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
        weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
        weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
        out = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.cast(out, "int8")
        out = relay.qnn.op.conv2d(
            out,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.cast(out, "float32")
        out = relay.nn.conv2d_transpose(
            out,
            weight3,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OHWI",
        )
        out = relay.Function(analysis.free_vars(out), out)
        return out

    def expected():
        # Each op gets its own desired layout, so transforms are inserted
        # between the NHWC convs and the NCHW qnn.conv2d.
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1", shape=(64, 3, 3, 64))
        weight2 = relay.var("weight2", shape=(64, 3, 3, 64), dtype="int8")
        weight3 = relay.var("weight3", shape=(64, 3, 3, 64))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight1 = relay.layout_transform(weight1, "OHWI", "HWIO")
        out = relay.nn.conv2d(
            x,
            weight1,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.cast(out, "int8")
        out = relay.layout_transform(out, "NHWC", "NCHW")
        weight2 = relay.layout_transform(weight2, "OHWI", "OIHW")
        out = relay.qnn.op.conv2d(
            out,
            weight2,
            relay.const(1, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        out = relay.cast(out, "float32")
        out = relay.layout_transform(out, "NCHW", "NHWC")
        weight3 = relay.layout_transform(weight3, "OHWI", "HWIO")
        out = relay.nn.conv2d_transpose(
            out,
            weight3,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.layout_transform(out, "NHWC", "NCHW")
        out = relay.Function(analysis.free_vars(out), out)
        return out

    a = before()
    desired_layouts = {
        "nn.conv2d": ["NHWC", "HWIO"],
        "qnn.conv2d": ["NCHW", "OIHW"],
        "nn.conv2d_transpose": ["NHWC", "HWIO"],
    }
    a = run_opt_pass(a, transform.ConvertLayout(desired_layouts))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_no_desired_layout():
    """Ops without a desired layout keep their original layout.

    Only nn.conv2d is converted; roi_align stays NCHW, so a transform back
    to NCHW is inserted between the conv and roi_align.
    """

    def before():
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight1", shape=(64, 64, 3, 3))
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        rois = relay.var("rois", shape=(32, 5))
        out = relay.vision.roi_align(
            conv, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
        )
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        data = relay.layout_transform(relay.var("x", shape=(1, 64, 56, 56)), "NCHW", "NHWC")
        kernel = relay.layout_transform(
            relay.var("weight1", shape=(64, 64, 3, 3)), "OIHW", "HWIO"
        )
        conv = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        conv = relay.layout_transform(conv, "NHWC", "NCHW")
        rois = relay.var("rois", shape=(32, 5))
        out = relay.vision.roi_align(
            conv, rois, pooled_size=(14, 14), spatial_scale=0.0625, sample_ratio=2, layout="NCHW"
        )
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NHWC", "HWIO"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_convert_with_config():
    """LayoutConfig(skip_layers=[0]) leaves the first conv untouched while
    the second conv is converted to HWNC."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        y2 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y2 = relay.nn.relu(y2)
        out = relay.Function([x, weight, weight2], y2)
        return out

    def expected():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        weight2 = relay.layout_transform(weight2, "HWIO", "HWOI")
        # First conv keeps NHWC/HWIO because layer 0 is skipped.
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NHWC", "HWNC")
        y2 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="HWNC",
            kernel_layout="HWOI",
        )
        y2 = relay.nn.relu(y2)
        y2 = relay.layout_transform(y2, "HWNC", "NHWC")
        output = relay.Function(relay.analysis.free_vars(y2), y2)
        return output

    a = before()
    layout_config = relay.transform.LayoutConfig(skip_layers=[0])
    with layout_config:
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["HWNC", "default"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_squeeze_convert_layout():
    """squeeze axes are remapped when ConvertLayout changes the conv layout.

    Covers an explicit negative axis, the implicit all-unit-dims case, and
    an explicitly empty axis list.
    """

    def _test_conv_squeeze_convert_layout1():
        # specified axis is squeezed
        def before():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=1000,
                kernel_size=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.squeeze(y, axis=[-3])
            return relay.Function(analysis.free_vars(y), y)

        def expected():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
            y = relay.nn.relu(y)
            # NHWC axis -3 (H) maps to NCHW axis 2; result is rank-3 NCW.
            y = relay.squeeze(y, axis=[2])
            y = relay.layout_transform(y, "NCW", "NWC")
            return relay.Function(analysis.free_vars(y), y)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_squeeze_convert_layout2():
        # all axes of dimension 1 are squeezed
        def before():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=1000,
                kernel_size=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.squeeze(y)
            return relay.Function(analysis.free_vars(y), y)

        def expected():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
            y = relay.nn.relu(y)
            # All unit dims (N, H, W) are squeezed; output is 1-D, so no
            # trailing layout_transform is needed.
            y = relay.squeeze(y, [0, 2, 3])
            return relay.Function(analysis.free_vars(y), y)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_squeeze_convert_layout3():
        # squeeze axis is empty
        def before():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=1000,
                kernel_size=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            y = relay.squeeze(y, axis=[])
            return relay.Function(analysis.free_vars(y), y)

        def expected():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
            y = relay.nn.relu(y)
            # Empty axis list squeezes nothing; rank is preserved.
            y = relay.squeeze(y, axis=[])
            y = relay.layout_transform(y, "NCHW", "NHWC")
            return relay.Function(analysis.free_vars(y), y)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    _test_conv_squeeze_convert_layout1()
    _test_conv_squeeze_convert_layout2()
    _test_conv_squeeze_convert_layout3()
def test_conv_reduce_convert_layout():
    """Reduction axes are remapped across a layout change, and source spans
    on the rewritten calls are preserved."""

    def _test_conv_reduce_convert_layout1():
        def before():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=1000,
                kernel_size=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.nn.relu(y)
            # Chain of sums: spatial reduce, then channel, then full reduces.
            y = relay.sum(y, axis=(1, 2))
            y = relay.sum(y, axis=(1,))
            y = relay.sum(y)
            y = relay.sum(y)
            return relay.Function(analysis.free_vars(y), y)

        def expected():
            x = relay.var("x", shape=(1, 1, 1, 2048))
            weight = relay.var("weight", shape=(1, 1, 2048, 1000))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=1000, kernel_size=(1, 1))
            y = relay.nn.relu(y)
            # NHWC spatial axes (1, 2) map to NCHW axes (2, 3).
            y = relay.sum(y, axis=(2, 3))
            y = relay.sum(y, axis=(1,))
            y = relay.sum(y)
            y = relay.sum(y)
            return relay.Function(analysis.free_vars(y), y)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    def _test_conv_reduce_convert_layout2():
        def _set_span(y, text):
            # Re-wrap the call with an explicit source span so we can check
            # that ConvertLayout keeps spans on rewritten nodes.
            return relay.Call(
                y.op, y.args, y.attrs, y.type_args, relay.Span(relay.SourceName(text), 0, 0, 0, 0)
            )

        def before():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=512,
                kernel_size=(3, 3),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = _set_span(y, "SpanConv2D")
            y = relay.nn.relu(y)
            y = _set_span(y, "SpanRelu")
            y = relay.multiply(y, y)
            y = _set_span(y, "SpanMultiply")
            y = relay.sum(y, axis=(3,), keepdims=True)
            y = _set_span(y, "SpanSum")
            return relay.Function(analysis.free_vars(y), y)

        def expected():
            x = relay.var("x", shape=(1, 38, 38, 512))
            weight = relay.var("weight", shape=(3, 3, 512, 512))
            weight = relay.layout_transform(weight, "HWIO", "OIHW")
            x = relay.layout_transform(x, "NHWC", "NCHW")
            y = relay.nn.conv2d(x, weight, channels=512, kernel_size=(3, 3))
            y = relay.nn.relu(y)
            y = relay.multiply(y, y)
            y = relay.sum(y, axis=(1,), keepdims=True)
            y = relay.layout_transform(y, "NCHW", "NHWC")
            return relay.Function(analysis.free_vars(y), y)

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        # Spans must survive the rewrite (structural_equal ignores them).
        assert "SpanConv2D" in a.astext()
        assert "SpanRelu" in a.astext()
        assert "SpanMultiply" in a.astext()
        assert "SpanSum" in a.astext()
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    _test_conv_reduce_convert_layout1()
    _test_conv_reduce_convert_layout2()
def test_image_resize2d_convert_layout():
    """ConvertLayout converts image.resize2d in both layout directions."""

    def _test_image_resize_convert_layout_nchw_to_nhwc():
        def before():
            inp = relay.var("x", shape=(1, 2, 4, 4))
            out = relay.image.resize2d(inp, (8, 8))
            return relay.Function([inp], out)

        def expected():
            inp = relay.var("x", shape=(1, 2, 4, 4))
            cast = relay.layout_transform(inp, "NCHW", "NHWC")
            out = relay.image.resize2d(cast, (8, 8), layout="NHWC")
            out = relay.layout_transform(out, "NHWC", "NCHW")
            return relay.Function(relay.analysis.free_vars(out), out)

        actual = run_opt_pass(before(), transform.ConvertLayout({"image.resize2d": ["NHWC"]}))
        reference = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)

    def _test_image_resize_convert_layout_nhwc_to_nchw():
        def before():
            inp = relay.var("x", shape=(1, 4, 4, 2))
            out = relay.image.resize2d(inp, (8, 8), layout="NHWC")
            return relay.Function([inp], out)

        def expected():
            inp = relay.var("x", shape=(1, 4, 4, 2))
            cast = relay.layout_transform(inp, "NHWC", "NCHW")
            out = relay.image.resize2d(cast, (8, 8), layout="NCHW")
            out = relay.layout_transform(out, "NCHW", "NHWC")
            return relay.Function(relay.analysis.free_vars(out), out)

        actual = run_opt_pass(before(), transform.ConvertLayout({"image.resize2d": ["NCHW"]}))
        reference = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)

    _test_image_resize_convert_layout_nchw_to_nhwc()
    _test_image_resize_convert_layout_nhwc_to_nchw()
def test_conv_image_resize2d_convert_layout():
    """Check that layout transforms are propagated through image resize."""

    def before():
        data = relay.var("x", shape=(1, 56, 56, 64))
        kernel = relay.var("weight", shape=(3, 3, 64, 64))
        out = relay.nn.conv2d(
            data,
            kernel,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        out = relay.image.resize2d(out, (112, 112), layout="NHWC")
        return relay.Function(analysis.free_vars(out), out)

    def expected():
        # resize2d follows the conv into NCHW; a single transform back to
        # NHWC is appended at the end.
        data = relay.layout_transform(relay.var("x", shape=(1, 56, 56, 64)), "NHWC", "NCHW")
        kernel = relay.layout_transform(relay.var("weight", shape=(3, 3, 64, 64)), "HWIO", "OIHW")
        out = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        out = relay.image.resize2d(out, (112, 112), layout="NCHW")
        out = relay.layout_transform(out, "NCHW", "NHWC")
        return relay.Function(analysis.free_vars(out), out)

    actual = run_opt_pass(before(), transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_infer_correct_layout():
    """Check that a custom InferCorrectLayout registration is consulted.

    Registers an nn.relu layout-inference hook at a higher priority level
    and verifies ConvertLayout invokes it.
    """
    test_infer_correct_layout_flag = False

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    # level=11 overrides the default registration for nn.relu.
    @reg.register_infer_correct_layout("nn.relu", level=11)
    def infer_correct_layout_relu(attrs, new_in_layouts, old_in_layouts, old_in_types):
        # Record that ConvertLayout actually called this hook.
        nonlocal test_infer_correct_layout_flag
        test_infer_correct_layout_flag = True
        ret = tvm.tir.layout("")
        if new_in_layouts:
            assert len(new_in_layouts) >= 1
            ret = new_in_layouts[0]
        else:
            # Fall back to the first defined old input layout.
            for i in range(len(old_in_layouts)):
                if old_in_layouts[i]:
                    ret = old_in_layouts[i]
                    break
        # All inputs and the single output share the chosen layout.
        input_layouts = [ret] * len(old_in_layouts)
        return InferCorrectLayoutOutput(input_layouts, [ret], attrs)

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
    assert test_infer_correct_layout_flag
def test_reduce_op_convert_layout():
    """Reduction ops (argmax/mean/max) get their axes remapped NCHW -> NHWC."""
    # before/expected close over reduce_op but are called in the same loop
    # iteration, so Python's late-binding closures are not an issue here.
    for reduce_op in [relay.argmax, relay.mean, relay.max]:

        def before():
            x = relay.var("x", shape=(1, 64, 56, 56))
            weight = relay.var("weight", shape=(64, 64, 3, 3))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=64,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NCHW",
                kernel_layout="OIHW",
            )
            y = reduce_op(y, axis=[2, 3])
            y = relay.Function([x, weight], y)
            return y

        def expected():
            x = relay.var("x", shape=(1, 64, 56, 56))
            weight = relay.var("weight", shape=(64, 64, 3, 3))
            x = relay.layout_transform(x, "NCHW", "NHWC")
            weight = relay.layout_transform(weight, "OIHW", "HWIO")
            y = relay.nn.conv2d(
                x,
                weight,
                channels=64,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            # Spatial axes (2, 3) in NCHW become (1, 2) in NHWC.
            y = reduce_op(y, axis=[1, 2])
            y = relay.Function(relay.analysis.free_vars(y), y)
            return y

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_conv_max_pool_uses_specified_convert_layout():
    """nn.max_pool2d honors its own desired layout entry (NHWC)."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight = relay.layout_transform(weight, "OIHW", "OHWI")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="OHWI",
        )
        y = relay.nn.relu(y)
        # Pool stays NHWC per the desired layouts; batch_flatten has no
        # layout, so the transform back to NCHW goes before it.
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NHWC", out_layout="NHWC")
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(
        a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"], "nn.max_pool2d": ["NHWC"]})
    )
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
def test_simulated_quantize_uses_specified_convert_layout():
    """simulated_quantize annotations pass layout changes through unchanged."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = attach_simulated_quantize(y, QAnnotateKind.INPUT)
        y = relay.nn.relu(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NHWC")
        weight = relay.layout_transform(weight, "OIHW", "OHWI")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="OHWI",
        )
        # The quantize annotation follows the NHWC conv; only the final
        # output is transformed back to NCHW.
        y = attach_simulated_quantize(y, QAnnotateKind.INPUT)
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
@pytest.mark.parametrize(
    "data_layout, kernel_layout",
    [
        ("NCHW1c", "OIHW1i1o"),
        ("NCHW4c", "OIHW4i4o"),
        ("NCHW8c", "OIHW8i8o"),
        ("NCHW16c", "OIHW16i16o"),
    ],
)
def test_resnet_convert_layout_nchwc(data_layout, kernel_layout):
    """Convert a small resnet-like block to blocked NCHWc layouts.

    For NCHW1c the first conv converts as well; for larger block sizes the
    expected graph keeps the first conv (3 input channels) in NCHW and only
    the later convs become contrib_conv2d_nchwc.
    """
    x = relay.var("x", shape=(1, 3, 224, 224))
    weight1 = relay.var("weight1", shape=(64, 3, 7, 7))
    weight2 = relay.var("weight2", shape=(64, 64, 3, 3))
    weight3 = relay.var("weight3", shape=(64, 64, 1, 1))

    def before():
        # conv -> relu -> max pool feeding two parallel convs whose outputs
        # are summed and globally pooled.
        y = relay.nn.conv2d(
            x,
            weight1,
            strides=(2, 2),
            padding=(3, 3),
            channels=64,
            kernel_size=(7, 7),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
        y1 = relay.nn.conv2d(
            y,
            weight2,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            y,
            weight3,
            channels=64,
            kernel_size=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        y2 = relay.nn.relu(y2)
        y = y1 + y2
        y = relay.nn.global_max_pool2d(y, layout="NCHW")
        return y

    def expected():
        if data_layout == "NCHW1c":
            # With a block size of 1 the 3-channel input conv converts too.
            y = relay.nn.contrib_conv2d_nchwc(
                relay.layout_transform(x, "NCHW", data_layout),
                relay.layout_transform(weight1, "OIHW", kernel_layout),
                strides=(2, 2),
                padding=(3, 3),
                channels=64,
                kernel_size=(7, 7),
                data_layout=data_layout,
                kernel_layout=kernel_layout,
            )
            y = relay.nn.relu(y)
            y = relay.nn.max_pool2d(
                y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1), layout=data_layout
            )
        else:
            # First conv stays NCHW; switch to the blocked layout after it.
            y = relay.nn.conv2d(
                x,
                weight1,
                strides=(2, 2),
                padding=(3, 3),
                channels=64,
                kernel_size=(7, 7),
                data_layout="NCHW",
                kernel_layout="OIHW",
            )
            y = relay.nn.relu(y)
            y = relay.nn.max_pool2d(y, pool_size=(3, 3), strides=(2, 2), padding=(1, 1))
            y = relay.layout_transform(y, "NCHW", data_layout)
        y1 = relay.nn.contrib_conv2d_nchwc(
            y,
            relay.layout_transform(weight2, "OIHW", kernel_layout),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.contrib_conv2d_nchwc(
            y,
            relay.layout_transform(weight3, "OIHW", kernel_layout),
            channels=64,
            kernel_size=(1, 1),
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        y2 = relay.nn.relu(y2)
        y = y1 + y2
        y = relay.nn.global_max_pool2d(y, layout=data_layout)
        y = relay.layout_transform(y, data_layout, "NCHW")
        return y

    a = before()
    a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": [data_layout, kernel_layout]}))
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n Expect = \n" + str(b)
def test_conv_l2n_convert_layout():
    """Check that layout transforms are propagated through l2_normalize.

    Each NHWC normalization axis must be remapped to its NCHW counterpart
    when the conv2d is converted (3 -> 1, -1 -> 1, [2, 3] -> [3, 1]).
    """
    axis_list = ([3], [-1], [2, 3])
    expected_axis = ([1], [1], [3, 1])
    for i, axis in enumerate(axis_list):

        def before():
            x = relay.var("x", shape=(1, 56, 56, 64))
            weight = relay.var("weight", shape=(3, 3, 64, 64))
            y = relay.nn.conv2d(
                x,
                weight,
                channels=64,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            z = relay.nn.l2_normalize(y, eps=0.001, axis=axis)
            z = relay.Function(analysis.free_vars(z), z)
            return z

        def expected():
            x = relay.var("x", shape=(1, 56, 56, 64))
            w = relay.var("weight", shape=(3, 3, 64, 64))
            x = relay.layout_transform(x, "NHWC", "NCHW")
            w = relay.layout_transform(w, "HWIO", "OIHW")
            y = relay.nn.conv2d(x, w, channels=64, kernel_size=(3, 3), padding=(1, 1))
            # The normalization axis follows the data into NCHW.
            z = relay.nn.l2_normalize(y, eps=0.001, axis=expected_axis[i])
            z = relay.layout_transform(z, "NCHW", "NHWC")
            z = relay.Function(analysis.free_vars(z), z)
            return z

        a = before()
        a = run_opt_pass(a, transform.ConvertLayout({"nn.conv2d": ["NCHW", "default"]}))
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\n\n Expected = \n" + str(b)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 101,431 | 34.728073 | 98 | py |
tvm | tvm-main/tests/python/relay/test_call_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
import pytest
import tvm
from tvm import relay
def test_callgraph_construct():
    """A CallGraph built from a module mentions its globals and keeps the module."""
    lhs = relay.var("x", shape=(2, 3))
    rhs = relay.var("y", shape=(2, 3))
    module = tvm.IRModule({})
    module["g1"] = relay.Function([lhs, rhs], lhs + rhs)
    graph = relay.analysis.CallGraph(module)
    assert "g1" in str(graph)
    assert tvm.ir.structural_equal(module, graph.module)
def test_print_element():
    """print_var reports zero references for functions that nobody calls."""
    module = tvm.IRModule({})
    for idx, combine in ((0, lambda a, b: a + b), (1, lambda a, b: a - b)):
        lhs = relay.var("x%d" % idx, shape=(2, 3))
        rhs = relay.var("y%d" % idx, shape=(2, 3))
        module["g%d" % idx] = relay.Function([lhs, rhs], combine(lhs, rhs))
    graph = relay.analysis.CallGraph(module)
    assert "#refs = 0" in str(graph.print_var("g0"))
    assert "#refs = 0" in str(graph.print_var("g1"))
def test_global_call_count():
    """global_call_count counts the call sites of each global.

    main calls g0 and g1 once each, and g1 calls g0 internally, so main has
    two global calls, g1 has one, and g0 has none.
    """
    mod = tvm.IRModule({})
    x0 = relay.var("x0", shape=(2, 3))
    y0 = relay.var("y0", shape=(2, 3))
    g0 = relay.GlobalVar("g0")
    mod[g0] = relay.Function([x0, y0], x0 + y0)
    x1 = relay.var("x1", shape=(2, 3))
    y1 = relay.var("y1", shape=(2, 3))
    g1 = relay.GlobalVar("g1")
    mod[g1] = relay.Function([x1, y1], g0(x1, y1))
    p0 = relay.var("p0", shape=(2, 3))
    p1 = relay.var("p1", shape=(2, 3))
    func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
    mod["main"] = func
    # Build the call graph once everything is in the module; the original
    # also constructed an intermediate CallGraph that was never used.
    call_graph = relay.analysis.CallGraph(mod)
    assert call_graph.global_call_count(g0) == 0
    assert call_graph.global_call_count(g1) == 1
    assert call_graph.global_call_count("main") == 2
def test_ref_count():
    """ref_count reports how many times each global is referenced.

    g0 and g1 are each called once from main, and main itself is never
    referenced.
    """
    mod = tvm.IRModule({})
    x0 = relay.var("x0", shape=(2, 3))
    y0 = relay.var("y0", shape=(2, 3))
    g0 = relay.GlobalVar("g0")
    mod[g0] = relay.Function([x0, y0], x0 + y0)
    x1 = relay.var("x1", shape=(2, 3))
    y1 = relay.var("y1", shape=(2, 3))
    g1 = relay.GlobalVar("g1")
    mod[g1] = relay.Function([x1, y1], x1 - y1)
    p0 = relay.var("p0", shape=(2, 3))
    p1 = relay.var("p1", shape=(2, 3))
    func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
    mod["main"] = func
    # Build the call graph once everything is in the module; the original
    # also constructed an intermediate CallGraph that was never used.
    call_graph = relay.analysis.CallGraph(mod)
    assert call_graph.ref_count(g0) == 1
    assert call_graph.ref_count(g1) == 1
    assert call_graph.ref_count("main") == 0
def test_nested_ref():
    """ref_count includes references made from inside other globals.

    g0 is referenced by both main and g1 (count 2); g1 only by main.
    """
    mod = tvm.IRModule({})
    x0 = relay.var("x0", shape=(2, 3))
    y0 = relay.var("y0", shape=(2, 3))
    g0 = relay.GlobalVar("g0")
    mod[g0] = relay.Function([x0, y0], x0 + y0)
    x1 = relay.var("x1", shape=(2, 3))
    y1 = relay.var("y1", shape=(2, 3))
    g1 = relay.GlobalVar("g1")
    mod[g1] = relay.Function([x1, y1], g0(x1, y1))
    p0 = relay.var("p0", shape=(2, 3))
    p1 = relay.var("p1", shape=(2, 3))
    func = relay.Function([p0, p1], g0(p0, p1) * g1(p0, p1))
    mod["main"] = func
    # Build the call graph once everything is in the module; the original
    # also constructed an intermediate CallGraph that was never used.
    call_graph = relay.analysis.CallGraph(mod)
    assert call_graph.ref_count(g0) == 2
    assert call_graph.ref_count(g1) == 1
    assert call_graph.ref_count("main") == 0
def test_recursive_func():
    """A self-recursive global function is flagged recursive with correct ref counts."""
    mod = tvm.IRModule({})
    x = relay.var("x", shape=[], dtype="int32")
    fn0 = relay.Function([x], x)
    gx = relay.GlobalVar("gx")
    mod[gx] = fn0
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = relay.ScopeBuilder()
    # sum_up(i) = i if i == 0 else (sum_up(i - 1) + gx(i)) + i
    with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype="int32"))
        global_call = gx(i)
        rec_call = relay.Call(sum_up, [one_less]) + global_call
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    func = func.with_attr("Compiler", "a")
    mod[sum_up] = func
    iarg = relay.var("i", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg], sum_up(iarg))
    call_graph = relay.analysis.CallGraph(mod)
    # sum_up is referenced by main and by its own recursive call site.
    assert call_graph.is_recursive(sum_up)
    assert call_graph.ref_count(sum_up) == 2
    assert call_graph.ref_count(gx) == 1
    assert call_graph.ref_count("main") == 0
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 5,132 | 33.449664 | 80 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_leaky_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def dequantize(data, scale, zp):
    """Map quantized integer values back to real space: scale * (data - zp)."""
    shifted = np.asarray(data) - zp
    return scale * shifted
def generate_golden_output(
    x_data, dequantized_x, alpha, o_scale, o_zero_point, i_zero_point, q_dtype=np.uint8
):
    """NumPy reference for quantized leaky-relu.

    Where ``x_data`` is below the input zero point (i.e. the real value is
    negative), the dequantized value is scaled by ``alpha`` before being
    requantized; otherwise it is requantized directly. Both branches are
    clipped to the quantized dtype's representable range.

    Parameters
    ----------
    x_data : array-like of int
        Raw quantized input values.
    dequantized_x : array-like of float
        The same values mapped to real space.
    alpha : float
        Leaky-relu slope for negative inputs.
    o_scale, o_zero_point : float, int
        Output quantization parameters.
    i_zero_point : int
        Input zero point (threshold separating negative from non-negative).
    q_dtype : numpy integer dtype, optional
        Quantized storage type defining the clip range (default np.uint8,
        matching the original hard-coded behavior).

    Returns
    -------
    np.ndarray of float
        Expected quantized output values.
    """
    prod = np.multiply(dequantized_x, alpha)
    prod = np.around(prod / o_scale + o_zero_point)
    # Clip both branches to the representable range of the quantized dtype.
    q_min = np.iinfo(q_dtype).min
    q_max = np.iinfo(q_dtype).max
    prod = np.clip(prod, q_min, q_max)
    requantized = np.clip(np.round(dequantized_x / o_scale + o_zero_point), q_min, q_max)
    output = np.where(x_data < i_zero_point, prod, requantized)
    return output
def test_qnn_leaky_relu():
    """qnn.leaky_relu canonicalization matches the NumPy reference output."""
    data_dtype = "uint8"
    input_scale = 0.125
    input_zero_point = 60
    output_scale = 0.6
    output_zero_point = 17
    alpha = 0.9
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.qnn.op.leaky_relu(
        x=x,
        alpha=alpha,
        input_scale=relay.const(input_scale, "float32"),
        input_zero_point=relay.const(input_zero_point, "int32"),
        output_scale=relay.const(output_scale, "float32"),
        output_zero_point=relay.const(output_zero_point, "int32"),
    )
    func = relay.Function([x], y)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    # Lower the qnn op into regular relay ops before executing.
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    # Inputs straddle the input zero point (60) to exercise both branches.
    x_data = np.array((255, 133, 0, 9)).reshape((1, 4))
    x_dequantized = dequantize(x_data, input_scale, input_zero_point)
    golden_output = generate_golden_output(
        x_data, x_dequantized, alpha, output_scale, output_zero_point, input_zero_point
    )
    op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(x_data)
    np.testing.assert_equal(op_res.numpy(), golden_output)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_qnn_leaky_relu()
| 2,616 | 32.551282 | 100 | py |
tvm | tvm-main/tests/python/relay/test_json_compact.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm import te
import json
# 0.6 BACKWARDS COMPATIBILITY TESTS
def test_type_var():
    """relay.TypeVar / relay.GlobalTypeVar JSON from TVM 0.6 still loads."""
    # type var in 0.6
    nodes = [
        {"type_key": ""},
        {"type_key": "relay.TypeVar", "attrs": {"kind": "0", "span": "0", "var": "2"}},
        {"type_key": "Variable", "attrs": {"dtype": "int32", "name": "in0"}},
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    tvar = tvm.ir.load_json(json.dumps(data))
    assert isinstance(tvar, tvm.ir.TypeVar)
    assert tvar.name_hint == "in0"
    # Rewrite the node's type key in place to exercise the GlobalTypeVar
    # path with the same serialized payload.
    nodes[1]["type_key"] = "relay.GlobalTypeVar"
    tvar = tvm.ir.load_json(json.dumps(data))
    assert isinstance(tvar, tvm.ir.GlobalTypeVar)
    assert tvar.name_hint == "in0"
def test_var():
    """A relay.Var serialized by TVM 0.6 still loads with its name hint."""
    # relay.Var as serialized by 0.6: the Id node holds the name hint and
    # the TensorType node describes a 16x8 float32 tensor.
    nodes = [
        {"type_key": ""},
        {
            "type_key": "relay.Var",
            "attrs": {
                "_checked_type_": "0",
                "span": "0",
                "type_annotation": "0",
                "vid": "2",
            },
        },
        {"type_key": "relay.Id", "attrs": {"name_hint": "a3"}},
        {"type_key": "relay.TensorType", "attrs": {"dtype": "float32", "shape": "4", "span": "0"}},
        {"type_key": "Array", "data": [5, 6]},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "16", "span": "0"}},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "8", "span": "0"}},
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    tvar = tvm.ir.load_json(json.dumps(data))
    assert isinstance(tvar, relay.Var)
    assert tvar.name_hint == "a3"
def test_incomplete_type():
    """A relay.IncompleteType saved by TVM 0.6 still loads as IncompleteType."""
    payload = {
        "root": 1,
        "nodes": [
            {"type_key": ""},
            {"type_key": "relay.IncompleteType", "attrs": {"kind": "0", "span": "0"}},
        ],
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    loaded = tvm.ir.load_json(json.dumps(payload))
    assert isinstance(loaded, tvm.ir.IncompleteType)
def test_func_tuple_type():
    """A relay.FuncType (returning an empty TupleType) from TVM 0.6 still loads."""
    nodes = [
        {"type_key": ""},
        {
            "type_key": "relay.FuncType",
            "attrs": {
                "arg_types": "2",
                "ret_type": "3",
                "span": "0",
                "type_constraints": "6",
                "type_params": "5",
            },
        },
        {"type_key": "Array"},
        {"type_key": "relay.TupleType", "attrs": {"fields": "4", "span": "0"}},
        {"type_key": "Array"},
        {"type_key": "Array"},
        {"type_key": "Array"},
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    tvar = tvm.ir.load_json(json.dumps(data))
    assert isinstance(tvar, tvm.ir.FuncType)
def test_global_var():
    """GlobalVar round-trips from 0.6 JSON under both old and new type keys."""
    for type_key in ("relay.GlobalVar", "GlobalVar"):
        payload = {
            "root": 1,
            "nodes": [
                {"type_key": ""},
                {
                    "type_key": type_key,
                    "attrs": {"_checked_type_": "0", "name_hint": "x", "span": "0"},
                },
            ],
            "attrs": {"tvm_version": "0.6.0"},
            "b64ndarrays": [],
        }
        loaded = tvm.ir.load_json(json.dumps(payload))
        assert isinstance(loaded, tvm.ir.GlobalVar)
def test_op():
    """An Op referenced by global_key in 0.6 JSON resolves to the registry op."""
    payload = json.dumps(
        {
            "root": 1,
            "nodes": [{"type_key": ""}, {"type_key": "relay.Op", "global_key": "nn.conv2d"}],
            "attrs": {"tvm_version": "0.6.0"},
            "b64ndarrays": [],
        }
    )
    assert tvm.ir.load_json(payload) == relay.op.get("nn.conv2d")
def test_tir_var():
    """tir.Var ("Variable") and SizeVar JSON from TVM 0.6 still load."""
    nodes = [
        {"type_key": ""},
        {"type_key": "Variable", "attrs": {"dtype": "int32", "name": "x", "span": "0"}},
        {"type_key": "SizeVar", "attrs": {"dtype": "int32", "name": "y", "span": "0"}},
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    x = tvm.ir.load_json(json.dumps(data))
    assert isinstance(x, tvm.tir.Var)
    assert x.name == "x"
    # Re-point the root at the SizeVar node and load the same payload again.
    data["root"] = 2
    y = tvm.ir.load_json(json.dumps(data))
    assert isinstance(y, tvm.tir.SizeVar)
    assert y.name == "y"
def test_str_map():
    """A 0.6 "StrMap" node loads as a Map with string keys.

    The value for "z" is a plain IntImm (2); the value for "x" is a small
    expression graph (max(x + 1 + 2, 100)) whose exact value is not checked.
    """
    nodes = [
        {"type_key": ""},
        {"type_key": "StrMap", "keys": ["z", "x"], "data": [2, 3]},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
        {"type_key": "Max", "attrs": {"a": "4", "b": "10", "dtype": "int32", "span": "0"}},
        {"type_key": "Add", "attrs": {"a": "5", "b": "9", "dtype": "int32", "span": "0"}},
        {"type_key": "Add", "attrs": {"a": "6", "b": "8", "dtype": "int32", "span": "0"}},
        {
            "type_key": "tir.Var",
            "attrs": {"dtype": "int32", "name": "7", "type_annotation": "0", "span": "0"},
        },
        {"type_key": "runtime.String", "repr_str": "x"},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "1", "span": "0"}},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "2", "span": "0"}},
        {"type_key": "IntImm", "attrs": {"dtype": "int32", "value": "100", "span": "0"}},
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.6.0"},
        "b64ndarrays": [],
    }
    x = tvm.ir.load_json(json.dumps(data))
    assert isinstance(x, tvm.ir.container.Map)
    assert len(x) == 2
    assert "x" in x
    assert "z" in x
    assert bool(x["z"] == 2)
# 0.7 BACKWARDS COMPATIBILITY TESTS
def test_irmodule_attributes():
    """An IRModule serialized by TVM 0.7 (no attrs field) loads with null attrs."""
    nodes = [
        {"type_key": ""},
        {
            "type_key": "IRModule",
            "attrs": {
                "functions": "0",
                "global_type_var_map_": "0",
                "global_var_map_": "0",
                "source_map": "0",
                "type_definitions": "0",
            },
        },
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.7.0"},
        "b64ndarrays": [],
    }
    mod = tvm.ir.load_json(json.dumps(data))
    assert isinstance(mod, tvm.ir.IRModule)
    # IRModule attributes should default to null
    assert not mod.attrs
# 0.8 BACKWARDS COMPATIBILITY TESTS
def test_virtual_device():
    """A relay.Function serialized by TVM 0.8 loads with no virtual device set."""
    nodes = [
        {"type_key": ""},
        {
            "type_key": "relay.Function",
            "attrs": {
                "_checked_type_": "0",
                "attrs": "0",
                "body": "0",
                "params": "0",
                "ret_type": "0",
                "span": "0",
                "type_params": "0",
            },
        },
    ]
    data = {
        "root": 1,
        "nodes": nodes,
        "attrs": {"tvm_version": "0.8.0"},
        "b64ndarrays": [],
    }
    func = tvm.ir.load_json(json.dumps(data))
    assert isinstance(func, relay.Function)
    # virtual_device_ did not exist in 0.8 payloads, so it must stay unset.
    assert not func.virtual_device_
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_op()
    test_type_var()
    test_var()
    test_incomplete_type()
    test_func_tuple_type()
    test_global_var()
    test_tir_var()
    test_str_map()
    # The two calls below were missing, so the 0.7/0.8 backwards
    # compatibility checks never ran when this file was invoked directly.
    test_irmodule_attributes()
    test_virtual_device()
| 8,355 | 28.013889 | 99 | py |
tvm | tvm-main/tests/python/relay/test_analysis_extract_fake_quantized_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
def test_fake_quantize_conv():
    """A dequantize -> conv2d -> quantize chain counts as one fake-quantized conv2d."""
    zp = relay.const(0)
    data = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    kernel = relay.qnn.op.dequantize(
        relay.var("w", shape=[16, 3, 5, 5], dtype="int8"), relay.const(0.5), zp
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.conv2d(data, kernel, kernel_size=[5, 5]),
        relay.const(1.0),
        zp,
        out_dtype="int8",
    )
    freqs = relay.analysis.list_fake_quantized_op_freqs(tvm.IRModule.from_expr(expr))
    assert dict(freqs) == {"nn.conv2d": 1}
def test_fake_quantize_dense():
    """A dequantize -> dense -> quantize chain counts as one fake-quantized dense."""
    zp = relay.const(0)
    data = relay.qnn.op.dequantize(
        relay.var("x", shape=[128, 64], dtype="int8"), relay.const(2.0), zp
    )
    weights = relay.qnn.op.dequantize(
        relay.var("w", shape=[256, 64], dtype="int8"), relay.const(0.5), zp
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.dense(data, weights), relay.const(1.0), zp, out_dtype="int8"
    )
    freqs = relay.analysis.list_fake_quantized_op_freqs(tvm.IRModule.from_expr(expr))
    assert dict(freqs) == {"nn.dense": 1}
def test_fake_quantize_multiple_regions():
    """Ops in separate fake-quantized regions are all counted; trailing ops are not."""
    x = relay.var("x", shape=[128, 64], dtype="int8")
    w = relay.var("w", shape=[256, 64], dtype="int8")
    zero = relay.const(0)
    op = relay.op.nn.dense(
        relay.qnn.op.dequantize(x, relay.const(2.0), zero),
        relay.qnn.op.dequantize(w, relay.const(0.5), zero),
    )
    op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
    # The relu sits inside its own dequantize/quantize region.
    op = relay.qnn.op.dequantize(op, relay.const(2.0), relay.const(114))
    op = relay.op.nn.relu(op)
    op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
    w2 = relay.var("w2", shape=[64, 256], dtype="int8")
    op = relay.op.nn.dense(
        relay.qnn.op.dequantize(op, relay.const(1.0), zero),
        relay.qnn.op.dequantize(w2, relay.const(0.5), zero),
    )
    op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
    # We expect to ignore this sigmoid op since it's just outside a fake
    # quantized region
    op = relay.op.sigmoid(op)
    mod = tvm.IRModule.from_expr(op)
    fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
    assert dict(fake_quantized_op_freqs) == {"nn.dense": 2, "nn.relu": 1}
def test_fake_quantize_maxpool():
    """max_pool2d between dequantize and quantize is a fake-quantized op."""
    zp = relay.const(0)
    deq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    pooled = relay.op.nn.max_pool2d(deq, [3, 3])
    expr = relay.qnn.op.quantize(pooled, relay.const(2.0), zp)
    freqs = relay.analysis.list_fake_quantized_op_freqs(tvm.IRModule.from_expr(expr))
    assert dict(freqs) == {"nn.max_pool2d": 1}
def test_fake_quantize_transpose_reshape():
    """transpose and reshape inside a quantized region are both counted."""
    zp = relay.const(0)
    deq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    permuted = relay.op.transpose(deq, [1, 0, 2, 3])
    flattened = relay.op.reshape(permuted, [3, -1])
    expr = relay.qnn.op.quantize(flattened, relay.const(2.0), zp)
    freqs = relay.analysis.list_fake_quantized_op_freqs(tvm.IRModule.from_expr(expr))
    assert dict(freqs) == {"transpose": 1, "reshape": 1}
def test_fake_quantize_concat():
    """Concatenating four dequantized tensors counts as one fake-quantized concatenate."""
    zp = relay.const(0)
    branches = [
        relay.qnn.op.dequantize(
            relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zp
        )
        for i in range(4)
    ]
    expr = relay.qnn.op.quantize(relay.op.concatenate(branches, axis=1), relay.const(3.5), zp)
    freqs = relay.analysis.list_fake_quantized_op_freqs(tvm.IRModule.from_expr(expr))
    assert dict(freqs) == {"concatenate": 1}
| 4,814 | 34.932836 | 92 | py |
tvm | tvm-main/tests/python/relay/test_auto_scheduler_task_extraction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test task extraction for auto-scheduler"""
import json
import tempfile
import pytest
import tvm.relay.testing
import tvm.testing
from tvm import _ffi as _ffi_api
from tvm import auto_scheduler, relay
def get_network(name, batch_size=1, layout="NHWC"):
    """Get the symbol definition and random weight of a network.

    Parameters
    ----------
    name : str
        One of "resnet-18", "resnet-50", "winograd-test", "resnet3d-18",
        "mobilenet", "dcgan", or "mlp".
    batch_size : int
        Batch dimension of the network input.
    layout : str
        Data layout: "NHWC", "NCHW", "NCDHW", or "NDHWC".

    Returns
    -------
    mod, params
        The Relay module and its initialized parameters.

    Raises
    ------
    ValueError
        If the layout or network name is not recognized.
    """
    # auto-scheduler prefers NHWC layout
    if layout == "NHWC":
        image_shape = (224, 224, 3)
    elif layout == "NCHW":
        image_shape = (3, 224, 224)
    elif layout == "NCDHW":
        image_shape = (3, 16, 224, 224)
    elif layout == "NDHWC":
        image_shape = (3, 224, 224, 16)
    else:
        raise ValueError("Invalid layout: " + layout)

    if name == "resnet-18":
        mod, params = relay.testing.resnet.get_workload(
            num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
        )
    elif name == "resnet-50":
        mod, params = relay.testing.resnet.get_workload(
            num_layers=50, batch_size=batch_size, layout=layout, image_shape=image_shape
        )
    elif name == "winograd-test":
        # Small NHWC conv2d + bias + relu graph.
        input_shape = [1, 23, 40, 32]
        data = relay.var("data", shape=input_shape, dtype="float32")
        net = relay.testing.layers.conv2d(
            data=data,
            channels=128,
            kernel_size=3,
            strides=1,
            padding=1,
            data_layout="NHWC",
            kernel_layout="HWIO",
            name="",
        )
        bias = relay.var("conv1_bias")
        net = relay.nn.bias_add(net, bias, 3)
        net = relay.nn.relu(net)
        mod, params = relay.testing.create_workload(net)
    elif name == "resnet3d-18":
        # NOTE: the original had this identical branch twice; the duplicate
        # (unreachable) copy has been removed.
        mod, params = relay.testing.resnet_3d.get_workload(
            num_layers=18, batch_size=batch_size, layout=layout, image_shape=image_shape
        )
    elif name == "mobilenet":
        mod, params = relay.testing.mobilenet.get_workload(
            batch_size=batch_size, layout=layout, image_shape=image_shape
        )
    elif name == "dcgan":
        mod, params = relay.testing.dcgan.get_workload(batch_size=batch_size, layout=layout)
    elif name == "mlp":
        # Two dense + bias + relu layers.
        data = relay.var("data", shape=(batch_size, 32))
        fc1 = relay.nn.dense(data, relay.var("fc1_weight"), units=32)
        fc1 = relay.nn.bias_add(fc1, relay.var("fc1_bias"), axis=-1)
        act1 = relay.nn.relu(fc1)
        fc2 = relay.nn.dense(act1, relay.var("fc2_weight"), units=32)
        fc2 = relay.nn.bias_add(fc2, relay.var("fc2_bias"), axis=-1)
        act2 = relay.nn.relu(fc2)
        mlp = act2
        args = relay.analysis.free_vars(act2)
        mlp = relay.Function(args, mlp)
        mod, params = relay.testing.init.create_workload(mlp)
    else:
        raise ValueError("Unsupported network: " + name)

    return mod, params
@tvm.testing.requires_cuda
@pytest.mark.parametrize(
    "params",
    [
        ("mlp", "NHWC", 1, 2),
        ("resnet-18", "NHWC", 24, 25),
        ("resnet-18", "NCHW", 24, 25),
        ("mobilenet", "NHWC", 22, 30),
        ("mobilenet", "NCHW", 22, 30),
        ("resnet3d-18", "NCDHW", 23, 24),
        ("resnet3d-18", "NDHWC", 23, 24),
    ],
)
def test_task_extraction_cuda(params):
    """Check the number of extracted tasks and their total weight per network."""
    target = tvm.target.Target("cuda")
    network, layout, expected_task, expected_weights = params
    mod, params = get_network(network, layout=layout)
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)
    # Print the extracted tasks to ease debugging on count mismatches.
    for task, weight in zip(tasks, task_weights):
        print(task.desc, task.workload_key, weight)
    assert len(tasks) == expected_task
    assert sum(task_weights) == expected_weights
@pytest.mark.parametrize(
    "params",
    [
        # Relay FuseOps puts two conv2ds to separate functions and results in two tasks.
        ("basic_func", 2, False),
        # Relay FuseOps will not break the primitive function and result in one task.
        ("fused_func", 1, False),
        # The Relay function without complex ops will not form a task by default.
        ("simple_func", 0, False),
        # Every Relay function becomes a task regardless what ops in its body.
        ("simple_func", 1, True),
        # The Relay function without any reduce op is considered as a simple task.
        ("shape_of_func", 0, False),
        ("shape_of_func", 1, True),
        # The Relay function with dynamic shape inputs/outputs will not be extracted.
        ("dyn_shape_func", 0, False),
        # The Conv2D in the Relay function with control flow could still be a task.
        # Also, two identical Conv2D should only be one task with weight=2.
        ("control_flow_func", 1, False),
        # The first function with unsupported op (NMS) will not be extracted.
        ("func_w_unsupported_op", 1, True),
    ],
)
def test_task_extraction_cpu(params):
    """Task extraction on a variety of Relay graphs; see the parametrize comments."""
    ishape = (1, 3, 224, 224)
    w1shape = (32, 3, 3, 3)
    w2shape = (32, 32, 3, 3)
    dtype = "float32"
    target = tvm.target.Target("llvm")

    def get_func():
        # Two conv2d + relu pairs as separate (fusable) calls.
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
        weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
        conv2d = relay.nn.conv2d(data, weight1, kernel_size=(3, 3), padding=(1, 1))
        relu = relay.nn.relu(conv2d)
        conv2d = relay.nn.conv2d(relu, weight2, kernel_size=(3, 3), padding=(1, 1))
        out = relay.nn.relu(conv2d)
        return relay.Function([data, weight1, weight2], out)

    def get_fused_func():
        # Wrap the whole graph in a single primitive function.
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
        weight2 = relay.var("weight2", shape=(w2shape), dtype=dtype)
        fused_func = get_func()
        # Set to primitive to keep fuse_ops untouch.
        fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        call = relay.Call(fused_func, [data, weight1, weight2])
        return relay.Function([data, weight1, weight2], call)

    def get_simple_func():
        # No reduce op: only a task when include_simple_tasks=True.
        data = relay.var("data", relay.TensorType((1, 2, 3), "float32"))
        out = relay.image.affine_grid(data, (150, 150))
        return relay.Function([data], out)

    def get_shape_of_func():
        data = relay.var("data", shape=(relay.Any(), 28, 28), dtype="float32")
        out = relay.shape_of(data)
        return relay.Function([data], out)

    def get_func_with_dynamic_shape():
        data = relay.var("data", shape=(relay.Any(), 32), dtype="float32")
        out = relay.max(data)
        return relay.Function(relay.analysis.free_vars(out), out)

    def get_func_with_control_flow():
        # If-expression selecting between zeros and two identical conv2ds.
        data = relay.var("data", shape=(1, 3, 224, 224))
        weight = relay.var("weight", shape=(3, 3, 3, 3))
        eq1 = relay.var("e1", shape=[], dtype="float32")
        eq2 = relay.var("e2", shape=[], dtype="float32")
        eq = relay.equal(eq1, eq2)
        true_branch = relay.zeros(shape=(1, 3, 224, 224), dtype="float32")
        false_branch = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=3, padding=(1, 1))
        false_branch = relay.nn.conv2d(
            false_branch, weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
        )
        ife = relay.If(eq, true_branch, false_branch)
        out = relay.erf(ife)
        return relay.Function([data, weight, eq1, eq2], out)

    def get_func_with_unsupported_op():
        # NMS (unsupported) feeding a primitive relu postprocess function.
        def get_postproc_func():
            data = relay.var("data", shape=((1, 3, 6)), dtype=dtype)
            out = relay.nn.relu(data)
            func = relay.Function([data], out)
            func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
            return func

        cls_prob = relay.var("cls_prob", relay.ty.TensorType((1, 3, 3), "float32"))
        loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, 3 * 4), "float32"))
        anchors = relay.var("anchors", relay.ty.TensorType((1, 3, 4), "float32"))
        mtl = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
        )
        nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
        out = relay.Call(get_postproc_func(), [nms])
        return relay.Function([cls_prob, loc_pred, anchors], out)

    func_map = {
        "basic_func": get_func,
        "fused_func": get_fused_func,
        "simple_func": get_simple_func,
        "shape_of_func": get_shape_of_func,
        "dyn_shape_func": get_func_with_dynamic_shape,
        "control_flow_func": get_func_with_control_flow,
        "func_w_unsupported_op": get_func_with_unsupported_op,
    }

    def verify_task_extraction(func_name, expected_task, include_simple_tasks=False):
        # Build the named graph, extract tasks, and check the count.
        func = func_map[func_name]()
        mod = tvm.IRModule.from_expr(func)
        tasks, task_weights = auto_scheduler.extract_tasks(
            mod["main"], None, target, include_simple_tasks=include_simple_tasks
        )
        assert len(tasks) == expected_task
        assert len(task_weights) == expected_task

    verify_task_extraction(*params)
def test_dump_workload_to_dag_extract_tasks():
    """The DAG log dump must round-trip as {workload_key: str(compute_dag)}."""
    mod, _ = get_network("mobilenet", layout="NHWC")
    with tempfile.NamedTemporaryFile() as log_file:
        tasks, _ = auto_scheduler.extract_tasks(
            mod["main"],
            None,
            "llvm",
            include_simple_tasks=True,
            dump_workload_to_dag_log=log_file.name,
        )
        expected = {task.workload_key: str(task.compute_dag) for task in tasks}
        # This handle never wrote, so it still reads from offset 0.
        assert json.load(log_file) == expected
def test_custom_hash_func_extract_tasks():
    """Task extraction must honor a user-registered workload hash function."""

    # Registers a process-global override for the compute-DAG hash hook.
    @_ffi_api.register_func("auto_scheduler.compute_dag.hash_func")
    def counting_unique_hash(str_dag):
        # Deterministic counter: each distinct DAG string gets the next integer.
        ret = counting_unique_hash.i
        counting_unique_hash.i += 1
        return ret

    counting_unique_hash.i = 0
    mod, _ = get_network("mobilenet", layout="NHWC")
    tasks, _ = auto_scheduler.extract_tasks(mod["main"], None, "llvm", include_simple_tasks=True)
    hash_values = []
    for task in tasks:
        # task.workload_key should look like
        # [43, [3, 3, 1024, 1], [1024], [3, 3, 1024, 1]] where the first int is the result of the hash
        # Extract the hash and keep track of every hash
        hash_value = int(task.workload_key[1:].split(",")[0])
        hash_values.append(hash_value)
    # All values are unique, and we know the min and max
    # This is a sufficient condition to know that hashes in hash_values are an increasing list
    # of hashes up to counting_unique_hash.i - 1
    assert len(hash_values) == len(set(hash_values))
    assert min(hash_values) == 0
    assert max(hash_values) == counting_unique_hash.i - 1
if __name__ == "__main__":
tvm.testing.main()
| 11,673 | 38.572881 | 102 | py |
tvm | tvm-main/tests/python/relay/test_op_grad_level1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te, relay
from tvm.relay.testing import check_grad, run_infer_type
from tvm.relay.transform import gradient
# Executor-kind fixture shared by all tests in this file; only "debug" is exercised.
executor_kind = tvm.testing.parameter("debug")
def sigmoid(x):
    """Reference logistic function, elementwise over a numpy array."""
    ones = np.ones_like(x)
    return ones / (np.exp(-x) + ones)
def relu(x):
    """Reference ReLU: elementwise max(x, 0) without mutating the input."""
    return np.maximum(x, np.zeros_like(x))
class TestUnaryOp:
    """Check gradients of unary ops against analytic reference gradients."""

    # op name -> (relay op, reference gradient fn: (input, output_grad) -> input_grad)
    config = {
        "log": (tvm.relay.log, lambda x, g: g * (1 / x)),
        "exp": (tvm.relay.exp, lambda x, g: g * np.exp(x)),
        "sigmoid": (tvm.relay.sigmoid, lambda x, g: g * sigmoid(x) * (1 - sigmoid(x))),
        "tanh": (tvm.relay.tanh, lambda x, g: g * (1 - np.tanh(x) * np.tanh(x))),
        "sqrt": (tvm.relay.sqrt, lambda x, g: g * 0.5 * np.power(x, -0.5)),
        "abs": (tvm.relay.abs, lambda x, g: np.where(x < 0, -g, g)),
        "relu": (relay.nn.relu, lambda x, g: np.where(x < 0, np.zeros_like(x), g)),
        "erf": (tvm.relay.erf, lambda x, g: g * (2.0 / (np.pi ** (0.5)) * np.exp(-x * x))),
        "cos": (tvm.relay.cos, lambda x, g: g * -1.0 * np.sin(x)),
        "sin": (tvm.relay.sin, lambda x, g: g * np.cos(x)),
        "tan": (tvm.relay.tan, lambda x, g: g * (1.0 / (np.cos(x) ** 2))),
        "atan": (tvm.relay.atan, lambda x, g: g * (1 / (1 + np.power(x, 2.0)))),
        "log2": (tvm.relay.log2, lambda x, g: g * (1 / (np.log(2) * x))),
        "log10": (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))),
        "cosh": (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))),
        "sinh": (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))),
        "asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x**2) ** (1.0 / 2.0))),
        "acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x**2.0) ** (1.0 / 2.0))),
        "acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x**2 - 1.0) ** (1.0 / 2.0))),
        "asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x**2 + 1.0) ** (1.0 / 2.0))),
        "atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x**2 - 1.0))),
    }

    relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
    dtype = tvm.testing.parameter("float32", "float64")
    shape = tvm.testing.parameter((10, 4))

    def test_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
        """Compare the gradient of `relay_op(x) * g` against `ref_func(x, g)`."""
        target = tvm.target.Target(target)
        if target.kind.name == "vulkan":
            # Per-dtype lists of ops known not to work on the Vulkan runtime.
            known_breaks = {
                "float32": [
                    tvm.relay.erf,
                    tvm.relay.tan,
                    tvm.relay.atan,
                    tvm.relay.log10,
                    tvm.relay.cosh,
                    tvm.relay.sinh,
                    tvm.relay.asin,
                    tvm.relay.acos,
                    tvm.relay.acosh,
                    tvm.relay.asinh,
                    tvm.relay.atanh,
                ],
                "float64": [
                    tvm.relay.log,
                    tvm.relay.exp,
                    tvm.relay.sigmoid,
                    tvm.relay.tanh,
                    tvm.relay.sqrt,
                    tvm.relay.erf,
                    tvm.relay.cos,
                    tvm.relay.sin,
                    tvm.relay.tan,
                    tvm.relay.atan,
                    tvm.relay.log2,
                    tvm.relay.log10,
                    tvm.relay.cosh,
                    tvm.relay.sinh,
                    tvm.relay.asin,
                    tvm.relay.acos,
                    tvm.relay.acosh,
                    tvm.relay.asinh,
                    tvm.relay.atanh,
                ],
            }
            if relay_op in known_breaks[dtype]:
                pytest.xfail(f"{dtype} {relay_op.__name__} not yet supported on Vulkan runtime")

        tp = relay.TensorType(shape, dtype)
        x = relay.var("x", tp)
        g = relay.var("g", tp)
        # Multiply by g so the backward pass receives a non-trivial output gradient.
        y = relay_op(x) * g
        fwd_func = relay.Function([x, g], y)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))
        data_in = np.random.rand(*shape).astype(dtype)
        grad_in = np.random.rand(*shape).astype(dtype)
        ref_grad_out = ref_func(data_in, grad_in)
        # Gradient functions return (forward result, gradients-per-input).
        op_res, (op_grad, _) = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(bwd_func)(data_in, grad_in)
        np.testing.assert_allclose(op_grad.numpy(), ref_grad_out, rtol=0.01)
class TestBinaryOp:
    """Check gradients of elementwise binary ops against analytic references."""

    # op name -> (relay op, analytic gradients w.r.t. both operands)
    config = {
        "add": (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]),
        "subtract": (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]),
        "multiply": (relay.multiply, lambda x, y: [y, x]),
        "divide": (relay.divide, lambda x, y: [1 / y, -x / (y**2)]),
    }

    relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys())
    dtype = tvm.testing.parameter("float32", "float64")
    shape = tvm.testing.parameter((5, 10, 5))

    def test_binary_op(self, target, dev, executor_kind, relay_op, ref_func, shape, dtype):
        """Evaluate the gradient of `relay_op(x, y)` and compare to `ref_func`."""
        tensor_type = relay.TensorType(shape, dtype=dtype)
        x = relay.var("x", tensor_type)
        y = relay.var("y", tensor_type)
        forward = run_infer_type(relay.Function([x, y], relay_op(x, y)))
        backward = run_infer_type(gradient(forward))

        lhs = np.random.rand(*shape).astype(tensor_type.dtype)
        rhs = np.random.rand(*shape).astype(tensor_type.dtype)
        want_grad0, want_grad1 = ref_func(lhs, rhs)

        _, (got_grad0, got_grad1) = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(backward)(lhs, rhs)
        np.testing.assert_allclose(got_grad0.numpy(), want_grad0, rtol=0.01)
        np.testing.assert_allclose(got_grad1.numpy(), want_grad1, rtol=0.01)
def test_softmax_grad(executor_kind, target, dev):
    """Numerical gradient check for nn.softmax."""
    target = tvm.target.Target(target)
    if target.kind.name == "vulkan":
        pytest.xfail("Known failure on vulkan")
    inp = relay.var("data", relay.TensorType((1, 16), "float64"))
    forward = relay.Function([inp], relay.nn.softmax(inp))
    check_grad(forward, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_log_softmax_grad(executor_kind, target, dev):
    """Numerical gradient check for nn.log_softmax."""
    target = tvm.target.Target(target)
    if target.kind.name == "vulkan":
        pytest.xfail("Known failure on vulkan")
    inp = relay.var("data", relay.TensorType((2, 16), "float64"))
    forward = relay.Function([inp], relay.nn.log_softmax(inp))
    check_grad(forward, scale=1, target_devices=[(target, dev)], executor_kind=executor_kind)
class TestBiasAddGrad:
    """Numerical gradient checks for nn.bias_add across shapes and axes."""

    d_shape, b_shape, axis = tvm.testing.parameters(
        ((1, 16), (16,), 1),
        ((1, 8, 2, 2), (8,), 1),
        ((1, 2, 2, 8), (8,), 3),
        ((4, 8), (8,), 1),
    )

    def test_bias_add(self, executor_kind, target, dev, d_shape, b_shape, axis):
        inp = relay.var("data", relay.TensorType(d_shape, "float32"))
        bias = relay.var("bias", relay.TensorType(b_shape, "float32"))
        forward = relay.Function([inp, bias], relay.nn.bias_add(inp, bias, axis=axis))
        check_grad(forward, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_expand_dims_grad(executor_kind, target, dev):
    """Numerical gradient check for expand_dims."""
    inp = relay.var("data", shape=(2, 3), dtype="float64")
    forward = relay.Function([inp], relay.expand_dims(inp, axis=1, num_newaxis=2))
    check_grad(forward, target_devices=[(target, dev)], executor_kind=executor_kind)
def test_concatenate_grad(executor_kind, target, dev):
    """Numerical gradient check for concatenate over a non-uniform axis."""
    x = relay.var("x", shape=(2, 2, 5))
    y = relay.var("y", shape=(2, 1, 5))
    z = relay.var("z", shape=(2, 4, 5))
    forward = relay.Function([x, y, z], relay.concatenate([x, y, z], axis=1))
    check_grad(forward, target_devices=[(target, dev)], executor_kind=executor_kind)
if __name__ == "__main__":
tvm.testing.main()
| 8,733 | 38.881279 | 96 | py |
tvm | tvm-main/tests/python/relay/test_pass_to_a_normal_form.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay import op, create_executor, transform
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count
from tvm.relay.analysis import Feature
def run_opt_pass(expr, passes):
    """Run `passes` over `expr` at opt level 3.

    Returns the optimized main function, or just its body when `expr`
    was a bare expression rather than a Function.
    """
    if not isinstance(passes, list):
        passes = [passes]
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        module = tvm.transform.Sequential(passes)(module)
    main = module["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
    """Evaluate `expr` on the LLVM backend and compare with `expected_result`."""
    device = tvm.device("llvm", 0)
    actual = create_executor(mod=mod, device=device, target="llvm").evaluate(expr)
    np.testing.assert_allclose(actual.numpy(), expected_result, rtol=rtol)
def test_explicit_bound():
    """ToANormalForm introduces let bindings into a let-free program
    without changing its value."""
    x = relay.const(1)
    y = op.add(x, x)
    z = op.add(y, y)
    f = relay.Function([], op.add(z, z))
    # No lets before the pass (idiomatic `not in`, was `not ... in`)...
    assert Feature.fLet not in detect_feature(f)
    anf = run_opt_pass(f, transform.ToANormalForm())
    # ...and lets afterwards, with the evaluated result unchanged.
    assert Feature.fLet in detect_feature(anf)
    check_eval(f(), 8.0)
    check_eval(anf(), 8.0)
# test that the construction order does not matter,
# and is instead ordered by the scope and by post-dfs ordering.
def test_order():
    """ANF binding order follows scope/post-dfs order, not construction order."""
    z = relay.const(3)
    y = relay.const(2)
    x = relay.const(1)
    val = x + y * z
    check_eval(val, 7.0)
    anf = run_opt_pass(val, [transform.ToANormalForm(), transform.InferType()])
    a = relay.Var("a", relay.IncompleteType())
    b = relay.Var("b", relay.IncompleteType())
    c = relay.Var("c", relay.IncompleteType())
    d = relay.Var("d", relay.IncompleteType())
    e = relay.Var("e", relay.IncompleteType())
    # Build the expected let chain from the innermost binding outwards:
    # let a = 1; let b = 2; let c = 3; let d = b * c; let e = a + d; e
    expected_output = e
    expected_output = relay.Let(e, a + d, expected_output)
    expected_output = relay.Let(d, b * c, expected_output)
    expected_output = relay.Let(c, z, expected_output)
    expected_output = relay.Let(b, y, expected_output)
    expected_output = relay.Let(a, x, expected_output)
    expected_output = run_opt_pass(expected_output, transform.InferType())
    assert tvm.ir.structural_equal(anf, expected_output)
def test_if():
    """ANF keeps each If branch in its own scope (bindings stay inside branches)."""
    cond = relay.const(True)
    x = relay.If(cond, relay.const(2), relay.const(3))
    anf = run_opt_pass(x, [transform.ToANormalForm(), transform.InferType()])
    a = relay.Var("a", relay.IncompleteType())
    b = relay.Var("b", relay.IncompleteType())
    c = relay.Var("c", relay.IncompleteType())
    d = relay.Var("d", relay.IncompleteType())
    # Branch constants are let-bound within their own branch, not hoisted out.
    true_branch = relay.Let(a, relay.const(2), a)
    false_branch = relay.Let(b, relay.const(3), b)
    expected_output = relay.If(c, true_branch, false_branch)
    # The If result and the condition are bound in the outer scope.
    expected_output = relay.Let(d, expected_output, d)
    expected_output = relay.Let(c, cond, expected_output)
    expected_output = run_opt_pass(expected_output, transform.InferType())
    assert tvm.ir.structural_equal(anf, expected_output)
def test_let_as_subexpr():
    """A let used as a subexpression is normalized while keeping its
    on_device annotations attached to the right values."""

    def on_cpu(x):
        # Pin x to the CPU and constrain the result to stay there.
        return relay.annotation.on_device(x, tvm.device("cpu"), constrain_result=True)

    x = relay.Var("x", relay.IncompleteType())
    c = relay.const(1)
    l = relay.Let(x, on_cpu(c + c), x)
    body = l * l
    anf = run_opt_pass(body, [transform.ToANormalForm(), transform.InferType()])
    v0 = relay.Var("v0", relay.IncompleteType())
    v1 = relay.Var("v1", relay.IncompleteType())
    v2 = relay.Var("v2", relay.IncompleteType())
    # Expected ANF: bind the annotated constant first, then the annotated sum,
    # then name the let result before squaring it.
    expected_output = relay.Let(
        v0,
        on_cpu(c),
        relay.Let(
            x,
            on_cpu(v0 + v0),
            relay.Let(v1, x, relay.Let(v2, v1 * v1, v2)),
        ),
    )
    expected_output = run_opt_pass(expected_output, transform.InferType())
    tvm.ir.assert_structural_equal(anf, expected_output)
# make sure we dont infinite loop.
# it is too large so we wont check for the exact program.
def test_recursion():
    """
    Program:
       let f(n: i32) -> i32 = {
          m = (n * 2)
          if (n == 0) {
              return m;
          } else {
              return m + f(n - 1);
          }
       }
       f(5);
    """
    mod = tvm.IRModule()
    i64 = relay.TensorType((), "int64")
    f = relay.GlobalVar("f")
    n = relay.Var("n", i64)
    m = n * relay.const(2, "int64")
    funcbody = relay.If(
        relay.equal(n, relay.const(0, "int64")), m, m + f(n - relay.const(1, "int64"))
    )
    value = relay.Function([n], funcbody, i64, [])
    mod[f] = value
    check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
    # ANF conversion must terminate on the recursive global and preserve
    # its semantics. (Removed a dead `old_f = mod[f]` assignment that was
    # never used.)
    mod = transform.ToANormalForm()(mod)
    f = mod[f]
    check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
def test_ref():
    """ANF conversion preserves reference create/read/write ordering (1 + 2 = 3)."""
    ref = relay.Var("i")
    first_read = relay.Var("iv")
    unit = relay.Var("u")
    second_read = relay.Var("uv")
    program = relay.Let(
        ref,
        relay.RefCreate(relay.const(1)),
        relay.Let(
            first_read,
            relay.RefRead(ref),
            relay.Let(
                unit,
                relay.RefWrite(ref, relay.const(2)),
                relay.Let(second_read, relay.RefRead(ref), relay.add(first_read, second_read)),
            ),
        ),
    )
    check_eval(program, 3)
    check_eval(run_opt_pass(program, transform.ToANormalForm()), 3)
def test_nat_add():
    """ANF conversion preserves ADT (Peano nat) addition from the prelude."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    add = p.mod.get_global_var("nat_add")
    dev = tvm.device("llvm", 0)
    intrp = create_executor(mod=mod, device=dev, target="llvm")
    # CAUTION: Following calls to intrp.evaluate(...) will re-prepare the prelude.
    assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
    # s(z) + s(z) == 2 in Peano encoding.
    assert count(p, intrp.evaluate(add(s(z()), s(z())))) == 2
    expr = add(s(z()), s(z()))
    f = relay.GlobalVar("f")
    mod[f] = relay.Function([], expr)
    mod = transform.ToANormalForm()(mod)
    expr = mod["f"]
    # Result is unchanged after normalization, and the prelude's add now has lets.
    assert count(p, intrp.evaluate(expr.body)) == 2
    assert Feature.fLet in detect_feature(mod[add])
def test_let():
    """A nested let chain evaluates to the same value before and after ANF."""
    x = relay.Var("x")
    y = relay.Var("y")
    four = relay.const(4.0, "float32")
    # let x = 4; let y = x; x + y  ==>  8
    program = relay.Let(x, four, relay.Let(y, x, x + y))
    check_eval(program, 8)
    check_eval(run_opt_pass(program, transform.ToANormalForm()), 8)
def test_function():
    """ANF of a Function stays a Function and computes the same result."""
    scalar_t = relay.TensorType((), "float32")
    x = relay.var("x", scalar_t)
    doubler = relay.Function([x], x + x)
    arg = relay.const(4.0, "float32")
    anf_doubler = run_opt_pass(doubler, transform.ToANormalForm())
    assert isinstance(anf_doubler, relay.Function)
    check_eval(doubler(arg), 8)
    check_eval(anf_doubler(arg), 8)
def test_gradient_if():
    """higher_order gradient of a function containing If composes with ANF
    both before and after differentiation (regression: must not crash)."""
    x = relay.var("a", shape=(1, 16))
    y = relay.var("y", shape=(1, 16))
    cond = relay.var("cond", shape=(), dtype="uint1")
    body = relay.add(x, relay.If(cond, x, x))
    func = relay.Function([cond, x, y], body)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.ToANormalForm()(mod)
    mod = relay.transform.InferType()(mod)
    mod["main"] = relay.transform.gradient(mod["main"], mode="higher_order")
    mod = relay.transform.ToANormalForm()(mod)
if __name__ == "__main__":
tvm.testing.main()
| 7,919 | 32.991416 | 86 | py |
tvm | tvm-main/tests/python/relay/test_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm import TVMError
from tvm.relay.backend import Runtime
def test_create():
    """A Runtime can be constructed from a registered name."""
    rt = Runtime("cpp")
    assert str(rt) == "cpp"
def test_create_runtime_with_options():
    """Options passed at construction are queryable on the Runtime."""
    rt = Runtime("crt", {"system-lib": True})
    assert str(rt) == "crt"
    assert rt["system-lib"]
def test_attr_check():
    """`in` reports which attributes a Runtime actually carries."""
    rt = Runtime("crt", {"system-lib": True})
    assert "system-lib" in rt
    assert "woof" not in rt
def test_create_runtime_not_found():
    """Constructing an unregistered runtime name raises a TVMError."""
    with pytest.raises(TVMError, match='Runtime "woof" is not defined'):
        Runtime("woof", {})
def test_create_runtime_attr_not_found():
    """Passing an unknown attribute to a Runtime raises a TVMError."""
    with pytest.raises(TVMError, match='Attribute "woof" is not available on this Runtime'):
        Runtime("crt", {"woof": "bark"})
def test_create_runtime_attr_type_incorrect():
    """A wrongly-typed attribute value is rejected with a type-mismatch error."""
    expected_msg = (
        'Attribute "system-lib" should have type "IntImm"'
        ' but instead found "runtime.String"'
    )
    with pytest.raises(TVMError, match=expected_msg):
        Runtime("crt", {"system-lib": "woof"})
def test_list_runtimes():
    """The runtime registry includes the crt runtime."""
    registered = Runtime.list_registered()
    assert "crt" in registered
@pytest.mark.parametrize("runtime", [Runtime("crt"), "crt"])
def test_list_runtime_options(runtime):
    """Options can be listed from either a Runtime instance or its name."""
    options = Runtime.list_registered_options(runtime)
    assert "system-lib" in options
    assert options["system-lib"] == "IntImm"
def test_list_runtime_options_not_found():
    """Listing options of an unregistered runtime raises a TVMError."""
    with pytest.raises(TVMError, match='Runtime "woof" is not defined'):
        Runtime.list_registered_options("woof")
| 2,315 | 30.297297 | 92 | py |
tvm | tvm-main/tests/python/relay/test_name_supply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import relay
from tvm.ir import GlobalVar, structural_equal
from tvm.ir.supply import NameSupply
from tvm.ir.supply import GlobalVarSupply
def test_name_supply():
    """NameSupply reserves names and disambiguates fresh ones under a prefix."""
    supply = NameSupply("prefix")
    supply.reserve_name("test")
    assert supply.contains_name("test")
    # A fresh name for a reserved base gets the prefix and a counter suffix.
    assert supply.fresh_name("test") == "prefix_test_1"
    assert supply.contains_name("test_1")
    # Without prefix-expansion the suffixed name is not considered present.
    assert not supply.contains_name("test_1", False)
    assert not supply.contains_name("test_2")
def test_global_var_supply_from_none():
    """A default GlobalVarSupply reuses reserved globals and freshens clashes."""
    supply = GlobalVarSupply()
    reserved = GlobalVar("test")
    supply.reserve_global(reserved)
    assert structural_equal(supply.unique_global_for("test"), reserved)
    assert not structural_equal(supply.fresh_global("test"), reserved)
def test_global_var_supply_from_name_supply():
    """A GlobalVarSupply built on a NameSupply respects the prefix flag."""
    names = NameSupply("prefix")
    supply = GlobalVarSupply(names)
    reserved = GlobalVar("test")
    supply.reserve_global(reserved)
    assert structural_equal(supply.unique_global_for("test", False), reserved)
    assert not structural_equal(supply.unique_global_for("test"), reserved)
def test_global_var_supply_from_ir_mod():
    """A GlobalVarSupply seeded from an IRModule reserves its existing globals."""
    lhs = relay.var("x")
    rhs = relay.var("y")
    mod = tvm.IRModule()
    existing = GlobalVar("test")
    mod[existing] = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    supply = GlobalVarSupply(mod)
    fresh = supply.fresh_global("test", False)
    assert structural_equal(supply.unique_global_for("test", False), existing)
    assert not structural_equal(supply.unique_global_for("test"), existing)
    assert not structural_equal(fresh, existing)
if __name__ == "__main__":
tvm.testing.main()
| 2,615 | 34.835616 | 84 | py |
tvm | tvm-main/tests/python/relay/test_name_transforms.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm import TVMError
from tvm.relay.backend.name_transforms import (
to_c_function_style,
to_c_variable_style,
to_c_constant_style,
prefix_name,
prefix_generated_name,
)
from tvm.runtime.name_transforms import sanitize_name
def test_to_c_function_style():
    """TVM-prefixed names convert to CamelCase function style."""
    for raw, expected in [
        ("TVM_Woof", "TVMWoof"),
        ("TVM_woof", "TVMWoof"),
        ("TVM_woof_woof", "TVMWoofWoof"),
        ("TVMGen_woof_woof", "TVMGenWoofWoof"),
    ]:
        assert to_c_function_style(raw) == expected

    # Incorrect prefix
    with pytest.raises(TVMError, match="Function not TVM prefixed"):
        to_c_function_style("Cake_Bakery")
    with pytest.raises(TVMError, match="Function name is empty"):
        to_c_function_style("")
def test_to_c_variable_style():
    """TVM-prefixed names convert to lower snake_case variable style."""
    for raw, expected in [
        ("TVM_Woof", "tvm_woof"),
        ("TVM_woof", "tvm_woof"),
        ("TVM_woof_Woof", "tvm_woof_woof"),
    ]:
        assert to_c_variable_style(raw) == expected

    # Incorrect prefix
    with pytest.raises(TVMError, match="Variable not TVM prefixed"):
        to_c_variable_style("Cake_Bakery")
    with pytest.raises(TVMError, match="Variable name is empty"):
        to_c_variable_style("")
def test_to_c_constant_style():
    """TVM-prefixed names convert to UPPER_SNAKE_CASE constant style."""
    for raw, expected in [
        ("TVM_Woof", "TVM_WOOF"),
        ("TVM_woof", "TVM_WOOF"),
        ("TVM_woof_Woof", "TVM_WOOF_WOOF"),
    ]:
        assert to_c_constant_style(raw) == expected

    with pytest.raises(TVMError, match="Constant not TVM prefixed"):
        to_c_constant_style("Cake_Bakery")
    with pytest.raises(TVMError):
        to_c_constant_style("")
def test_prefix_name():
    """prefix_name prepends TVM_ and joins segment lists with underscores."""
    assert prefix_name("Woof") == "TVM_Woof"
    for segments, expected in [
        (["Woof"], "TVM_Woof"),
        (["woof"], "TVM_woof"),
        (["woof", "moo"], "TVM_woof_moo"),
    ]:
        assert prefix_name(segments) == expected

    with pytest.raises(TVMError, match="Name is empty"):
        prefix_name("")
    with pytest.raises(TVMError, match="Name segments empty"):
        prefix_name([])
    with pytest.raises(TVMError, match="Name segment is empty"):
        prefix_name([""])
def test_prefix_generated_name():
    """prefix_generated_name prepends TVMGen_ and joins segments with underscores.

    (Removed a duplicated `["Woof"]` assertion present in the original.)
    """
    assert prefix_generated_name("Woof") == "TVMGen_Woof"
    assert prefix_generated_name(["Woof"]) == "TVMGen_Woof"
    assert prefix_generated_name(["woof"]) == "TVMGen_woof"
    assert prefix_generated_name(["woof", "moo"]) == "TVMGen_woof_moo"

    with pytest.raises(TVMError, match="Name is empty"):
        prefix_generated_name("")
    with pytest.raises(TVMError, match="Name segments empty"):
        prefix_generated_name([])
    with pytest.raises(TVMError, match="Name segment is empty"):
        prefix_generated_name([""])
def test_sanitize_name():
    """sanitize_name replaces non-identifier characters with underscores."""
    cases = {
        "+_+ ": "____",
        "input+": "input_",
        "input-": "input_",
        "input++": "input__",
        "woof:1": "woof_1",
    }
    for raw, expected in cases.items():
        assert sanitize_name(raw) == expected

    with pytest.raises(TVMError, match="Name is empty"):
        sanitize_name("")
def test_combined_logic():
    """Prefix helpers compose with the C style converters."""
    function_cases = [
        (prefix_name(["Device", "target", "Invoke"]), "TVMDeviceTargetInvoke"),
        (prefix_generated_name(["model", "Run"]), "TVMGenModelRun"),
    ]
    for name, expected in function_cases:
        assert to_c_function_style(name) == expected

    variable_cases = [
        (prefix_name(["Device", "target", "t"]), "tvm_device_target_t"),
        (prefix_generated_name(["model", "Devices"]), "tvmgen_model_devices"),
    ]
    for name, expected in variable_cases:
        assert to_c_variable_style(name) == expected
| 4,322 | 36.591304 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_fold_constant.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.backend import Executor
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
def annot_expr(e):
    """Return `e` wrapped in an on_device annotation pinning it to the CPU."""
    cpu = tvm.cpu()
    return relay.op.annotation.on_device(e, cpu, constrain_result=True)
def run_opt_pass(expr, opt_pass):
    """Type-infer `expr`, run the single pass, and return the main function
    (or just its body when `expr` was a bare expression)."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    module = tvm.IRModule.from_expr(expr)
    module = opt_pass(relay.transform.InferType()(module))
    main = module["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def test_concatenate_const():
    """Concatenating two constants folds into a single constant."""

    def before():
        const = relay.const(tvm.nd.array(np.array([1.0, 2.0, 3.0])))
        return relay.Function([], relay.op.concatenate([const, const], axis=0))

    def expected():
        folded = relay.const(tvm.nd.array(np.array([1.0, 2.0, 3.0, 1.0, 2.0, 3.0])))
        return relay.Function([], folded)

    actual = run_opt_pass(before(), transform.FoldConstant())
    reference = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(actual, reference)
def test_fold_const():
    """FoldConstant folds the constant-only subgraph regardless of the
    target in scope."""
    c_data = np.array([1, 2, 3]).astype("float32")
    t = relay.TensorType([1, 2, 3], "float32")

    def before():
        c = relay.const(c_data)
        x = relay.var("x", t)
        # (c + c) * 2 is entirely constant and should be folded away.
        folded_part = relay.multiply(relay.add(c, c), relay.const(2, "float32"))
        return relay.Function([x], relay.add(relay.add(x, folded_part), c))

    def expected():
        x = relay.var("x", t)
        pre_folded = relay.const((c_data + c_data) * 2)
        return relay.Function([x], relay.add(relay.add(x, pre_folded), relay.const(c_data)))

    # the fold constant should work on any context.
    with tvm.target.Target("cuda"):
        actual = run_opt_pass(before(), transform.FoldConstant())
    reference = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(actual, reference)
def test_fold_const_with_on_device():
    """Make sure on_device annotations don't get in the way of constant folding"""
    c_data = np.array([1, 2, 3]).astype("float32")
    t = relay.TensorType([1, 2, 3], "float32")

    def before():
        c = relay.const(c_data)
        x = relay.var("x", t)
        # Pin the parameter and the whole function result to the CPU.
        x.virtual_device_ = tvm.cpu()
        y = relay.add(c, c)
        y = relay.multiply(y, relay.const(2, "float32"))
        y = relay.add(x, y)
        z = relay.add(y, c)
        f = relay.Function([x], z)
        f.virtual_device_ = tvm.cpu()
        return f

    def expected():
        x = relay.var("x", t)
        x.virtual_device_ = tvm.cpu()
        # (c + c) * 2 is pre-folded; only the additions involving x remain.
        c_folded = (c_data + c_data) * 2
        y = relay.add(x, relay.const(c_folded))
        z = relay.add(y, relay.const(c_data))
        f = relay.Function([x], z)
        f.virtual_device_ = tvm.cpu()
        return f

    zz = run_opt_pass(before(), transform.FoldConstant())
    zexpected = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_let():
    """A chain of constant-valued lets folds down to a single binding."""
    c_data = np.array(1).astype("float32")
    t = relay.TensorType([1], "float32")

    def before():
        builder = relay.ScopeBuilder()
        x = relay.var("x", t)
        const_bind = builder.let("t1", relay.const(c_data))
        sum_bind = builder.let("t2", relay.add(const_bind, const_bind))
        result_bind = builder.let("t3", relay.add(sum_bind, x))
        builder.ret(result_bind)
        return relay.Function([x], builder.get())

    def expected():
        builder = relay.ScopeBuilder()
        x = relay.var("x", t)
        result_bind = builder.let("t3", relay.add(relay.const(c_data + c_data), x))
        builder.ret(result_bind)
        return relay.Function([x], builder.get())

    actual = run_opt_pass(before(), transform.FoldConstant())
    reference = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(actual, reference)
def test_fold_let_with_on_device():
    """Make sure on_device annotations don't get in the way of constant folding,
    and inlined constants bring their annotations with them."""
    c_data = np.array(1).astype("float32")
    t = relay.TensorType([1], "float32")

    def before():
        sb = relay.ScopeBuilder()
        x = relay.var("x", t)
        x.virtual_device_ = tvm.cpu()
        # Every let value carries an on_device(CPU) annotation.
        t1 = sb.let("t1", annot_expr(relay.const(c_data)))
        t2 = sb.let("t2", annot_expr(relay.add(t1, t1)))
        t3 = sb.let("t3", annot_expr(relay.add(t2, x)))
        sb.ret(t3)
        f = relay.Function([x], sb.get())
        f.virtual_device_ = tvm.cpu()
        return f

    def expected():
        sb = relay.ScopeBuilder()
        x = relay.var("x", t)
        x.virtual_device_ = tvm.cpu()
        # t1/t2 fold away; the inlined folded constant keeps its annotation.
        c_folded = c_data + c_data
        t3 = sb.let("t3", annot_expr(relay.add(annot_expr(relay.const(c_folded)), x)))
        sb.ret(t3)
        f = relay.Function([x], sb.get())
        f.virtual_device_ = tvm.cpu()
        return f

    zz = run_opt_pass(before(), transform.FoldConstant())
    zexpected = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_tuple():
    """Constant tuple fields are folded through TupleGetItem."""
    c_data = np.array(1).astype("float32")
    t = relay.TensorType([1], "float32")

    def before():
        c = relay.const(c_data)
        x = relay.var("x", t)
        pair = relay.Tuple([x, c])
        summed = relay.add(pair[1], c)
        return relay.Function([x], relay.add(summed, pair[0]))

    def expected():
        x = relay.var("x", t)
        return relay.Function([x], relay.add(relay.const(c_data + c_data), x))

    actual = run_opt_pass(before(), transform.FoldConstant())
    reference = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(actual, reference)
def test_fold_concat():
    """concatenate over two constants folds to the concatenated constant."""
    c_data = np.array([[1, 2, 3]]).astype("float32")

    def before():
        lhs = relay.const(c_data)
        rhs = relay.const(c_data)
        return relay.Function([], relay.concatenate((lhs, rhs), axis=0))

    def expected():
        folded = relay.const(np.concatenate((c_data, c_data), axis=0))
        return relay.Function([], folded)

    actual = run_opt_pass(before(), transform.FoldConstant())
    reference = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(actual, reference)
def test_fold_if():
    """If nodes with a constant condition fold down to the taken branch.

    The original duplicated the entire before/expected pair for the
    true and false conditions; both cases now share parameterized builders.
    """
    x_data = np.array([[1, 2, 3]]).astype("float32")

    def before(cond_value):
        a = relay.const(np.array(cond_value).astype("bool"))
        x = relay.const(x_data)
        y = relay.const(x_data)
        iff = relay.If(a, x + y, x - y)
        return relay.Function([], iff)

    def expected(result_data):
        return relay.Function([], relay.const(result_data))

    # cond=1 takes the true branch (x + y); cond=0 the false branch (x - y).
    for cond_value, result_data in [(1, x_data + x_data), (0, x_data - x_data)]:
        zz = run_opt_pass(before(cond_value), transform.FoldConstant())
        zexpected = run_opt_pass(expected(result_data), transform.InferType())
        tvm.ir.assert_structural_equal(zz, zexpected)
def test_fold_shape_of():
    """shape_of over a statically-shaped expression folds to a constant vector."""
    c_shape = (8, 9, 10)

    def make_input(dtype):
        a = relay.var("x", shape=c_shape, dtype="float32")
        b = relay.var("y", shape=c_shape, dtype="float32")
        return relay.Function([a, b], relay.shape_of(a + b, dtype))

    def make_expected(dtype):
        a = relay.var("x", shape=c_shape, dtype="float32")
        b = relay.var("y", shape=c_shape, dtype="float32")
        shape_const = relay.const(np.array(c_shape).astype(dtype), dtype=dtype)
        return relay.Function([a, b], shape_const)

    for dtype in ["int32", "float32"]:
        folded = run_opt_pass(make_input(dtype), transform.FoldConstant())
        reference = run_opt_pass(make_expected(dtype), transform.InferType())
        tvm.ir.assert_structural_equal(folded, reference)
def test_fold_ndarray_size():
    """ndarray_size over a statically-shaped expression folds to a scalar constant."""
    c_shape = (8, 9, 10)

    def make_input(dtype):
        a = relay.var("x", shape=c_shape, dtype="float32")
        b = relay.var("y", shape=c_shape, dtype="float32")
        return relay.Function([a, b], relay.ndarray_size(a + b, dtype))

    def make_expected(dtype):
        a = relay.var("x", shape=c_shape, dtype="float32")
        b = relay.var("y", shape=c_shape, dtype="float32")
        size_const = relay.const(np.size(np.zeros(c_shape)), dtype=dtype)
        func = relay.Function([a, b], size_const)
        return tvm.IRModule.from_expr(func)["main"]

    for dtype in ["int32", "float32"]:
        folded = run_opt_pass(make_input(dtype), transform.FoldConstant())
        reference = run_opt_pass(make_expected(dtype), transform.InferType())
        tvm.ir.assert_structural_equal(folded, reference)
def test_fold_batch_norm():
    """SimplifyInference + FoldConstant + FoldScaleAxis collapse conv2d+batch_norm
    into a conv2d followed by a constant bias add.

    Fix: the custom ``initializer`` previously did ``param = np.zeros(...)``,
    which rebinds the local name and never writes into the caller's buffer
    (a silent no-op that only "worked" because the buffers start out zeroed).
    It now fills the array in place.
    """

    def expected():
        # The reference graph after batch-norm removal: conv2d + constant bias.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.const(np.zeros((16, 3, 3, 3)))
        bias = relay.const(np.zeros((16, 1, 1)))
        conv = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        add = relay.add(conv, bias)
        return relay.Function(relay.analysis.free_vars(add), add)

    remove_bn_pass = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.SimplifyInference(),
            relay.transform.FoldConstant(),
            relay.transform.FoldScaleAxis(),
        ]
    )

    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    weight = relay.var("weight")
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")

    conv = relay.nn.conv2d(
        data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
    )
    bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)

    def initializer(_, param):
        # Write in place: a plain `param = ...` would only rebind the local
        # name and leave the workload's parameter buffer untouched.
        param[:] = np.zeros(param.shape)

    mod, params = create_workload(bn_output[0], initializer)
    mod["main"] = bind_params_by_name(mod["main"], params)

    with tvm.transform.PassContext(opt_level=3):
        mod = remove_bn_pass(mod)

    expect = run_infer_type(expected())
    tvm.ir.assert_structural_equal(mod["main"], expect)
def test_fold_dropout():
    """A graph containing dropout is left untouched by FoldConstant."""

    def make_input():
        # An all-constant graph that would otherwise trigger constant folding.
        const_data = relay.const(np.arange(10).astype(np.float32))
        out = relay.nn.dropout(const_data) + relay.const(1.0)
        return relay.Function(relay.analysis.free_vars(out), out)

    passes = tvm.transform.Sequential(
        [
            relay.transform.InferType(),
            relay.transform.FoldConstant(),
        ]
    )

    before_mod = tvm.IRModule.from_expr(make_input())
    with tvm.transform.PassContext(opt_level=3):
        after_mod = passes(before_mod)

    # The result must equal the input module (up to type inference).
    tvm.ir.assert_structural_equal(run_infer_type(before_mod["main"]), after_mod["main"])
def test_fold_qnn_const():
    """FoldConstant(fold_qnn=True) evaluates QNN ops whose data inputs are all
    constant, while QNN ops with any non-constant data input stay untouched.
    Without fold_qnn, nothing is folded at all."""
    def before():
        # QNN op with 2 constant arguments.
        add = relay.qnn.op.add(
            relay.const(np.ones((2, 3), dtype="uint8"), dtype="uint8"),
            relay.const(np.ones((2, 3), dtype="uint8"), dtype="uint8"),
            lhs_scale=relay.const(2.0),
            lhs_zero_point=relay.const(0),
            rhs_scale=relay.const(2.0),
            rhs_zero_point=relay.const(0),
            output_scale=relay.const(1.0),
            output_zero_point=relay.const(0),
        )
        # QNN op with 1 constant and 1 non-constant arguments.
        a = relay.var("a", shape=[2, 3], dtype="float32")
        dense = relay.qnn.op.dense(
            relay.qnn.op.quantize(a, relay.const(1.0), relay.const(0)),
            add,
            input_zero_point=relay.const(0),
            kernel_zero_point=relay.const(0),
            input_scale=relay.const(2.0),
            kernel_scale=relay.const(2.0),
            units=None,
        )
        # QNN op with 2 non-constant arguments.
        b = relay.var("b", shape=[2], dtype="float32")
        bias = relay.qnn.op.add(
            dense,
            relay.qnn.op.quantize(b, relay.const(1.0), relay.const(0), out_dtype="int32"),
            lhs_scale=relay.const(2.0),
            lhs_zero_point=relay.const(0),
            rhs_scale=relay.const(2.0),
            rhs_zero_point=relay.const(0),
            output_scale=relay.const(1.0),
            output_zero_point=relay.const(0),
        )
        return relay.Function([a, b], bias)
    def expected():
        a = relay.var("a", shape=[2, 3], dtype="float32")
        dense = relay.qnn.op.dense(
            relay.qnn.op.quantize(a, relay.const(1.0), relay.const(0)),
            # qnn.add of two all-ones uint8 tensors at scale 2.0, zp 0, output
            # scale 1.0: (1*2) + (1*2) = 4, so the fold produces a tensor of 4s.
            relay.const(np.array([[4, 4, 4], [4, 4, 4]], dtype="uint8"), dtype="uint8"),
            input_zero_point=relay.const(0),
            kernel_zero_point=relay.const(0),
            input_scale=relay.const(2.0),
            kernel_scale=relay.const(2.0),
            units=None,
        )
        b = relay.var("b", shape=[2], dtype="float32")
        bias = relay.qnn.op.add(
            dense,
            relay.qnn.op.quantize(b, relay.const(1.0), relay.const(0), out_dtype="int32"),
            lhs_scale=relay.const(2.0),
            lhs_zero_point=relay.const(0),
            rhs_scale=relay.const(2.0),
            rhs_zero_point=relay.const(0),
            output_scale=relay.const(1.0),
            output_zero_point=relay.const(0),
        )
        return relay.Function([a, b], bias)
    # Nothing changed after applying FoldConstant
    a = run_opt_pass(before(), transform.FoldConstant())
    b = run_opt_pass(before(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
    # Fold QNN constants
    a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
    b = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
def test_fold_quantize():
    """FoldConstant(fold_qnn=True) folds qnn.quantize of a constant into an
    int8 constant; without fold_qnn the graph is unchanged."""
    t = relay.TensorType([1, 2, 3], "int8")
    def before():
        data = tvm.nd.array(np.array([1.0, 2.0, 3.0], dtype="float32"))
        const_fp = relay.const(data, dtype="float32")
        const_i8 = relay.qnn.op.quantize(
            const_fp, output_scale=relay.const(0.5), output_zero_point=relay.const(0)
        )
        x = relay.var("x", t)
        sub = relay.op.subtract(x, const_i8)
        func = relay.Function([x], sub)
        return func
    def expected():
        # quantize([1, 2, 3], scale=0.5, zero_point=0) -> x / 0.5 = [2, 4, 6].
        data = tvm.nd.array(np.array([2, 4, 6], dtype="int8"))
        const_i8 = relay.const(data, dtype="int8")
        x = relay.var("x", t)
        sub = relay.op.subtract(x, const_i8)
        func = relay.Function([x], sub)
        return func
    # Nothing changed after applying FoldConstant
    a = run_opt_pass(before(), transform.FoldConstant())
    b = run_opt_pass(before(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
    # Fold QNN constants
    a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
    b = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
def test_fold_qnn_conv2d_qnn_mul():
    """FoldConstant(fold_qnn=True) folds a chain of two QNN ops (conv2d then
    mul) whose inputs are all constant down to a single constant."""
    def before():
        dtype = "uint8"
        # 2x2 all-ones input convolved with a 2x2 all-ones kernel -> scalar 4.
        op0 = relay.qnn.op.conv2d(
            relay.const(np.ones((1, 1, 2, 2), dtype=dtype), dtype=dtype),
            relay.const(np.ones((1, 1, 2, 2), dtype=dtype), dtype=dtype),
            input_zero_point=relay.const(0, "int32"),
            kernel_zero_point=relay.const(0, "int32"),
            input_scale=relay.const(1.0, "float32"),
            kernel_scale=relay.const(1.0, "float32"),
            kernel_size=(2, 2),
            channels=1,
        )
        # Multiply the conv result by the constant 10 -> 40.
        op = relay.qnn.op.mul(
            op0,
            relay.const(np.array([10], dtype="int32"), dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
        )
        func = relay.Function([], op)
        return func
    def expected():
        data = relay.const(np.array([[[[40]]]], dtype="int32"), dtype="int32")
        func = relay.Function([], data)
        return func
    # Nothing changed after applying FoldConstant
    a = run_opt_pass(before(), transform.FoldConstant())
    b = run_opt_pass(before(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
    # Fold QNN constants
    a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
    b = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
def test_fold_requantize():
    """FoldConstant(fold_qnn=True) folds qnn.requantize of a constant tensor
    into a pre-computed int8 constant."""
    def before():
        data = tvm.nd.array(np.array([1, 2, 3], dtype="int8"))
        const_i8 = relay.const(data, dtype="int8")
        op = relay.qnn.op.requantize(
            const_i8,
            input_scale=relay.const(2.0, dtype="float32"),
            input_zero_point=relay.const(1, dtype="int32"),
            output_scale=relay.const(1.0, dtype="float32"),
            output_zero_point=relay.const(1, dtype="int32"),
        )
        x = relay.var("x", relay.TensorType([3], "int8"))
        add = relay.op.add(op, x)
        func = relay.Function([x], add)
        return func
    def expected():
        # requantize maps q -> (q - 1) * 2.0 / 1.0 + 1, i.e. [1, 2, 3] -> [1, 3, 5].
        data = tvm.nd.array(np.array([1, 3, 5], dtype="int8"))
        const_i8 = relay.const(data, dtype="int8")
        x = relay.var("x", relay.TensorType([3], "int8"))
        add = relay.op.add(const_i8, x)
        func = relay.Function([x], add)
        return func
    # Nothing changed after applying FoldConstant
    a = run_opt_pass(before(), transform.FoldConstant())
    b = run_opt_pass(before(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
    # Fold QNN constants
    a = run_opt_pass(before(), transform.FoldConstant(fold_qnn=True))
    b = run_opt_pass(expected(), transform.InferType())
    tvm.ir.assert_structural_equal(a, b)
def test_pass_link_params():
    """
    Ensure that the proper executor is passed to the interpreter instance.

    The test fails if FoldConstant does not override the executor, because
    "int8" is not supported in ScheduleBuilder.
    """
    def expr():
        # A constant cast that FoldConstant must evaluate via the interpreter.
        z = relay.const(10, dtype="int8")
        return relay.cast(z, dtype="int32")
    mod = tvm.IRModule.from_expr(expr())
    mod = tvm.relay.transform.InferType()(mod)
    # Add executor with link-params
    mod = mod.with_attr("executor", Executor("aot", {"link-params": True}))
    mod = tvm.relay.transform.FoldConstant()(mod)
if __name__ == "__main__":
    # Run every test in this file through TVM's pytest wrapper.
    tvm.testing.main()
| 19,895 | 33.481802 | 90 | py |
tvm | tvm-main/tests/python/relay/test_pass_check_kind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import check_kind
import pytest
def test_typevar_kind():
    """check_kind on a TypeVar returns the kind the variable was declared with."""
    declared_kinds = [
        relay.TypeKind.Type,
        relay.TypeKind.ShapeVar,
        relay.TypeKind.Constraint,
    ]
    for index, kind in enumerate(declared_kinds, start=1):
        assert check_kind(relay.TypeVar("tp%d" % index, kind)) == kind
def test_tuple_kind():
    """A tuple whose fields are all of kind Type is itself of kind Type."""
    type_var = relay.TypeVar("tp", relay.TypeKind.Type)
    tensor = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    func = relay.FuncType(
        tvm.runtime.convert([]), tensor, tvm.runtime.convert([]), tvm.runtime.convert([])
    )
    members = tvm.runtime.convert([type_var, func, tensor])
    assert check_kind(relay.TupleType(members)) == relay.TypeKind.Type
def test_func_kind():
    """A FuncType whose params, return type, and constraints are well-kinded
    has kind Type."""
    # only contain type kinds
    tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
    shape = tvm.runtime.convert([1, 2, 3])
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    # A type relation is a valid constraint on the function type.
    tr = relay.TypeRelation(None, tvm.runtime.convert([tensor_type, tp1]), 1, None)
    type_params = tvm.runtime.convert([tp1, tp2])
    type_constraints = tvm.runtime.convert([tr])
    arg_types = tvm.runtime.convert([tp1, tensor_type])
    ret_type = relay.TupleType(tvm.runtime.convert([tp2, tensor_type]))
    tf = relay.FuncType(arg_types, ret_type, type_params, type_constraints)
    assert check_kind(tf) == relay.TypeKind.Type
def test_ref_kind():
    """References whose contents have kind Type are themselves of kind Type."""
    tensor = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    func = relay.FuncType(
        tvm.runtime.convert([]), tensor, tvm.runtime.convert([]), tvm.runtime.convert([])
    )
    ref_tensor = relay.RefType(tensor)
    assert check_kind(ref_tensor) == relay.TypeKind.Type
    ref_func = relay.RefType(func)
    assert check_kind(ref_func) == relay.TypeKind.Type
    # References nest: a ref of a tuple of refs is still of kind Type.
    ref_tuple = relay.RefType(relay.TupleType([ref_tensor, ref_func]))
    assert check_kind(ref_tuple) == relay.TypeKind.Type
def test_relation_kind():
    """A TypeRelation over Type-kinded arguments has kind Constraint."""
    # only have type kinds for arguments
    tp = relay.TypeVar("tp", relay.TypeKind.Type)
    tt = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    tf = relay.FuncType(
        tvm.runtime.convert([]), tt, tvm.runtime.convert([]), tvm.runtime.convert([])
    )
    args = tvm.runtime.convert([tf, tt, tp])
    tr = relay.TypeRelation(None, args, 2, None)
    assert check_kind(tr) == relay.TypeKind.Constraint
def test_global_typevar_kind():
    """check_kind on a GlobalTypeVar returns its declared kind."""
    declared = [("gtv1", relay.TypeKind.AdtHandle), ("gtv2", relay.TypeKind.Type)]
    for name, kind in declared:
        assert check_kind(relay.GlobalTypeVar(name, kind)) == kind
def test_typecall_kind():
    """A fully-applied TypeCall on an ADT handle has kind Type."""
    gtv = relay.GlobalTypeVar("gtv")
    # Case 1: nullary ADT applied to zero arguments.
    mod = tvm.IRModule()
    data = relay.TypeData(gtv, [], [])
    mod[gtv] = data
    empty_call = relay.TypeCall(gtv, [])
    assert check_kind(empty_call, mod) == relay.TypeKind.Type
    # Case 2: ADT with one type parameter applied to one type argument.
    new_mod = tvm.IRModule()
    tv = relay.TypeVar("tv")
    new_data = relay.TypeData(gtv, [tv], [])
    new_mod[gtv] = new_data
    call = relay.TypeCall(gtv, [relay.TupleType([])])
    assert check_kind(call, new_mod) == relay.TypeKind.Type
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_tuple_kind():
    """A tuple whose fields have non-Type kinds is rejected by check_kind."""
    bad_kinds = [relay.TypeKind.ShapeVar, relay.TypeKind.BaseType, relay.TypeKind.Constraint]
    members = [relay.TypeVar("tp%d" % (i + 1), kind) for i, kind in enumerate(bad_kinds)]
    check_kind(relay.TupleType(tvm.runtime.convert(members)))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_func_kind():
    """A FuncType built from non-Type-kinded vars (ShapeVar/BaseType/Constraint)
    must fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.BaseType)
    tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
    type_params = tvm.runtime.convert([tp1, tp2, tp3])
    type_constraints = tvm.runtime.convert([])
    arg_types = tvm.runtime.convert([tp1, tp2])
    ret_type = tp3
    tf = relay.FuncType(arg_types, ret_type, type_params, type_constraints)
    check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_ref_kind():
    """A reference whose content is not of kind Type is rejected."""
    shape_var = relay.TypeVar("tp", relay.TypeKind.ShapeVar)
    check_kind(relay.RefType(shape_var))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_relation_kind():
    """A TypeRelation over non-Type-kinded arguments must fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.BaseType)
    tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
    args = tvm.runtime.convert([tp1, tp2, tp3])
    func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
    tr = relay.TypeRelation(func, args, 2, None)
    check_kind(tr)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_callee():
    """The callee of a TypeCall must be an ADT handle, not an ordinary type var."""
    non_adt = relay.GlobalTypeVar("v1", relay.TypeKind.Type)
    check_kind(relay.TypeCall(non_adt, []))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_args():
    """Passing a TypeData (not of kind Type) as a TypeCall argument must fail."""
    # args must all be type kind
    mod = tvm.IRModule()
    gtv = relay.GlobalTypeVar("v1")
    data = relay.TypeData(gtv, [], [])
    mod[gtv] = data
    check_kind(relay.TypeCall(gtv, [data]))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_typecall_invalid_num_args():
    """A TypeCall must supply one argument per type parameter of the ADT."""
    mod = tvm.IRModule()
    adt_handle = relay.GlobalTypeVar("v1")
    type_param = relay.TypeVar("tv")
    mod[adt_handle] = relay.TypeData(adt_handle, [type_param], [])
    # One type parameter declared, but zero arguments supplied.
    check_kind(relay.TypeCall(adt_handle, []))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_ret_type():
    """A FuncType whose return type is a ShapeVar must fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.ShapeVar)
    tf = relay.FuncType(
        tvm.runtime.convert([tp1]), tp2, tvm.runtime.convert([tp1, tp2]), tvm.runtime.convert([])
    )
    check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_arg_types():
    """A FuncType with a ShapeVar argument type must fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
    tf = relay.FuncType(
        tvm.runtime.convert([tp1]), tp2, tvm.runtime.convert([tp1, tp2]), tvm.runtime.convert([])
    )
    check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_tuple():
    """An ill-kinded tuple (ShapeVar fields) used as a return type must make
    the enclosing FuncType fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
    ret_type = relay.TupleType(tvm.runtime.convert([tp1, tp1, tp1]))
    tf = relay.FuncType(
        tvm.runtime.convert([]), ret_type, tvm.runtime.convert([tp1]), tvm.runtime.convert([])
    )
    check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func_with_invalid_relation():
    """An ill-kinded constraint (relation over ShapeVar/Constraint vars) must
    make the enclosing FuncType fail kind checking."""
    tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.ShapeVar)
    tp3 = relay.TypeVar("tp3", relay.TypeKind.Constraint)
    func = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")
    tr = relay.TypeRelation(func, tvm.runtime.convert([tp2, tp3]), 1, None)
    tf = relay.FuncType(
        tvm.runtime.convert([tp1]),
        tp1,
        tvm.runtime.convert([tp1, tp2, tp3]),
        tvm.runtime.convert([tr]),
    )
    check_kind(tf)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_tuple_with_invalid_func():
    """An ill-kinded FuncType (ShapeVar return) nested inside a tuple must make
    the tuple fail kind checking."""
    tensor_type = relay.TensorType(tvm.runtime.convert([1, 2, 3]), "float32")
    tp1 = relay.TypeVar("tp1", relay.TypeKind.ShapeVar)
    tf = relay.FuncType(
        tvm.runtime.convert([]), tp1, tvm.runtime.convert([tp1]), tvm.runtime.convert([])
    )
    tup_ty = relay.TupleType(tvm.runtime.convert([tensor_type, tf]))
    check_kind(tup_ty)
if __name__ == "__main__":
    # Run through pytest rather than calling each test function directly.
    # The previous hand-written call list had drifted out of sync (it omitted
    # test_typevar_kind), and directly invoking the @pytest.mark.xfail tests
    # raises tvm.error.TVMError because the marker only takes effect under a
    # pytest runner, aborting the script at the first "invalid" test.
    pytest.main([__file__])
| 9,035 | 31.503597 | 97 | py |
tvm | tvm-main/tests/python/relay/test_pass_gradient.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import GlobalVar
from tvm.relay.analysis import free_vars, free_type_vars
from tvm.relay import create_executor, transform
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import (
make_nat_expr,
run_infer_type,
check_grad,
rand,
count_ops,
)
import tvm.relay.op as op
def test_fo_id():
    """First-order gradient of the identity: forward == x, grad == ones."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    arg = relay.var("x", tensor_ty)
    ident = run_infer_type(relay.Function([arg], arg))
    back_func = run_infer_type(gradient(ident, mode="first_order"))
    expected_ty = relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])])
    )
    assert back_func.checked_type == expected_ty
    sample = rand(dtype, *shape)
    forward, (grad,) = create_executor().evaluate(back_func)(sample)
    tvm.testing.assert_allclose(forward.numpy(), sample.numpy())
    tvm.testing.assert_allclose(grad.numpy(), np.ones_like(sample.numpy()))
def test_id():
    """Higher-order (default-mode) gradient of the identity function."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    arg = relay.var("x", tensor_ty)
    ident = run_infer_type(relay.Function([arg], arg))
    back_func = run_infer_type(gradient(ident))
    expected_ty = relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])])
    )
    assert back_func.checked_type == expected_ty
    sample = rand(dtype, *shape)
    forward, (grad,) = create_executor().evaluate(back_func)(sample)
    tvm.testing.assert_allclose(forward.numpy(), sample.numpy())
    tvm.testing.assert_allclose(grad.numpy(), np.ones_like(sample.numpy()))
def test_relu():
    """The gradient of relu type-checks with the expected (value, grads) signature."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    arg = relay.var("x", tensor_ty)
    func = run_infer_type(relay.Function([arg], op.nn.relu(arg)))
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])])
    )
    # gradient will implicitly check that no graph appear in result
def test_add():
    """Gradient of x + x: forward is 2x and the gradient is a tensor of 2s."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    arg = relay.var("x", tensor_ty)
    func = run_infer_type(relay.Function([arg], arg + arg))
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])])
    )
    sample = rand(dtype, *shape)
    forward, (grad,) = create_executor().evaluate(back_func)(sample)
    tvm.testing.assert_allclose(forward.numpy(), 2 * sample.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 2 * np.ones_like(sample.numpy()))
def test_check_grad():
    """Numerical-vs-symbolic gradient check for elementwise addition."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    lhs = relay.var("x", tensor_ty)
    rhs = relay.var("y", tensor_ty)
    check_grad(relay.Function([lhs, rhs], lhs + rhs))
def test_temp_add():
    """Gradient through a ScopeBuilder let-binding: y = x + x; return y + y,
    so the forward value is 4x and the gradient is 4."""
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(y + y)
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x = rand(dtype, *shape)
    forward, (grad,) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
def test_sub():
    """Gradient of x - x: both forward value and gradient are all zeros."""
    shape = (10, 10)
    dtype = "float32"
    tensor_ty = relay.TensorType(shape, dtype)
    arg = relay.var("x", tensor_ty)
    func = run_infer_type(relay.Function([arg], arg - arg))
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [tensor_ty], relay.TupleType([tensor_ty, relay.TupleType([tensor_ty])])
    )
    sample = rand(dtype, *shape)
    forward, (grad,) = create_executor().evaluate(back_func)(sample)
    tvm.testing.assert_allclose(forward.numpy(), np.zeros_like(sample.numpy()))
    tvm.testing.assert_allclose(grad.numpy(), np.zeros_like(sample.numpy()))
def test_broadcast_add():
    """Gradient of broadcasting add: (3,4,1) + (1,5) -> (3,4,5); each input's
    gradient is the all-ones adjoint summed over its broadcast axes."""
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = "float32"
    x_nd = rand(dtype, *shape1)
    y_nd = rand(dtype, *shape2)
    x_np = x_nd.numpy()
    y_np = y_nd.numpy()
    expected_forward = x_np + y_np
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    func = relay.Function([x, y], x + y)
    func = run_infer_type(func)
    full_func = run_infer_type(gradient(func))
    # Returns (forward value, (grad_x, grad_y)) with the original input types.
    assert full_func.checked_type == relay.FuncType(
        [t1, t2],
        relay.TupleType(
            [relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])]
        ),
    )
    forward, (grad_x, grad_y) = create_executor().evaluate(full_func)(x_nd, y_nd)
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    # x was broadcast along axis 2, so its gradient sums over that axis.
    tvm.testing.assert_allclose(
        grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True)
    )
    # y was broadcast along axes 0 and 1, so its gradient sums over those axes.
    tvm.testing.assert_allclose(
        grad_y.numpy(),
        np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0),
    )
def test_broadcast_subtract():
    """Gradient of broadcasting subtract: like test_broadcast_add, but the
    right operand's gradient is negated."""
    shape1 = (3, 4, 1)
    shape2 = (1, 5)
    dtype = "float32"
    x_nd = rand(dtype, *shape1)
    y_nd = rand(dtype, *shape2)
    x_np = x_nd.numpy()
    y_np = y_nd.numpy()
    expected_forward = x_np - y_np
    t1 = relay.TensorType(shape1, dtype)
    t2 = relay.TensorType(shape2, dtype)
    x = relay.var("x", t1)
    y = relay.var("y", t2)
    func = relay.Function([x, y], x - y)
    func = run_infer_type(func)
    full_func = run_infer_type(gradient(func))
    assert full_func.checked_type == relay.FuncType(
        [t1, t2],
        relay.TupleType(
            [relay.TensorType(expected_forward.shape, dtype), relay.TupleType([t1, t2])]
        ),
    )
    forward, (grad_x, grad_y) = create_executor().evaluate(full_func)(x_nd, y_nd)
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    tvm.testing.assert_allclose(
        grad_x.numpy(), np.ones_like(expected_forward).sum(axis=2, keepdims=True)
    )
    # Subtrahend gradient is the negated adjoint, summed over broadcast axes.
    tvm.testing.assert_allclose(
        grad_y.numpy(),
        -np.ones_like(expected_forward).sum(axis=(0, 1), keepdims=True).squeeze(axis=0),
    )
def _test_tuple(mode):
    """Shared driver: gradient of tup[0] + tup[1] - tup[2] over a locally built
    tuple, in either "higher_order" or "first_order" mode."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = relay.var("y", t)
    z = relay.var("z", t)
    if mode == "higher_order":
        # Higher-order mode exercises a let-bound tuple.
        tup = relay.Var("tup")
        func = relay.Function(
            [x, y, z],
            relay.Let(
                tup,
                relay.Tuple([x, y, z]),
                relay.TupleGetItem(tup, 0)
                + relay.TupleGetItem(tup, 1)
                - relay.TupleGetItem(tup, 2),
            ),
        )
    else:
        # first order does not do let.
        tup = relay.Tuple([x, y, z])
        func = relay.Function(
            [x, y, z],
            relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1) - relay.TupleGetItem(tup, 2),
        )
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    assert back_func.checked_type == relay.FuncType(
        [t, t, t], relay.TupleType([t, relay.TupleType([t, t, t])])
    )
    x_nd = rand(dtype, *shape)
    y_nd = rand(dtype, *shape)
    z_nd = rand(dtype, *shape)
    x_np = x_nd.numpy()
    y_np = y_nd.numpy()
    z_np = z_nd.numpy()
    expected_forward = x_np + y_np - z_np
    forward, (grad_x, grad_y, grad_z) = create_executor().evaluate(back_func)(x_nd, y_nd, z_nd)
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    # d/dx = d/dy = +1 everywhere; d/dz = -1 from the subtraction.
    tvm.testing.assert_allclose(grad_x.numpy(), np.ones_like(grad_x.numpy()))
    tvm.testing.assert_allclose(grad_y.numpy(), np.ones_like(grad_y.numpy()))
    tvm.testing.assert_allclose(grad_z.numpy(), -1 * np.ones_like(grad_z.numpy()))
def _test_tuple_argument(mode):
    """Shared driver: gradient of a function whose single parameter is a tuple
    of tensors; the body sums the tuple's fields."""
    shape = (2, 3)
    dtype = "float32"
    tensor_type = relay.TensorType(shape, dtype)
    fields = 3
    tuple_type = relay.TupleType([tensor_type] * fields)
    tup = relay.var("tup", type_annotation=tuple_type)
    # body = tup[0] + tup[1] + tup[2]
    body = relay.TupleGetItem(tup, 0)
    for i in range(1, fields):
        body = relay.add(body, relay.TupleGetItem(tup, i))
    func = relay.Function([tup], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func, mode=mode))
    xs = [rand(dtype, *shape) for _ in range(fields)]
    xs_np = np.array([x.numpy() for x in xs])
    expected_forward = np.sum(xs_np, axis=0)
    forward, grad = create_executor().evaluate(back_func)(tuple(xs))
    tvm.testing.assert_allclose(forward.numpy(), expected_forward)
    # Every field of the tuple gradient is an all-ones tensor.
    for field in grad[0]:
        tvm.testing.assert_allclose(field.numpy(), np.ones_like(field.numpy()))
def test_tuple():
    """Tuple construction/projection under higher-order AD."""
    _test_tuple(mode="higher_order")
def test_tuple_first_order():
    """Tuple construction/projection under first-order AD."""
    _test_tuple(mode="first_order")
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_tuple_argument():
    """Tuple-typed top-level arguments under higher-order AD (known failure)."""
    # fails until we add support for top-level tuple arguments in higher-order AD
    _test_tuple_argument("higher_order")
def test_tuple_argument_first_order():
    """Tuple-typed top-level arguments work under first-order AD."""
    _test_tuple_argument(mode="first_order")
def test_pow():
    """Gradient through a recursive prelude function: nat_iterate applies
    `double` three times, so the function computes 8x and its gradient is 8."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat_iterate = mod.get_global_var("nat_iterate")
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    # func(i) = double(double(double(i))) = 8 * i
    func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod = transform.InferType()(mod)
    mod["main"] = gradient(mod["main"], mod=mod)
    m = transform.InferType()(mod)
    back_func = m["main"]
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    i_nd = rand(dtype, *shape)
    forward, (grad_i,) = create_executor(mod=mod).evaluate(back_func)(i_nd)
    tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
    tvm.testing.assert_allclose(grad_i.numpy(), 8 * np.ones_like(grad_i.numpy()))
def test_ref():
    """Gradient through mutable references: r := x; r := r + r; read r,
    so the function computes 2x and its gradient is 2."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    r = relay.Var("r")
    u = relay.Var("u")
    # Built inside-out: let r = ref(x) in (let u = (r := !r + !r) in !r)
    body = relay.RefRead(r)
    body = relay.Let(u, relay.RefWrite(r, relay.RefRead(r) + relay.RefRead(r)), body)
    body = relay.Let(r, relay.RefCreate(x), body)
    func = relay.Function([x], body)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    forward, (grad_x,) = create_executor().evaluate(back_func)(x_nd)
    tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy())
    tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy()))
def test_square_second_order():
    """Second-order gradient of x*x: differentiate the first-derivative
    function (2x), whose forward value is 2x and whose gradient is 2."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    func = relay.Function([x], x * x)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    y = relay.var("y", t)
    # Wrap back_func to return only the gradient w.r.t. its first input (2y),
    # so it can be differentiated a second time.
    back_func_adjusted = relay.Function(
        [y], relay.TupleGetItem(relay.TupleGetItem(back_func(y), 1), 0)
    )
    back_func_adjusted = run_infer_type(back_func_adjusted)
    back_back_func = run_infer_type(gradient(back_func_adjusted))
    # NOTE: this type assertion is on the first-derivative function.
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x_nd = rand(dtype, *shape)
    forward, (grad_x,) = create_executor().evaluate(back_back_func)(x_nd)
    tvm.testing.assert_allclose(forward.numpy(), 2 * x_nd.numpy())
    tvm.testing.assert_allclose(grad_x.numpy(), 2 * np.ones_like(grad_x.numpy()))
def test_if():
    """The gradient transform handles relay.If in higher-order mode."""
    true_val = relay.var("x", shape=(1, 16, 64, 64))
    false_val = relay.var("y", shape=(1, 16, 64, 64))
    cond = relay.var("cond", shape=(), dtype="uint1")
    body = relay.log(relay.If(cond, true_val, false_val))
    func = run_infer_type(relay.Function(free_vars(body), body))
    # Success criterion: the transformed function still type-checks.
    back = gradient(func, mode="higher_order")
    run_infer_type(back)
def test_grad_tuple():
    """Gradient of a function returning a tuple: y = x + x, output (y + y, y),
    i.e. (4x, 2x); the asserted gradient w.r.t. x is 4."""
    scope = relay.ScopeBuilder()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    y = scope.let("y", x + x)
    scope.ret(relay.Tuple([y + y, y]))
    func = relay.Function([x], scope.get())
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    assert back_func.checked_type == relay.FuncType(
        [t], relay.TupleType([relay.TupleType([t, t]), relay.TupleType([t])])
    )
    x = rand(dtype, *shape)
    (forward_four, forward_two), (grad,) = create_executor().evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward_four.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(forward_two.numpy(), 2 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
def test_concat():
    """Gradient of concatenate([x, x], axis=1) type-checks: the forward output
    has shape (10, 20) while the gradient keeps the input shape (10, 10)."""
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    rt = relay.TensorType((10, 20), dtype)
    x = relay.var("x", t)
    y = op.concatenate([x, x], axis=1)
    func = relay.Function([x], y)
    func = run_infer_type(func)
    back_func = run_infer_type(gradient(func))
    tvm.ir.assert_structural_equal(
        back_func.checked_type, relay.FuncType([t], relay.TupleType([rt, relay.TupleType([t])]))
    )
    # no value validation as concatenate has dummy gradient right now.
def test_no_duplication():
    """First-order AD must not duplicate the forward dense computation."""
    x = tvm.relay.Var("x", type_annotation=tvm.relay.TensorType([12, 12]))
    y = tvm.relay.Var("y", type_annotation=tvm.relay.TensorType([12, 12]))
    product = tvm.relay.nn.dense(x, y)
    mean_term = tvm.relay.sum(product, keepdims=True)
    loss = tvm.relay.sum(product - mean_term)
    fn = run_infer_type(tvm.relay.Function([x, y], loss))
    grad_fn = tvm.relay.transform.gradient(fn, mode="first_order")
    dense_count = count_ops(grad_fn)["nn.dense"]
    assert dense_count == 3, "We expect 3 dense (1 forward, two backward)"
def test_no_duplication_tuples():
    """First-order AD must not duplicate dense even when its result flows
    through a tuple projection."""
    x = tvm.relay.Var("x", type_annotation=tvm.relay.TensorType([12, 12]))
    y = tvm.relay.Var("y", type_annotation=tvm.relay.TensorType([12, 12]))
    product = tvm.relay.nn.dense(x, y)
    pair = relay.Tuple([product, product])
    mean_term = tvm.relay.sum(product, keepdims=True)
    loss = tvm.relay.sum(relay.TupleGetItem(pair, 0) - mean_term)
    fn = run_infer_type(tvm.relay.Function([x, y], loss))
    grad_fn = tvm.relay.transform.gradient(fn, mode="first_order")
    dense_count = count_ops(grad_fn)["nn.dense"]
    assert dense_count == 3, "We expect 3 dense (1 forward, two backward)"
def test_global_function():
    """Gradient through GlobalVar calls: q(x) = double(double(x)) = 4x, so
    both the forward value and the gradient w.r.t. x equal 4x / 4."""
    m = tvm.IRModule()
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.Var("x", t)
    d = GlobalVar("double")
    m[d] = relay.Function([x], x + x)
    y = relay.Var("y", t)
    q = GlobalVar("q")
    m[q] = relay.Function([y], d(d(y)))
    g = GlobalVar("grad")
    # InferType must run before and after the gradient pass so checked_type is populated.
    m = tvm.relay.transform.InferType()(m)
    m[g] = tvm.relay.transform.gradient(q, m)
    m = tvm.relay.transform.InferType()(m)
    back_func = m[g]
    # The backward function returns (forward value, (grad w.r.t. input,)).
    assert back_func.checked_type == relay.FuncType([t], relay.TupleType([t, relay.TupleType([t])]))
    x = rand(dtype, *shape)
    forward, (grad,) = create_executor(mod=m).evaluate(back_func)(x)
    tvm.testing.assert_allclose(forward.numpy(), 4 * x.numpy())
    tvm.testing.assert_allclose(grad.numpy(), 4 * np.ones_like(x.numpy()))
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 16,351 | 34.470716 | 100 | py |
tvm | tvm-main/tests/python/relay/test_build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.target.target import Target
from tvm.relay import testing
from tvm.relay.backend import Runtime, Executor, graph_executor_codegen
@pytest.mark.parametrize(
    "test_target,unsupported_config",
    [
        ["c", "-runtime=c"],
        ["c", "-system-lib=1"],
        ["c", "-executor=aot"],
        ["c", "-interface-api=c"],
        ["c", "-unpacked-api=1"],
        ["c", "-link-params=1"],
    ],
)
def test_deprecated_target_parameters(test_target, unsupported_config):
    """Deprecated executor/runtime flags in a Target string must raise ValueError
    with a message naming the rejected flag."""
    with pytest.raises(ValueError) as e_info:
        Target(f"{test_target} {unsupported_config}")
    # Bug fix: pytest's ExceptionInfo exposes the raised exception as .value;
    # the old `e_info.execption` attribute access raised AttributeError instead
    # of checking the error message.
    assert f"Cannot recognize '{unsupported_config}" in str(e_info.value)
def test_build_relay_graph_():
    """Test to build a simple relay graph by using APIs directly"""
    def build_graph(mod, target):
        # Lower and codegen by hand (optimize -> graph codegen -> backend build)
        # instead of going through relay.build().
        target, target_host = tvm.target.Target.canon_target_and_host(target)
        mod, _ = relay.optimize(mod, target)
        grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
        _, lowered_funcs, _ = grc.codegen(mod, mod["main"])
        _ = relay.backend._backend.build(lowered_funcs, target)
    def add(shape, dtype):
        # Build an IRModule computing A + B for the given shape/dtype.
        lhs = relay.var("A", shape=shape, dtype=dtype)
        rhs = relay.var("B", shape=shape, dtype=dtype)
        out = relay.add(lhs, rhs)
        expr = relay.Function((lhs, rhs), out)
        mod = tvm.IRModule.from_expr(expr)
        return mod
    build_graph(add((1, 8), "float32"), tvm.target.Target("llvm"))
@tvm.testing.requires_llvm
def test_schedule_record():
    """Test to build a nn model and get schedule_record from build_module"""
    def check_schedule(executor):
        # Every lowered (non-main) function must carry a "schedule" attribute
        # whose schedule/primitive records have matching lengths.
        for func_name, func_meta in executor.function_metadata.items():
            # check converted op only
            if "main" not in func_name:
                primfunc = list(func_meta.relay_primfuncs.values())[0]
                # make sure schedule is well-stored in function metadata
                assert "schedule" in primfunc.attrs
                sch = primfunc.attrs["schedule"]
                assert len(sch.schedule_record) == len(sch.primitive_record)
    relay_mod, params = testing.mobilenet.get_workload(batch_size=1, dtype="float32")
    target_llvm = tvm.target.Target("llvm")
    # te.keep_schedule_record makes the build retain the schedule in metadata.
    config = {"te.keep_schedule_record": True}
    with tvm.transform.PassContext(opt_level=3, config=config):
        aot_executor_factory = relay.build(
            relay_mod,
            target_llvm,
            runtime=Runtime("cpp"),
            executor=Executor("aot"),
            params=params,
        )
        graph_executor_factory = relay.build(
            relay_mod,
            target_llvm,
            params=params,
        )
    # Both the AOT and graph executor builds should record schedules.
    check_schedule(aot_executor_factory)
    check_schedule(graph_executor_factory)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 3,681 | 34.403846 | 85 | py |
tvm | tvm-main/tests/python/relay/test_pass_auto_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import testing
from tvm.relay.expr import Call
from tvm.topi.utils import get_const_tuple
def quantize_and_build(out, skip_conv_layers=None):
    """Wrap `out` in a workload, quantize it (optionally skipping the given conv
    layers), build it for llvm, and return the quantized module.

    Bug fix: the default for skip_conv_layers was a mutable list literal, which
    is shared across calls; use the None-sentinel idiom instead.
    """
    if skip_conv_layers is None:
        skip_conv_layers = []
    f = relay.Function(relay.analysis.free_vars(out), out)
    mod, params = testing.create_workload(f)
    with relay.quantize.qconfig(skip_conv_layers=skip_conv_layers):
        qmod = relay.quantize.quantize(mod, params)
    relay.build(qmod, "llvm", params=params)
    return qmod
def test_mul_rewrite():
    """a test case where rhs of mul is not constant"""
    inp = relay.var("data", shape=(1, 16, 64, 64))
    scale = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
    conv_out = relay.nn.conv2d(
        inp, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=16
    )
    relu_out = relay.nn.relu(data=conv_out)
    quantize_and_build(relu_out * scale)
    pooled = relay.nn.global_avg_pool2d(data=relu_out)
    quantize_and_build(relu_out * pooled)
def test_skip_conv():
    """Quantization should succeed regardless of which conv layers are skipped."""
    inp = relay.var("data", shape=(1, 16, 64, 64))
    weight_np = np.random.rand(16, 16, 3, 3)
    w0 = relay.Constant(tvm.nd.array(weight_np)).astype("float32")
    w1 = relay.Constant(tvm.nd.array(weight_np)).astype("float32")
    scale = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
    relu0 = relay.nn.relu(
        data=relay.nn.conv2d(inp, w0, kernel_size=(3, 3), padding=(1, 1), channels=16)
    )
    relu1 = relay.nn.relu(
        data=relay.nn.conv2d(relu0, w1, kernel_size=(3, 3), padding=(1, 1), channels=16)
    )
    # Exercise every combination of skipped conv layers.
    for skipped in ([], [0], [1], [0, 1]):
        quantize_and_build(relu1 * scale, skip_conv_layers=skipped)
def test_stop_quantize():
    """Quantization should handle a global pool sandwiched between two convs."""
    inp = relay.var("data", shape=(1, 16, 64, 64))
    w0 = relay.Constant(tvm.nd.array(np.random.rand(16, 16, 3, 3))).astype("float32")
    w1 = relay.Constant(tvm.nd.array(np.random.rand(16, 16, 1, 1))).astype("float32")
    scale = relay.sigmoid(relay.var("data", shape=(1, 16, 1, 1)))
    relu0 = relay.nn.relu(
        data=relay.nn.conv2d(inp, w0, kernel_size=(3, 3), padding=(1, 1), channels=16)
    )
    pooled = relay.nn.global_avg_pool2d(data=relu0)
    relu1 = relay.nn.relu(
        data=relay.nn.conv2d(pooled, w1, kernel_size=(1, 1), padding=(0, 0), channels=16)
    )
    quantize_and_build(relu1 * scale)
def test_batch_flatten_rewrite():
    """After quantization, batch_flatten following a conv should run in int8."""
    data = relay.var("data", shape=(1, 16, 64, 64), dtype="float32")
    out = relay.nn.conv2d(
        data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=16
    )
    out = relay.nn.batch_flatten(out)
    qmod = quantize_and_build(out)
    def _check_batch_flatten(node):
        # Visitor: any batch_flatten call in the quantized module must be int8.
        if isinstance(node, Call):
            if node.op.name == "nn.batch_flatten":
                assert node.checked_type.dtype == "int8"
    # check if batch_flatten is quantized
    relay.analysis.post_order_visit(qmod["main"], _check_batch_flatten)
def test_batch_matmul_rewrite():
    """After quantization, batch_matmul/conv2d should accumulate in int32 and
    batch_flatten should carry int8 data."""
    data = relay.var("data", shape=(1, 4, 16, 16))
    data2 = relay.sigmoid(relay.var("data", shape=(4, 16, 64)))
    out = relay.nn.conv2d(data, relay.var("weight"), kernel_size=(3, 3), padding=(1, 1), channels=8)
    out = relay.nn.batch_flatten(out)
    out = relay.reshape(out, [1, 32, 64])
    out = relay.nn.batch_matmul(out, data2)
    qmod = quantize_and_build(out)
    def _check_batch_matmul(node):
        # Visitor: verify dtypes of the quantized ops.
        if isinstance(node, Call):
            if node.op.name in ["nn.batch_matmul", "nn.conv2d"]:
                assert node.checked_type.dtype == "int32"
            elif node.op.name == "nn.batch_flatten":
                assert node.checked_type.dtype == "int8"
    # check if batch_matmul is quantized
    relay.analysis.post_order_visit(qmod["main"], _check_batch_matmul)
def get_calibration_dataset(mod, input_name):
    """Build 5 random calibration records shaped like the module's first input."""
    input_shape = [int(dim) for dim in mod["main"].checked_type.arg_types[0].shape]
    return [{input_name: np.random.uniform(size=input_shape)} for _ in range(5)]
@pytest.mark.parametrize("create_target", [True, False])
def test_calibrate_target(create_target):
    """KL-divergence calibration works both with and without a target scope."""
    mod, params = testing.synthetic.get_workload()
    dataset = get_calibration_dataset(mod, "data")
    with relay.quantize.qconfig(calibrate_mode="kl_divergence"):
        if not create_target:
            # current_target = None
            relay.quantize.quantize(mod, params, dataset)
        else:
            with tvm.target.Target("llvm"):
                relay.quantize.quantize(mod, params, dataset)
def test_calibrate_memory_bound():
    """Chunked calibration (one chunk per CPU) runs end to end."""
    import multiprocessing
    mod, params = testing.synthetic.get_workload()
    dataset = get_calibration_dataset(mod, "data")
    chunk_size = multiprocessing.cpu_count()
    with relay.quantize.qconfig(calibrate_mode="kl_divergence", calibrate_chunk_by=chunk_size):
        relay.quantize.quantize(mod, params, dataset)
def test_calibrate_percentile():
    """Percentile-based calibration runs end to end."""
    mod, params = testing.synthetic.get_workload()
    with relay.quantize.qconfig(calibrate_mode="percentile"):
        relay.quantize.quantize(mod, params, get_calibration_dataset(mod, "data"))
####################################
# Quant/Dequant Partitioning Tests #
####################################
# Shared quantization config for the partitioning tests below.
BASE_CFG = {
    "skip_conv_layers": [],
    # NOTE(review): relay.quantize.qconfig's option appears to be named
    # "skip_dense_layer" (singular), and qconfig silently ignores unknown keys,
    # so confirm this plural spelling actually takes effect.
    "skip_dense_layers": False,
    "dtype_input": "int8",
    "dtype_weight": "int8",
    "dtype_activation": "int32",
}
def gen_rand_tvm(tt, low, high):
    """Generate a random CPU NDArray matching TensorType `tt`, with values in
    [low, high) for ints and [low, high) uniform for floats.

    Raises:
        ValueError: if tt.dtype is neither an int nor a float type.
    """
    if "int" in tt.dtype:
        data_np = np.random.randint(low, high, size=get_const_tuple(tt.shape), dtype=tt.dtype)
    elif "float" in tt.dtype:
        data_np = np.random.uniform(low, high, size=get_const_tuple(tt.shape)).astype(tt.dtype)
    else:
        # Bug fix: `assert False, ...` is stripped under `python -O`; raise an
        # explicit exception for invalid input instead.
        raise ValueError(f"unknown dtype: {tt.dtype}")
    return tvm.nd.array(data_np, device=tvm.cpu(0))
def verify_partition_fails(mod, params):
    """Quantize `mod` expecting the "enabled" partition mode to succeed but the
    "fully_integral" mode to be rejected (the module contains float-only ops)."""
    # standard partition should always succeed
    with relay.quantize.qconfig(**BASE_CFG, partition_conversions="enabled"):
        partitioned_mod = relay.quantize.quantize(mod, params)
    # Idiom: use pytest.raises instead of the manual try/raise/except pattern.
    with pytest.raises(AssertionError):
        with relay.quantize.qconfig(**BASE_CFG, partition_conversions="fully_integral"):
            relay.quantize.quantize(mod, params)
def verify_partition(mod, params):
    """Quantize `mod` with and without conversion partitioning and check that
    both variants produce matching results on random inputs."""
    # Bug fix: the kwarg was misspelled "paritition_conversions"; qconfig
    # silently ignores unknown keys, so the typo went unnoticed ("disabled" is
    # the default mode, hence behavior is unchanged by the fix).
    with relay.quantize.qconfig(**BASE_CFG, partition_conversions="disabled"):
        unpartitioned_mod = relay.quantize.quantize(mod, params)
    assert (
        len(unpartitioned_mod.get_global_vars()) == 1
    ), "unpartitioned module should only have one function"
    with relay.quantize.qconfig(**BASE_CFG, partition_conversions="fully_integral"):
        partitioned_mod = relay.quantize.quantize(mod, params)
    # ensure partitioned and unpartitioned results agree
    params = [gen_rand_tvm(param.type_annotation, 0, 1) for param in partitioned_mod["main"].params]
    def _eval_mod(mod):
        # Evaluate the module's main function on the VM with the random inputs.
        return relay.create_executor("vm", device=tvm.cpu(0), target="llvm", mod=mod).evaluate()(
            *params
        )
    partitioned_mod_result = _eval_mod(partitioned_mod)
    unpartitioned_mod_result = _eval_mod(unpartitioned_mod)
    tvm.testing.assert_allclose(unpartitioned_mod_result.numpy(), partitioned_mod_result.numpy())
def test_add_partition():
    """A bare float32 add has no quantizable ops, so fully-integral
    partitioning must fail."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
      %x: Tensor[(10, 10), float32],
      %y: Tensor[(10, 10), float32]) {
      add(%x, %y)
    }
    """
    )
    verify_partition_fails(mod, params={})
def test_conv2d_partition():
    """A single conv2d can be partitioned into a fully integral module."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
      %x: Tensor[(1, 4, 16, 16), float32],
      %w: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
      nn.conv2d(%x, %w,
                padding=[1, 1, 1, 1],
                channels=4,
                kernel_size=[3, 3])
    }
    """
    )
    weight_ty = mod["main"].params[1].checked_type
    params = {"w": gen_rand_tvm(weight_ty, 0, 1)}
    verify_partition(mod, params)
def test_multiple_arg_conversions_partition():
    """Two parallel convs feeding an add: multiple inputs each need their own
    quantize conversion, and partitioning must still succeed."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
        %x1: Tensor[(1, 4, 16, 16), float32],
        %w1: Tensor[(4, 4, 3, 3), float32],
        %x2: Tensor[(1, 4, 16, 16), float32],
        %w2: Tensor[(4, 4, 3, 3), float32]
        ) -> Tensor[(1, 4, 16, 16), float32] {
      %0 = nn.conv2d(%x1, %w1,
                     padding=[1, 1, 1, 1],
                     channels=4,
                     kernel_size=[3, 3]);
      %1 = nn.conv2d(%x2, %w2,
                     padding=[1, 1, 1, 1],
                     channels=4,
                     kernel_size=[3, 3]);
      add(%0, %1)
    }
    """
    )
    w1_ty = mod["main"].params[1].checked_type
    w2_ty = mod["main"].params[3].checked_type
    params = {"w1": gen_rand_tvm(w1_ty, 0, 1), "w2": gen_rand_tvm(w2_ty, 0, 1)}
    verify_partition(mod, params)
def test_unquantizable_prefix_partition():
    """An unquantizable op (bias_add) before the conv prevents a fully
    integral partition."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
      %x: Tensor[(1, 4, 16, 16), float32],
      %b: Tensor[(4), float32],
      %w: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
      // NOTE bias_add isn't currently quantizable
      %0 = nn.bias_add(%x, %b);
      nn.conv2d(%0, %w,
                padding=[1, 1, 1, 1],
                channels=4,
                kernel_size=[3, 3])
    }
    """
    )
    bias_ty = mod["main"].params[1].checked_type
    weight_ty = mod["main"].params[2].checked_type
    params = {"b": gen_rand_tvm(bias_ty, 0, 1), "w": gen_rand_tvm(weight_ty, 0, 1)}
    verify_partition_fails(mod, params)
def test_unquantizable_core_partition():
    """An unquantizable op (bias_add) between two convs prevents a fully
    integral partition."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
      %x1: Tensor[(1, 4, 16, 16), float32],
      %w1: Tensor[(4, 4, 3, 3), float32],
      %b: Tensor[(4), float32],
      %w2: Tensor[(4, 4, 3, 3), float32]) -> Tensor[(1, 4, 16, 16), float32] {
      %0 = nn.conv2d(%x1, %w1,
                     padding=[1, 1, 1, 1],
                     channels=4,
                     kernel_size=[3, 3]);
      // NOTE bias_add isn't currently quantizable
      %1 = nn.bias_add(%0, %b);
      nn.conv2d(%1, %w2,
                padding=[1, 1, 1, 1],
                channels=4,
                kernel_size=[3, 3])
    }
    """
    )
    w1_ty = mod["main"].params[1].checked_type
    bias_ty = mod["main"].params[2].checked_type
    w2_ty = mod["main"].params[3].checked_type
    params = {
        "w1": gen_rand_tvm(w1_ty, 0, 1),
        "w2": gen_rand_tvm(w2_ty, 0, 1),
        "b": gen_rand_tvm(bias_ty, 0, 1),
    }
    verify_partition_fails(mod, params)
def test_unquantizable_suffix_partition():
    """An unquantizable op (bias_add) after the conv prevents a fully
    integral partition."""
    mod = tvm.relay.parse(
        """
    #[version = "0.0.5"]
    def @main(
      %x: Tensor[(1, 4, 16, 16), float32],
      %w: Tensor[(4, 4, 3, 3), float32],
      %b: Tensor[(4), float32]) -> Tensor[(1, 4, 16, 16), float32] {
      %0 = nn.conv2d(%x, %w,
                     padding=[1, 1, 1, 1],
                     channels=4,
                     kernel_size=[3, 3]);
      // NOTE bias_add isn't currently quantizable
      nn.bias_add(%0, %b)
    }
    """
    )
    weight_ty = mod["main"].params[1].checked_type
    bias_ty = mod["main"].params[2].checked_type
    params = {"w": gen_rand_tvm(weight_ty, 0, 1), "b": gen_rand_tvm(bias_ty, 0, 1)}
    verify_partition_fails(mod, params)
def test_left_shift_negative():
    """All left_shift ops emitted by quantization must use a non-negative
    shift amount (negative shifts are undefined behavior)."""
    data = relay.var("data", shape=(1, 16, 64, 64))
    weight = relay.const(np.full((16, 16, 3, 3), 256.0))
    conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), channels=16)
    relu = relay.nn.relu(conv2d)
    mod = tvm.IRModule.from_expr(relu)
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(
            calibrate_mode="global_scale", global_scale=8.0, skip_conv_layers=None
        ):
            qnn_mod = relay.quantize.quantize(mod)
    class OpFinder(relay.ExprVisitor):
        # Collects all calls to a given op name while traversing an expression.
        def __init__(self, op_name):
            super(OpFinder, self).__init__()
            self._op_name = op_name
            self.ops = list()
        def visit_call(self, call):
            super().visit_call(call)
            if call.op.name == self._op_name:
                self.ops.append(call)
    opf = OpFinder("left_shift")
    opf.visit(qnn_mod["main"])
    assert len(opf.ops) > 0, 'Broken case, can\'t find any "left_shift" operators.'
    for left_shift_op in opf.ops:
        shift_amount = left_shift_op.args[1].data.numpy()
        assert shift_amount >= 0, "Shift amount must be non-negative."
def test_dense_conv2d_rewrite():
    """With skip_dense_layer=False, dense is quantized to int8/int32 while the
    conv2d stays float32 (per the assertions below — presumably because the
    first conv layer is skipped by default; verify against qconfig defaults)."""
    n, c, h, w = 1, 16, 64, 64
    data = relay.var("data", relay.TensorType((n, c, h, w)))
    inp = relay.var("inp", relay.TensorType((n, c * h * w)))
    weight_T = relay.const(np.random.random((n, c * h * w)), dtype="float32")
    bias = relay.const(np.random.random((n,)), dtype="float32")
    conv_w = relay.const(np.random.random((16, 16, 3, 3)), dtype="float32")
    dense_o = relay.nn.dense(inp, weight_T)
    linear_o = relay.nn.bias_add(dense_o, bias)
    conv2d_o = relay.nn.conv2d(data, conv_w, kernel_size=(3, 3), padding=(1, 1), channels=16)
    result = relay.Tuple((linear_o, conv2d_o))
    mod = tvm.IRModule.from_expr(result)
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(
            calibrate_mode="global_scale", global_scale=8.0, skip_dense_layer=False
        ):
            qnn_mod = relay.quantize.quantize(mod)
    def _check_dense(node):
        # Visitor: dense is quantized, conv2d remains float32.
        if isinstance(node, Call):
            if node.op.name == "nn.dense":
                assert node.args[0].checked_type.dtype == "int8"
                assert node.args[1].checked_type.dtype == "int8"
                assert node.checked_type.dtype == "int32"
            if node.op.name == "nn.conv2d":
                assert node.args[0].checked_type.dtype == "float32"
                assert node.args[1].checked_type.dtype == "float32"
                assert node.checked_type.dtype == "float32"
    relay.analysis.post_order_visit(qnn_mod["main"], _check_dense)
# Run every test explicitly when invoked as a script (no pytest discovery).
if __name__ == "__main__":
    test_mul_rewrite()
    test_batch_flatten_rewrite()
    test_batch_matmul_rewrite()
    test_calibrate_target(False)
    test_calibrate_target(True)
    test_calibrate_memory_bound()
    test_calibrate_percentile()
    test_add_partition()
    test_conv2d_partition()
    test_multiple_arg_conversions_partition()
    test_unquantizable_prefix_partition()
    test_unquantizable_core_partition()
    test_unquantizable_suffix_partition()
    test_left_shift_negative()
    test_dense_conv2d_rewrite()
    test_skip_conv()
    test_stop_quantize()
| 15,675 | 32.857451 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_simulated_quantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.runtime.vm import VirtualMachine
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
def allclose_with_rounding(a, b):
    """Assert that `a` and `b` agree elementwise except for at most 3
    mismatches, which are tolerated as GPU fp32 rounding differences."""
    mismatch_count = np.sum(a != b)
    assert mismatch_count <= 3
def quantize_test_driver(in_dtype, quant_args, axis, out_dtype, in_data):
    """Build and run a real qnn.quantize for `in_data`, returning the quantized
    numpy result; used as the reference output for the simulated-quantize tests.

    quant_args must supply "out_zero_point" and "out_scale".
    """
    shape = in_data.shape
    input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
    output_zero_point = relay.const(quant_args["out_zero_point"])
    output_scale = relay.const(quant_args["out_scale"])
    quantized_output = relay.qnn.op.quantize(
        input_data,
        output_scale=output_scale,
        output_zero_point=output_zero_point,
        axis=axis,
        out_dtype=out_dtype,
    )
    mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
    mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        return res
def build_simulated_quantize(input_data, scale, zp, dtype, axis=-1):
    """Compile a qnn.simulated_quantize expression for llvm and return a
    VirtualMachine ready to invoke it; the qnn params are relay vars so the
    same compiled module can be run with different scales/zps/dtypes."""
    sim_q = relay.qnn.op.simulated_quantize(
        input_data,
        scale,
        zp,
        axis=axis,
        out_dtype=dtype,
    )
    mod = tvm.IRModule.from_expr(sim_q)
    with tvm.transform.PassContext(opt_level=3):
        vm_exec = relay.vm.compile(mod, "llvm", params=None)
        vm = VirtualMachine(vm_exec, tvm.cpu(0))
        return vm
def verify_simulated_quantize_simple(dtype):
    """Simulated quantize must match the real qnn.quantize for scalar qparams."""
    data = np.random.uniform(low=-128, high=127, size=[2, 5]).astype("float32")
    scale_np = np.float32(0.5)
    zp_np = np.int32(127)
    dtype_code = np.int32(SQNN_DTYPE_TO_CODE[dtype])
    # Reference output from the real quantize op.
    reference = quantize_test_driver(
        in_dtype="float32",
        quant_args={"out_zero_point": zp_np, "out_scale": scale_np},
        axis=-1,
        out_dtype=dtype,
        in_data=data,
    )
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale_var = relay.var("scale", shape=[])
    zp_var = relay.var("zp", shape=[], dtype="int32")
    dtype_var = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_quantize(input_data, scale_var, zp_var, dtype_var)
    sim_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_code)
    allclose_with_rounding(sim_out.numpy(), reference)
def test_simulated_quantize():
    """Run the simple simulated-quantize check for each supported output dtype."""
    for out_dtype in ("uint8", "int8", "int32"):
        verify_simulated_quantize_simple(out_dtype)
def test_dynamic_channels():
    """One compiled simulated-quantize module must serve both scalar and
    per-channel qnn params without recompilation."""
    # Compile simulated quantize once but support either per-channel or scalar params.
    data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
    # Test scalar qnn params.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([127]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
    quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=0,
        out_dtype="uint8",
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_quantize(input_data, scale, zp, dtype, axis=0)
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.numpy(), q_out)
    # Now get the perchannel quantize output and compare without recompiling.
    scale_np = np.array([0.5, 0.25]).astype("float32")
    zp_np = np.array([127, 123]).astype("int32")
    # Get the reference quantize output.
    quant_args = {"out_zero_point": zp_np, "out_scale": scale_np}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=0,
        out_dtype="uint8",
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.numpy(), q_out)
def test_dynamic_dtype():
    """One compiled simulated-quantize module must serve different output
    dtypes (uint8, then int32) without recompilation."""
    # Compile simulated quantize once but support any type of quantization.
    data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("float32")
    # Test scalar float32 to uint8.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([127]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
    quant_args = {"out_zero_point": zp_np[0], "out_scale": scale_np[0]}
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=-1,
        out_dtype="uint8",
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_quantize(input_data, scale, zp, dtype)
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.numpy(), q_out)
    # Now test float32 to int32 compilation.
    # Get the reference quantize output.
    q_out = quantize_test_driver(
        in_dtype="float32",
        quant_args=quant_args,
        axis=-1,
        out_dtype="int32",
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int32"])
    sim_q_out = vm.invoke("main", input_data=data, scale=scale_np, zp=zp_np, dtype=dtype_np)
    allclose_with_rounding(sim_q_out.numpy(), q_out)
# Run every test explicitly when invoked as a script (no pytest discovery).
if __name__ == "__main__":
    test_simulated_quantize()
    test_dynamic_channels()
    test_dynamic_dtype()
| 7,250 | 37.983871 | 92 | py |
tvm | tvm-main/tests/python/relay/test_pass_mac_count.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for MAC counter."""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import analysis, transform
def run_opt_pass(expr, opt_pass):
    """Run `opt_pass` (after type inference) over `expr` and return the
    resulting main function, or its body if `expr` was not a Function."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
    mod = opt_pass(mod)
    main_fn = mod["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def test_gemm():
    """MAC count of dense([n, k] x [m, k]) is n * m * k."""
    n, k, m = 512, 1024, 256
    data1 = relay.var("data1", shape=(n, k))
    data2 = relay.var("data2", shape=(m, k))
    gemm = relay.nn.dense(data1, data2)
    func = relay.Function([data1, data2], relay.Tuple(tvm.runtime.convert([gemm])))
    func = run_opt_pass(func, transform.InferType())
    assert analysis.get_total_mac_number(func) == n * m * k
def test_conv():
    """MAC count of conv2d is batch * in_c * out_h * out_w * out_c * kh * kw."""
    batch_size = 1
    input_channel = 3
    h = w = 224
    output_channel = 64
    kh = kw = 7
    h_padding = w_padding = 1
    oh = h + h_padding * 2 - kh + 1
    ow = w + w_padding * 2 - kw + 1
    data = relay.var("data", shape=(batch_size, input_channel, h, w))
    weight = relay.var("weight", shape=(output_channel, input_channel, kh, kw))
    conv2d = relay.nn.conv2d(
        data, weight, channels=output_channel, kernel_size=(kh, kw), padding=(h_padding, w_padding)
    )
    func = relay.Function([data, weight], relay.Tuple(tvm.runtime.convert([conv2d])))
    func = run_opt_pass(func, transform.InferType())
    expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
    assert analysis.get_total_mac_number(func) == expect_count
def test_simple_network():
    """MAC count of a small conv + add + flatten + dense network, checked
    after AlterOpLayout changes the conv2d data layout."""
    batch_size = 1
    dshape = (batch_size, 64, 56, 56)
    weight_conv = relay.var("weight_conv", shape=(64, 64, 3, 3))
    data1 = relay.var("data1", shape=dshape)
    data2 = relay.var("data2", shape=dshape)
    weight_dense = relay.var("weight_dense", shape=(1, 56 * 56 * 64))
    conv2d_1 = relay.nn.conv2d(data1, weight_conv, channels=64, kernel_size=(3, 3), padding=(1, 1))
    conv2d_2 = relay.nn.conv2d(data2, weight_conv, channels=64, kernel_size=(3, 3), padding=(1, 1))
    add = relay.add(conv2d_1, conv2d_2)
    flattened = relay.nn.batch_flatten(add)
    dense_1 = relay.nn.dense(flattened, weight_dense)
    func = relay.Function(
        [data1, data2, weight_conv, weight_dense],
        relay.Tuple(tvm.runtime.convert([conv2d_1, conv2d_2, dense_1, add, flattened])),
    )
    # alter the CONV 2D data layout to test
    func = run_opt_pass(func, transform.AlterOpLayout())
    compute_count = analysis.get_total_mac_number(func)
    # Precomputed: 2 convs (2 * 1*64*56*56*64*3*3) + dense (1*56*56*64).
    expect_count = 231411712
    assert compute_count == expect_count
def test_depthwise_conv2d():
    """MAC count of a depthwise conv (groups == channels) is
    output_elements * kh * kw; two convs give twice that."""
    batch_size = 1
    dshape = (batch_size, 64, 56, 56)
    weight_conv = relay.var("weight_depthwiseconv", shape=(64, 1, 3, 3))
    data1 = relay.var("data1", shape=dshape)
    data2 = relay.var("data2", shape=dshape)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight_conv, kernel_size=(3, 3), padding=(1, 1), groups=64
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        data2, weight_conv, kernel_size=(3, 3), padding=(1, 1), groups=64
    )
    add = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    func = relay.Function(
        [data1, data2, weight_conv],
        relay.Tuple(tvm.runtime.convert([depthwise_conv2d_1, depthwise_conv2d_2, add])),
    )
    func = run_opt_pass(func, transform.InferType())
    compute_count = analysis.get_total_mac_number(func)
    assert compute_count == 2 * np.prod(dshape) * 3 * 3
def test_conv_2d_transpose():
    """MAC count of conv2d_transpose matches the conv formula with the
    transposed output size (padding subtracted, kernel added)."""
    batch_size = 1
    input_channel = 3
    h = 224
    w = 224
    output_channel = 64
    kh = 7
    kw = 7
    h_padding = 1
    w_padding = 1
    # Transposed conv output size: padding shrinks, kernel grows the output.
    oh = h - h_padding * 2 + kh - 1
    ow = w - w_padding * 2 + kw - 1
    dshape = (batch_size, input_channel, h, w)
    weight = relay.var("weight", shape=(input_channel, output_channel, kh, kw))
    data = relay.var("data", shape=dshape)
    conv2d_transpose = relay.nn.conv2d_transpose(
        data, weight, channels=output_channel, kernel_size=(kh, kw), padding=(h_padding, w_padding)
    )
    func = relay.Function([data, weight], relay.Tuple(tvm.runtime.convert([conv2d_transpose])))
    func = run_opt_pass(func, transform.InferType())
    compute_count = analysis.get_total_mac_number(func)
    expect_count = batch_size * input_channel * oh * ow * output_channel * kh * kw
    assert compute_count == expect_count
# Run every test explicitly when invoked as a script (no pytest discovery).
if __name__ == "__main__":
    test_conv()
    test_gemm()
    test_simple_network()
    test_depthwise_conv2d()
    test_conv_2d_transpose()
| 5,559 | 35.339869 | 99 | py |
tvm | tvm-main/tests/python/relay/test_name_mangling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
import tvm
import tvm.testing
import tvm.relay as relay
import tvm.relay.backend.utils as utils
import pytest
def test_mangle_mod_name():
    """mangle_module_name prefixes names with "tvmgen" and rejects non-ASCII names."""
    assert utils.mangle_module_name("default") == "tvmgen_default"
    assert utils.mangle_module_name("ccompiler") == "tvmgen_ccompiler"
    # Bug fix: `assert x, "expected"` treats the string as the assertion
    # message and only checks truthiness; compare against the expected value.
    assert utils.mangle_module_name("1234") == "tvmgen_1234"
    assert utils.mangle_module_name("") == "tvmgen"
    assert utils.mangle_module_name(None) == "tvmgen"
    # Bug fix: both invalid names were inside a single pytest.raises block, so
    # the second call never executed once the first raised; check each one.
    with pytest.raises(ValueError):
        utils.mangle_module_name("\u018e")
    with pytest.raises(ValueError):
        utils.mangle_module_name("\xf1")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 1,397 | 34.846154 | 70 | py |
tvm | tvm-main/tests/python/relay/test_pass_instrument.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Instrument test cases.
"""
import pytest
import tvm
import tvm.relay
from tvm.relay import op
from tvm.ir.instrument import PassTimingInstrument, pass_instrument
def get_test_model():
    """Build a small Relay module used as input by the pass-instrument tests."""
    var_x, var_y, var_z = (tvm.relay.var(name, shape=(3, 4), dtype="float32") for name in "xyz")
    added = op.add(var_x, var_y)
    diff = op.subtract(var_x, var_z)
    prod = op.multiply(added, added / diff)
    return tvm.IRModule.from_expr(prod + diff)
def test_pass_timing_instrument():
    """PassTimingInstrument records pass timings only while it is installed."""
    pass_timing = PassTimingInstrument()
    # Install the timing instrument on the current (global) PassContext.
    tvm.transform.PassContext.current().override_instruments([pass_timing])

    mod = get_test_model()
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    profiles = pass_timing.render()
    for pass_name in ("AnnotateSpans", "ToANormalForm", "InferType"):
        assert pass_name in profiles

    # Remove all instruments; subsequent passes must no longer be timed.
    tvm.transform.PassContext.current().override_instruments(None)

    mod = get_test_model()
    mod = tvm.relay.transform.AnnotateSpans()(mod)
    mod = tvm.relay.transform.ToANormalForm()(mod)
    mod = tvm.relay.transform.InferType()(mod)

    assert pass_timing.render() == ""
# Parametrize over the two supported ways of defining a custom instrument:
# via the @pass_instrument decorator, or by subclassing PassInstrument.
instrument_definition_type = tvm.testing.parameter("decorator", "subclass")
def test_custom_instrument(instrument_definition_type):
    """A custom instrument works whether defined via decorator or subclassing."""

    class BaseTest:
        # Records the callback sequence so the test can assert on ordering.
        def __init__(self):
            self.events = []

        def enter_pass_ctx(self):
            self.events.append("enter ctx")

        def exit_pass_ctx(self):
            self.events.append("exit ctx")

        def run_before_pass(self, mod, info):
            self.events.append("run before " + info.name)

        def run_after_pass(self, mod, info):
            self.events.append("run after " + info.name)

    if instrument_definition_type == "decorator":
        # The decorator wraps a plain class into a pass instrument.
        MyTest = pass_instrument(BaseTest)

    elif instrument_definition_type == "subclass":
        # Equivalent: inherit from PassInstrument directly.
        class MyTest(BaseTest, tvm.ir.instrument.PassInstrument):
            def __init__(self):
                BaseTest.__init__(self)
                tvm.ir.instrument.PassInstrument.__init__(self)

    mod = get_test_model()
    my_test = MyTest()
    with tvm.transform.PassContext(instruments=[my_test]):
        mod = tvm.relay.transform.InferType()(mod)

    # Adjacent string literals concatenate into the expected joined event log.
    assert (
        "enter ctx"
        "run before InferType"
        "run after InferType"
        "exit ctx" == "".join(my_test.events)
    )
def test_disable_pass():
    """should_run() returning False must prevent the pass from running."""

    @pass_instrument
    class InferTypeOnly:
        def __init__(self):
            self.events = []

        def should_run(self, mod, info):
            # Veto every pass whose name does not mention "InferType".
            return "InferType" in info.name

        def run_before_pass(self, mod, info):
            self.events.append(info.name)

    tracker = InferTypeOnly()
    mod = get_test_model()
    with tvm.transform.PassContext(instruments=[tracker]):
        mod = tvm.relay.transform.AnnotateSpans()(mod)
        mod = tvm.relay.transform.ToANormalForm()(mod)
        mod = tvm.relay.transform.InferType()(mod)

    assert "".join(tracker.events) == "InferType"
def test_multiple_instrument():
    """With several instruments installed, any one of them can veto a pass."""

    @pass_instrument
    class SkipPass:
        # Vetoes every pass whose name contains `skip_pass_name`.
        def __init__(self, skip_pass_name):
            self.skip_pass_name = skip_pass_name

        def should_run(self, mod, info):
            return self.skip_pass_name not in info.name

    @pass_instrument
    class PrintPassName:
        # Records the name of every pass that actually runs.
        def __init__(self):
            self.events = []

        def run_before_pass(self, mod, info):
            self.events.append(info.name)

    recorder = PrintPassName()
    instruments = [SkipPass("AnnotateSpans"), SkipPass("ToANormalForm"), recorder]

    mod = get_test_model()
    with tvm.transform.PassContext(instruments=instruments):
        mod = tvm.relay.transform.AnnotateSpans()(mod)
        mod = tvm.relay.transform.ToANormalForm()(mod)
        mod = tvm.relay.transform.InferType()(mod)

    assert "".join(recorder.events) == "InferType"
def test_instrument_pass_counts():
    """Counters accumulate during a build and reset on context enter/exit."""

    @pass_instrument
    class PassesCounter:
        # Counts run_before_pass/run_after_pass callbacks within one context.
        def __init__(self):
            self.run_before_count = 0
            self.run_after_count = 0

        def __clear(self):
            self.run_before_count = 0
            self.run_after_count = 0

        def enter_pass_ctx(self):
            self.__clear()

        def exit_pass_ctx(self):
            self.__clear()

        def run_before_pass(self, mod, info):
            self.run_before_count = self.run_before_count + 1

        def run_after_pass(self, mod, info):
            self.run_after_count = self.run_after_count + 1

    mod = get_test_model()
    passes_counter = PassesCounter()
    with tvm.transform.PassContext(instruments=[passes_counter]):
        tvm.relay.build(mod, "llvm")
        # A full relay build runs many passes; both counters advance in lockstep.
        assert passes_counter.run_after_count != 0
        assert passes_counter.run_after_count == passes_counter.run_before_count

    # Out of pass context scope, should be reset
    assert passes_counter.run_before_count == 0
    assert passes_counter.run_after_count == 0
def test_list_pass_configs():
    """PassContext.list_configs() exposes registered config keys with type info."""
    configs = tvm.transform.PassContext.list_configs()
    assert configs
    key = "relay.backend.use_auto_scheduler"
    assert key in configs
    assert configs[key]["type"] == "IntImm"
def test_enter_pass_ctx_exception():
    """An exception in one instrument's enter_pass_ctx rolls back prior enters."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")

    @pass_instrument
    class PIBroken(PI):
        def __init__(self, id):
            super().__init__(id)

        def enter_pass_ctx(self):
            events.append(self.id + " enter ctx")
            raise RuntimeError("Just a dummy error")

    pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
    with pytest.raises(tvm.error.TVMError) as cm:
        with pass_ctx:
            pass
    # BUG FIX: the assert previously sat *inside* the pytest.raises block
    # (dead code, masked a `cm.execption` typo). pytest's ExceptionInfo
    # exposes the caught exception via `.value`, not `.exception`.
    assert "Just a dummy error" in str(cm.value)
    # %2's enter raised, so only %1 (already entered) gets an exit callback.
    assert "%1 enter ctx" "%2 enter ctx" "%1 exit ctx" == "".join(events)

    # Make sure we get correct PassContext
    cur_pass_ctx = tvm.transform.PassContext.current()
    assert pass_ctx != cur_pass_ctx
    assert not cur_pass_ctx.instruments
def test_enter_pass_ctx_exception_global():
    """enter_pass_ctx errors raised while overriding global instruments propagate."""

    @pass_instrument
    class PIBroken:
        def enter_pass_ctx(self):
            raise RuntimeError("Just a dummy error")

    cur_pass_ctx = tvm.transform.PassContext.current()
    with pytest.raises(tvm.error.TVMError) as cm:
        cur_pass_ctx.override_instruments([PIBroken()])
    # BUG FIX: this assert was inside the raises block and never executed;
    # pytest's ExceptionInfo exposes the exception via `.value`.
    assert "Just a dummy error" in str(cm.value)
    assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception():
    """An exception in one instrument's exit_pass_ctx aborts the remaining exits."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")

    @pass_instrument
    class PIBroken(PI):
        def __init__(self, id):
            super().__init__(id)

        def exit_pass_ctx(self):
            events.append(self.id + " exit ctx")
            raise RuntimeError("Just a dummy error")

    pass_ctx = tvm.transform.PassContext(instruments=[PI("%1"), PIBroken("%2"), PI("%3")])
    with pytest.raises(tvm.error.TVMError) as cm:
        with pass_ctx:
            pass
    # BUG FIX: the assert previously sat *inside* the pytest.raises block and
    # never ran; pytest's ExceptionInfo exposes the exception via `.value`.
    assert "Just a dummy error" in str(cm.value)
    # %2's exit raised, so %3 never receives its exit callback.
    assert "%1 exit ctx" "%2 exit ctx" == "".join(events)

    # Make sure we get correct PassContext
    cur_pass_ctx = tvm.transform.PassContext.current()
    assert pass_ctx != cur_pass_ctx
    assert not cur_pass_ctx.instruments
def test_exit_pass_ctx_exception_global():
    """exit_pass_ctx errors raised while overriding global instruments propagate."""

    @pass_instrument
    class PIBroken:
        def exit_pass_ctx(self):
            raise RuntimeError("Just a dummy error")

    cur_pass_ctx = tvm.transform.PassContext.current()
    with pytest.raises(tvm.error.TVMError) as cm:
        # The first override installs PIBroken; the second override exits the
        # previously-installed instrument, which raises.
        cur_pass_ctx.override_instruments([PIBroken()])
        cur_pass_ctx.override_instruments([PIBroken()])
    # BUG FIX: the assert was inside the raises block (unreachable); pytest's
    # ExceptionInfo exposes the exception via `.value`.
    assert "Just a dummy error" in str(cm.value)
    assert not cur_pass_ctx.instruments
def test_pass_exception():
    """An exception raised inside a pass triggers exit_pass_ctx and propagates."""
    events = []

    @pass_instrument
    class PI:
        def enter_pass_ctx(self):
            events.append("enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append("exit_pass_ctx")

        def should_run(self, mod, info):
            events.append("should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append("run_before_pass")

        def run_after_pass(self, mod, info):
            events.append("run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        raise RuntimeError("Just a dummy error")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI()]):
            mod = transform(mod)
    # BUG FIX: pytest's ExceptionInfo exposes the caught exception as `.value`;
    # `.exception` is the unittest API and does not exist on ExceptionInfo.
    assert "Just a dummy error" in str(cm.value)
    # The pass raised, so run_after_pass is never reached.
    assert (
        "enter_pass_ctx"
        "should_run"
        "run_before_pass"
        "transform pass"
        "exit_pass_ctx" == "".join(events)
    )
def test_should_run_exception():
    """An exception in should_run aborts the pass and exits the context."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            raise RuntimeError("Just a dummy error")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # BUG FIX: pytest's ExceptionInfo exposes the caught exception as `.value`;
    # `.exception` is the unittest API and does not exist on ExceptionInfo.
    assert "Just a dummy error" in str(cm.value)
    # %1's should_run raised, so %2 is never asked and the context unwinds.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_run_before_exception():
    """An exception in run_before_pass aborts the pass and exits the context."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")
            raise RuntimeError("Just a dummy error")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    mod = get_test_model()
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # BUG FIX: pytest's ExceptionInfo exposes the caught exception as `.value`;
    # `.exception` is the unittest API and does not exist on ExceptionInfo.
    assert "Just a dummy error" in str(cm.value)
    # %1's run_before_pass raised, so the pass body never runs.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%2 should_run"
        "%1 run_before_pass"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_run_after_exception():
    """An exception in run_after_pass propagates after the pass body ran."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(self.id + " run_after_pass")
            raise RuntimeError("Just a dummy error")

    @tvm.transform.module_pass(opt_level=2)
    def transform(mod, ctx):
        events.append("transform pass")
        return mod

    x, y = [tvm.relay.var(c, shape=(3, 4), dtype="float32") for c in "xy"]
    mod = tvm.IRModule.from_expr(tvm.relay.add(x, y))
    with pytest.raises(tvm.error.TVMError) as cm:
        with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
            mod = transform(mod)
    # BUG FIX: pytest's ExceptionInfo exposes the caught exception as `.value`;
    # `.exception` is the unittest API and does not exist on ExceptionInfo.
    assert "Just a dummy error" in str(cm.value)
    # %1's run_after_pass raised, so %2 never receives its run_after_pass.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        "%1 should_run"
        "%2 should_run"
        "%1 run_before_pass"
        "%2 run_before_pass"
        "transform pass"
        "%1 run_after_pass"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
def test_instrument_call_sequence():
    """Callbacks interleave across instruments in registration order."""
    events = []

    @pass_instrument
    class PI:
        def __init__(self, id):
            self.id = id

        def enter_pass_ctx(self):
            events.append(self.id + " enter_pass_ctx")

        def exit_pass_ctx(self):
            events.append(self.id + " exit_pass_ctx")

        def should_run(self, mod, info):
            events.append(" " + self.id + " should_run")
            return True

        def run_before_pass(self, mod, info):
            events.append(" " + self.id + " run_before_pass")

        def run_after_pass(self, mod, info):
            events.append(" " + self.id + " run_after_pass")

    @tvm.transform.module_pass(opt_level=2)
    def transform1(mod, ctx):
        events.append(" transform1 pass")
        return mod

    @tvm.transform.module_pass(opt_level=2)
    def transform2(mod, ctx):
        events.append(" transform2 pass")
        return mod

    mod = get_test_model()
    with tvm.transform.PassContext(instruments=[PI("%1"), PI("%2")]):
        mod = transform1(mod)
        mod = transform2(mod)

    # enter/exit happen once per context; should_run / run_before / run_after
    # repeat per pass, always iterating instruments in registration order.
    assert (
        "%1 enter_pass_ctx"
        "%2 enter_pass_ctx"
        " %1 should_run"
        " %2 should_run"
        " %1 run_before_pass"
        " %2 run_before_pass"
        " transform1 pass"
        " %1 run_after_pass"
        " %2 run_after_pass"
        " %1 should_run"
        " %2 should_run"
        " %1 run_before_pass"
        " %2 run_before_pass"
        " transform2 pass"
        " %1 run_after_pass"
        " %2 run_after_pass"
        "%1 exit_pass_ctx"
        "%2 exit_pass_ctx" == "".join(events)
    )
| 16,044 | 28.279197 | 91 | py |
tvm | tvm-main/tests/python/relay/test_pass_dynamic_to_static.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
import tvm.topi.testing
import tvm.testing
def run_opt_pass(expr, opt_pass, params=None):
    """Apply `opt_pass` to `expr` (optionally binding `params`) and return the result.

    Returns the transformed main function if `expr` was a Function, otherwise
    the transformed function's body expression.
    """
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    if params is not None:
        mod["main"] = bind_params_by_name(mod["main"], params)
    mod = opt_pass(mod)
    main_fn = mod["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def verify_func(func, data, ref_res, rtol=1e-5, atol=1e-7):
    """Run `func` with `data` on every enabled target/executor and compare to `ref_res`."""
    assert isinstance(data, list)
    for target, dev in tvm.testing.enabled_targets():
        for executor_kind in ("graph", "vm", "debug"):
            mod = tvm.ir.IRModule.from_expr(func)
            evaluator = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()
            result = evaluator(*data)
            tvm.testing.assert_allclose(result.numpy(), ref_res, rtol=rtol, atol=atol)
@tvm.testing.uses_gpu
def test_dynamic_to_static_reshape():
    """DynamicToStatic folds reshape-with-dynamic-newshape into a static reshape."""

    def verify_reshape(shape, newshape, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(newshape, "float32"))
        func = run_infer_type(relay.Function([x, y], relay.reshape(x, relay.shape_of(y))))
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("reshape")
        # The dynamic shape argument must have been folded into the attribute.
        assert "newshape=" in call.astext()
        assert call.checked_type == relay.ty.TensorType(oshape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
        verify_func(static, [x_data, y_data], np.reshape(x_data, oshape))

    verify_reshape((2, 3, 4), (8, 3), (8, 3))
    verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_squeeze():
    """DynamicToStatic converts squeeze with a dynamic axis argument to static form."""

    def verify_squeeze(shape, axis, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(axis, "float32"))
        func = run_infer_type(relay.Function([x, y], relay.squeeze(x, relay.shape_of(y))))
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("squeeze")
        # The dynamic axis argument must have been folded into the attribute.
        assert "axis=" in call.astext()
        assert call.checked_type == relay.ty.TensorType(oshape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=axis).astype("float32")
        verify_func(static, [x_data, y_data], np.squeeze(x_data, axis))

    verify_squeeze((1, 3, 4, 1), (0,), (3, 4, 1))
    verify_squeeze((1, 3, 4, 1), (3,), (1, 3, 4))
    verify_squeeze((1, 3, 4, 1), (0, 3), (3, 4))
@tvm.testing.uses_gpu
def test_dynamic_to_static_double_reshape():
    """Two chained dynamic reshapes collapse to a static round-trip reshape."""

    def verify_reshape(shape, newshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(newshape, "float32"))
        reshaped = relay.reshape(x, relay.shape_of(y))
        round_trip = relay.reshape(reshaped, relay.shape_of(x))
        func = run_infer_type(relay.Function([x, y], round_trip))
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("reshape")
        assert "newshape=" in call.astext()
        # Reshaping there and back preserves the original input type and values.
        assert call.checked_type == relay.ty.TensorType(shape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
        verify_func(static, [x_data, y_data], x_data)

    verify_reshape((2, 3, 4), (8, 3))
    verify_reshape((4, 7), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_quad_reshape():
    """Four chained dynamic reshapes still fold to a single static reshape."""

    def verify_reshape(shape, newshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(newshape, "float32"))
        # Chain reshapes so each target shape depends on the previous result.
        z1 = relay.reshape(x, relay.shape_of(y))
        z2 = relay.reshape(z1, relay.shape_of(x))
        z3 = relay.reshape(z2, relay.shape_of(z1))
        z4 = relay.reshape(z3, relay.shape_of(z2))
        func = run_infer_type(relay.Function([x, y], z4))
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("reshape")
        assert "newshape=" in zz.astext()
        # Even number of round-trips: the final type equals the input type.
        assert zz.checked_type == relay.ty.TensorType(shape, "float32")
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
        verify_func(func2, [x_data, y_data], x_data)

    verify_reshape((2, 3, 4), (8, 3))
    verify_reshape((4, 7), (2, 7, 2))
@tvm.testing.uses_gpu
def test_dynamic_to_static_tile():
    """DynamicToStatic folds tile with a dynamic reps argument into a static tile."""

    def verify_tile(shape, reps, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(reps, "float32"))
        func = run_infer_type(relay.Function([x, y], relay.tile(x, relay.shape_of(y))))
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("tile")
        assert call.checked_type == relay.ty.TensorType(oshape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=reps).astype("float32")
        verify_func(static, [x_data, y_data], np.tile(x_data, reps))

    verify_tile((2, 3, 4), (2, 1, 5), (4, 3, 20))
    verify_tile((4, 7), (4, 2), (16, 14))
@tvm.testing.uses_gpu
def test_dynamic_to_static_topk():
    """DynamicToStatic binds a dynamic `k` parameter and yields a static topk call."""

    def verify_topk(k, axis, ret_type, is_ascend, dtype):
        shape = (20, 100)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        k_var = relay.var("k", relay.TensorType((), "int32"))
        out = relay.topk(x, k_var, axis, ret_type, is_ascend, dtype)
        if isinstance(out, relay.expr.TupleWrapper):
            out = out.astuple()
        func = relay.Function([x, k_var], out)
        params = {"k": k}

        # Build the NumPy reference values/indices via argsort.
        np_data = np.random.uniform(size=shape).astype("float32")
        if is_ascend:
            np_indices = np.argsort(np_data, axis=axis)
        else:
            np_indices = np.argsort(-np_data, axis=axis)
        kk = k if k >= 1 else shape[axis]  # k < 1 keeps the whole axis
        if axis == 0:
            np_indices = np_indices[:kk, :]
            np_values = np.zeros(np_indices.shape).astype("float32")
            for i in range(shape[1]):
                np_values[:, i] = np_data[np_indices[:, i], i]
        else:
            np_indices = np_indices[:, :kk]
            np_values = np.zeros(np_indices.shape).astype("float32")
            for i in range(shape[0]):
                np_values[i, :] = np_data[i, np_indices[i, :]]
        np_indices = np_indices.astype(dtype)

        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("topk")

        for target, dev in tvm.testing.enabled_targets():
            # Numerical verification is limited to llvm targets here.
            if "llvm" not in target:
                continue
            for kind in ["graph", "vm", "debug"]:
                mod = tvm.ir.IRModule.from_expr(func2)
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    np_data
                )
                if ret_type == "both":
                    tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
                    tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
                elif ret_type == "values":
                    tvm.testing.assert_allclose(op_res.numpy(), np_values)
                else:
                    tvm.testing.assert_allclose(op_res.numpy(), np_indices)

    # Fixed seed so ties in the random data sort deterministically.
    np.random.seed(0)
    for k in [0, 1, 5]:
        for axis in [0, -1, 1]:
            for ret_type in ["both", "values", "indices"]:
                verify_topk(k, axis, ret_type, True, "int64")
                verify_topk(k, axis, ret_type, False, "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_broadcast_to():
    """DynamicToStatic resolves broadcast_to with a dynamic target shape."""

    def verify_broadcast_to(shape, broadcast_shape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(broadcast_shape, "float32"))
        func = run_infer_type(
            relay.Function([x, y], relay.broadcast_to(x, shape=relay.shape_of(y)))
        )
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("broadcast_to")
        assert call.checked_type == relay.ty.TensorType(broadcast_shape, "float32")

        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32")
        verify_func(static, [x_data, y_data], np.broadcast_to(x_data, y_data.shape))

    verify_broadcast_to((3, 1), (3, 3))
@tvm.testing.uses_gpu
def test_dynamic_to_static_zeros_ones():
    """zeros/ones with a dynamic shape argument become static constant tensors."""

    def verify_ones_zeros(shape, dtype):
        for relay_op, np_ref in ((relay.zeros, np.zeros), (relay.ones, np.ones)):
            x = relay.var("x", relay.TensorType(shape, dtype))
            func = run_infer_type(relay.Function([x], relay_op(relay.shape_of(x), dtype)))
            static = run_opt_pass(
                run_opt_pass(func, transform.DynamicToStatic()),
                transform.InferType(),
            )
            assert static.body.checked_type == relay.ty.TensorType(shape, dtype)

            # Input values are irrelevant; only the input's shape feeds the op.
            x_data = np.random.uniform(low=1, high=1, size=shape)
            verify_func(static, [x_data], np_ref(x_data.shape))

    verify_ones_zeros((1, 2, 3), "int64")
    verify_ones_zeros((9, 8, 3, 4), "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_resize2d():
    """image.resize2d with a dynamic size becomes static once size is bound."""

    def verify_resize(shape, scale, method, layout):
        # Spatial dims are at positions 1,2 for NHWC and 2,3 for NCHW.
        if layout == "NHWC":
            size = (shape[1] * scale, shape[2] * scale)
        else:
            size = (shape[2] * scale, shape[3] * scale)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        size_var = relay.var("size", relay.TensorType((len(size),), "float32"))
        coord_trans = "asymmetric" if method == "nearest_neighbor" else "align_corners"
        z = relay.image.resize2d(
            x, size_var, None, layout, method, coordinate_transformation_mode=coord_trans
        )
        params = {"size": np.array(size).astype("float32")}
        func = run_infer_type(relay.Function([x, size_var], z))
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("image.resize2d")
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        ref_res = tvm.topi.testing.resize2d_python(
            x_data, (scale, scale), layout, method, coord_trans
        )
        # BUG FIX: ref_res used to be computed but never checked, so the test
        # verified only the op conversion, never the numerics. Loose tolerances
        # because interpolation results can differ slightly across targets.
        verify_func(func2, [x_data], ref_res, rtol=1e-4, atol=1e-6)

    for method in ["linear", "nearest_neighbor"]:
        for layout in ["NCHW", "NHWC"]:
            verify_resize((1, 4, 4, 4), 2, method, layout)
@tvm.testing.uses_gpu
def test_dynamic_to_static_one_hot():
    """one_hot with variable on/off values becomes static once they are bound."""

    def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
        indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
        depth_var = relay.const(depth)
        on_value_var = relay.var("on_value", relay.TensorType((), "int32"))
        off_value_var = relay.var("off_value", relay.TensorType((), "int32"))
        out = relay.one_hot(indices, on_value_var, off_value_var, depth_var, axis, dtype)
        # Bind on/off values as params so DynamicToStatic can fold them.
        params = {
            "on_value": on_value,
            "off_value": off_value,
        }
        func = relay.Function([indices, on_value_var, off_value_var], out)
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("one_hot")

        indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
        out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
        verify_func(func2, [indices_np], out_np)

    _verify((3,), 3, 1, 0, -1, "int32")
    _verify((3,), 3, 1.0, 0.0, -1, "float32")
    _verify((2, 2), 5, 2, -2, 0, "int32")
    _verify((2, 2), 5, 0.5, -0.5, 1, "float32")
    _verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
    _verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_full():
    """full() with a dynamic shape argument becomes a static full op."""

    def verify_full(fill_value, fill_shape, dtype):
        x = relay.var("x", relay.scalar_type(dtype))
        y = relay.var("y", relay.TensorType(fill_shape, "int64"))
        func = run_infer_type(relay.Function([x, y], relay.full(x, relay.shape_of(y), dtype)))
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("full")

        # y contributes only its shape; its random contents are ignored.
        y_data = np.random.uniform(low=-1, high=1, size=fill_shape).astype("int64")
        ref_res = np.full(fill_shape, fill_value).astype(dtype)
        verify_func(static, [fill_value, y_data], ref_res)

    verify_full(4, (1, 2, 3, 4), "int32")
    verify_full(4.0, (1, 2, 8, 10), "float32")
def test_dynamic_to_static_upsampling():
    """nn.upsampling with dynamic scales becomes static once the scales are bound."""

    def verify_upsampling(data_shape, scale_h_val, scale_w_val, dtype):
        x = relay.var("x", relay.TensorType(data_shape, dtype))
        scale_h = relay.var("scale_h", relay.TensorType((), "float32"))
        scale_w = relay.var("scale_w", relay.TensorType((), "float32"))
        z = relay.nn.upsampling(x, scale_h, scale_w)
        # Bind the scale factors as params so DynamicToStatic can fold them.
        params = {
            "scale_h": scale_h_val,
            "scale_w": scale_w_val,
        }
        func = run_infer_type(relay.Function([x, scale_h, scale_w], z))
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("nn.upsampling")

        x_data = np.random.uniform(size=data_shape).astype(dtype)
        # Reference via nearest-neighbor resize — presumably matches the op's
        # default interpolation method here; verify against the op definition.
        ref_res = tvm.topi.testing.resize2d_python(
            x_data, (scale_h_val, scale_w_val), "NCHW", "nearest_neighbor", "asymmetric"
        )
        verify_func(func2, [x_data], ref_res)

    verify_upsampling((1, 16, 32, 32), 2, 2, "int8")
    verify_upsampling((1, 16, 32, 32), 4, 4, "int32")
def test_dynamic_to_static_upsampling3d():
    """nn.upsampling3d with dynamic scales becomes static once the scales are bound."""

    def verify_upsampling3d(data_shape, scale_d_val, scale_h_val, scale_w_val, dtype):
        x = relay.var("x", relay.TensorType(data_shape, dtype))
        scale_d = relay.var("scale_d", relay.TensorType((), "float32"))
        scale_h = relay.var("scale_h", relay.TensorType((), "float32"))
        scale_w = relay.var("scale_w", relay.TensorType((), "float32"))
        z = relay.nn.upsampling3d(x, scale_d, scale_h, scale_w)
        # Bind the three scale factors as params so DynamicToStatic can fold them.
        params = {
            "scale_d": scale_d_val,
            "scale_h": scale_h_val,
            "scale_w": scale_w_val,
        }
        func = run_infer_type(relay.Function([x, scale_d, scale_h, scale_w], z))
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("nn.upsampling3d")

        x_data = np.random.uniform(size=data_shape).astype(dtype)
        # Reference via nearest-neighbor 3D resize in NCDHW layout.
        ref_res = tvm.topi.testing.resize3d_python(
            x_data,
            (scale_d_val, scale_h_val, scale_w_val),
            "NCDHW",
            "nearest_neighbor",
            "asymmetric",
        )
        verify_func(func2, [x_data], ref_res)

    verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "int8")
    verify_upsampling3d((5, 7, 8, 10, 32), 3, 2, 2, "int8")
    verify_upsampling3d((1, 4, 2, 5, 3), 5, 4, 3, "int32")
def test_dynamic_to_static_pad():
    """nn.pad with a dynamic pad_width becomes static once the widths are bound."""

    def verify_pad(data_shape, pad_width_val, pad_val, dtype):
        x = relay.var("x", relay.TensorType(data_shape, dtype))
        pad_width = relay.var(
            "pad_width", relay.TensorType((len(pad_width_val), len(pad_width_val[0])), "int32")
        )
        func = run_infer_type(relay.Function([x, pad_width], relay.nn.pad(x, pad_width, pad_val)))
        bound_params = {"pad_width": np.array(pad_width_val)}
        static = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), bound_params),
            transform.InferType(),
        )

        call = static.body
        assert isinstance(call, relay.Call)
        assert call.op == relay.op.get("nn.pad")

        x_data = np.random.uniform(size=data_shape).astype(dtype)
        # Same pad value on both sides of every axis.
        ref_res = np.pad(
            x_data, pad_width_val, "constant", constant_values=(((pad_val,) * 2),) * len(data_shape)
        )
        verify_func(static, [x_data], ref_res)

    verify_pad((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), 2.0, "int32")
    verify_pad((2, 7), ((1, 4), (2, 2)), 4.0, "float64")
def test_dynamic_to_static_strided_slice():
    """strided_slice with dynamic begin/end/strides folds to the static op."""

    def verify(
        dshape,
        begin_val,
        end_val,
        strides_val,
        output,  # NOTE(review): unused — the expected shape is never checked here
        slice_mode="end",
        test_ref=True,  # NOTE(review): unused — verify_func always runs
        dtype="int32",
    ):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        ndim = len(dshape)
        # Normalize omitted begin/end/strides to the full-tensor defaults.
        begin_val = begin_val if begin_val else [0] * ndim
        end_val = end_val if end_val else list(dshape)
        if strides_val:
            if len(strides_val) == 1:
                strides_val = strides_val * ndim
        else:
            strides_val = [1] * ndim

        # target numpy result
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.strided_slice_python(
            x_data, begin_val, end_val, strides_val, slice_mode
        )
        data = [x_data, np.array(begin_val), np.array(end_val)]

        begin = relay.var("begin", relay.TensorType((len(begin_val),), dtype))
        end = relay.var("end", relay.TensorType((len(end_val),), dtype))
        func_params = [x, begin, end]
        if strides_val:
            data.append(np.array(strides_val))
            strides = relay.var("strides", relay.TensorType((len(strides_val),), dtype))
            z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
            func_params.append(strides)
        else:
            z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)
        func = relay.Function(func_params, z)

        params = {"begin": begin_val, "end": end_val, "strides": strides_val}
        func = run_infer_type(func)
        func2 = run_opt_pass(
            run_opt_pass(func, transform.DynamicToStatic(), params), transform.InferType()
        )
        assert isinstance(func2.body, relay.Call)
        assert func2.body.op == relay.op.get("strided_slice")

        # begin/end/strides were bound as params, so only x remains an input.
        verify_func(func2, [x_data], ref_res)

    verify((1, 3, 10, 10), [0, 0, 0, 0], [1, 3, 10, 10], [1], (0, 3, 10, 10), dtype="int64")
    verify(
        (1, 224, 224, 3),
        [0, 20, 20, 0],
        [1, 140, 140, 3],
        [1, 1, 1, 1],
        (1, 120, 120, 3),
        dtype="int64",
    )
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
    verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
    verify(
        (3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
    )
    verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
@tvm.testing.uses_gpu
def test_dyn_to_static_sparse_to_dense():
    """sparse_to_dense with a constant output shape converts to the static op."""

    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
        sparse_indices_data = np.array(sparse_indices)
        sparse_values_data = np.array(sparse_values)
        default_value_data = np.array(default_value)
        output_shape_data = np.array(output_shape)

        a = relay.var(
            "a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
        )
        b = relay.var(
            "b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
        )
        output_shape_const = relay.const(output_shape_data)
        # default_value is optional; the last test case below shows the
        # unfilled entries default to zero when it is omitted.
        if default_value is None:
            args = [a, b]
            d = relay.sparse_to_dense(a, output_shape_const, b)
        else:
            c = relay.var(
                "c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
            )
            args = [a, b, c]
            d = relay.sparse_to_dense(a, output_shape_const, b, c)

        zz = run_infer_type(d)
        assert len(zz.checked_type.shape) == len(output_shape)

        func = relay.Function(args, d)
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
        assert isinstance(func2.body, relay.Call)
        assert func2.body.op == relay.op.get("sparse_to_dense")

        if default_value is None:
            arguments = [sparse_indices_data, sparse_values_data]
        else:
            arguments = [sparse_indices_data, sparse_values_data, default_value_data]

        verify_func(func2, arguments, xpected)

    verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])  # scalar
    verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])  # vector
    verify_sparse_to_dense(
        [[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
    )  # nXd
    verify_sparse_to_dense(
        [[0, 0, 0], [1, 2, 3]],
        [1, 2],
        4,
        [2, 3, 4],
        [[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
    )  # nXd
    verify_sparse_to_dense(
        [0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
    )  # floats
    verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])  # default value not specified
@tvm.testing.uses_gpu
def test_dynamic_to_static_dynamic_rank():
    """A full() whose shape comes through shape_of/strided_slice (dynamic rank)
    must still be converted to a static "full" call by DynamicToStatic."""

    def verify_full(fill_value, fill_shape, dtype):
        x = relay.var("x", relay.scalar_type(dtype))
        y = relay.var("y", relay.TensorType(fill_shape, "int64"))
        # Slice the shape tensor by its own (dynamic) length so the rank is
        # not statically known before the pass runs.
        shape = relay.shape_of(y)
        shape = relay.strided_slice(shape, [0], relay.shape_of(shape))
        z = relay.full(x, shape, dtype)

        func = relay.Function([x, y], z)
        func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())

        zz = func2.body
        assert isinstance(zz, relay.Call)
        assert zz.op == relay.op.get("full")

        ref_res = np.full(fill_shape, fill_value).astype(dtype)
        y_data = np.random.uniform(low=-1, high=1, size=fill_shape).astype("int64")
        verify_func(func2, [fill_value, y_data], ref_res)

    verify_full(4, (1, 2, 3, 4), "int32")
    verify_full(4.0, (1, 2, 8, 10), "float32")
@tvm.testing.uses_gpu
def test_dynamic_to_static_dynamic_if():
    """An If with a constant-true condition is simplified so that the
    function body becomes the taken branch's reshape call."""
    x = relay.var("x", relay.TensorType((2, 2), "int64"))
    cond = relay.const(1)
    iff = relay.If(cond, relay.reshape(x, [1, 4]), relay.reshape(x, (4, 1)))

    func = relay.Function([x], iff)
    func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType())
    zz = func2.body
    assert isinstance(zz, relay.Call)
    assert zz.op == relay.op.get("reshape")
    x_data = np.random.uniform(low=-1, high=1, size=(2, 2)).astype("int64")
    # The true branch ([1, 4] reshape) is the one that must survive.
    verify_func(func2, [x_data], x_data.reshape(1, 4))
if __name__ == "__main__":
    # Dispatch all tests in this file through TVM's pytest-based runner.
    tvm.testing.main()
| 25,759 | 39.124611 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_floordiv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
import tvm.testing
from tvm.script import tir
def test_floor_div_op():
    """Check tir.floordiv / tir.floormod lowering for all signed int widths.

    For each dtype, a prim_func fills ``A`` with floordiv/floormod results:
    rows [0, N) operate on values near the dtype's minimum (``C[i] - max_value``,
    exercising wrap-around-adjacent inputs), rows [100, N + 100) operate on
    random values from ``B``.  Results are compared against Python's ``//`` and
    ``%`` after shifting negative remainders into the positive range to match
    TVM's floormod convention.
    """
    target = "llvm"
    dev = tvm.device(target)
    N = 100
    divisor = 5

    @tir.prim_func
    def func_64(
        A: tir.Buffer((N + 100, 2), "int64"),
        B: tir.Buffer((N), "int64"),
        C: tir.Buffer((N), "int64"),
    ):
        for i in tir.serial(N):
            with tir.block("A"):
                v_i = tir.axis.spatial(N, i)
                A[v_i, 0] = tir.floordiv(C[v_i] - tir.max_value("int64"), divisor)
                A[v_i, 1] = tir.floormod(C[v_i] - tir.max_value("int64"), divisor)
                A[v_i + 100, 0] = tir.floordiv(B[v_i], divisor)
                A[v_i + 100, 1] = tir.floormod(B[v_i], divisor)

    @tir.prim_func
    def func_32(
        A: tir.Buffer((N + 100, 2), "int32"),
        B: tir.Buffer((N), "int32"),
        C: tir.Buffer((N), "int32"),
    ):
        for i in tir.serial(N):
            with tir.block("A"):
                v_i = tir.axis.spatial(N, i)
                A[v_i, 0] = tir.floordiv(C[v_i] - tir.max_value("int32"), divisor)
                A[v_i, 1] = tir.floormod(C[v_i] - tir.max_value("int32"), divisor)
                A[v_i + 100, 0] = tir.floordiv(B[v_i], divisor)
                A[v_i + 100, 1] = tir.floormod(B[v_i], divisor)

    @tir.prim_func
    def func_16(
        A: tir.Buffer((N + 100, 2), "int16"),
        B: tir.Buffer((N), "int16"),
        C: tir.Buffer((N), "int16"),
    ):
        for i in tir.serial(N):
            with tir.block("A"):
                v_i = tir.axis.spatial(N, i)
                A[v_i, 0] = tir.floordiv(C[v_i] - tir.max_value("int16"), divisor)
                A[v_i, 1] = tir.floormod(C[v_i] - tir.max_value("int16"), divisor)
                A[v_i + 100, 0] = tir.floordiv(B[v_i], divisor)
                A[v_i + 100, 1] = tir.floormod(B[v_i], divisor)

    @tir.prim_func
    def func_8(
        A: tir.Buffer((N + 100, 2), "int8"), B: tir.Buffer((N), "int8"), C: tir.Buffer((N), "int8")
    ):
        for i in tir.serial(N):
            with tir.block("A"):
                v_i = tir.axis.spatial(N, i)
                A[v_i, 0] = tir.floordiv(C[v_i] - tir.max_value("int8"), divisor)
                A[v_i, 1] = tir.floormod(C[v_i] - tir.max_value("int8"), divisor)
                A[v_i + 100, 0] = tir.floordiv(B[v_i], divisor)
                A[v_i + 100, 1] = tir.floormod(B[v_i], divisor)

    # NOTE: the loop variable was previously named `type`, shadowing the
    # builtin; renamed to `dtype` (local only, no interface change).
    for opfunc, dtype in [
        (func_8, "int8"),
        (func_16, "int16"),
        (func_32, "int32"),
        (func_64, "int64"),
    ]:
        built = tvm.build(opfunc, target=target)
        x_data = np.random.randint(
            te.min_value(dtype), te.max_value(dtype), size=(100), dtype=dtype
        )
        # Sequential values 0..N-1; np.arange replaces the list-comprehension form.
        y_data = np.arange(N, dtype=dtype)
        a_dev = tvm.nd.empty([N + 100, 2], dtype, dev)
        b_dev = tvm.nd.array(x_data, dev)
        c_dev = tvm.nd.array(y_data, dev)
        built(a_dev, b_dev, c_dev)
        a = a_dev.numpy()
        b = b_dev.numpy()
        c = c_dev.numpy()
        # python modulo behaves a bit different to tvm floormod for negative numbers
        for i in range(N + 100):
            if a[i, 1] < 0:
                a[i, 1] = divisor + a[i, 1]
        np.testing.assert_array_equal(a[:100, 0], (c - te.max_value(dtype)) // divisor)
        np.testing.assert_array_equal(a[:100, 1], (c - te.max_value(dtype)) % divisor)
        np.testing.assert_array_equal(a[100 : N + 100, 0], b // divisor)
        np.testing.assert_array_equal(a[100 : N + 100, 1], b % divisor)
if __name__ == "__main__":
    # Dispatch all tests in this file through TVM's pytest-based runner.
    tvm.testing.main()
| 4,418 | 36.449153 | 99 | py |
tvm | tvm-main/tests/python/relay/test_ir_nodes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test ir"""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.tir.expr import *
from tvm.relay import op
import numpy as np
def check_json_roundtrip(node):
    """Serialize *node* to JSON, reload it, and assert structural equality."""
    serialized = tvm.ir.save_json(node)
    restored = tvm.ir.load_json(serialized)
    assert tvm.ir.structural_equal(restored, node, map_free_vars=True)
# Span
def test_span():
    """Span construction, identity checks, and JSON round-trip keep all fields."""
    span = relay.Span(None, 1, 2, 3, 4)
    assert span.source_name == None
    expected = {"line": 1, "end_line": 2, "column": 3, "end_column": 4}
    for attr, value in expected.items():
        assert getattr(span, attr) == value
    assert span.same_as(span)
    assert span == span
    assert isinstance(span, relay.base.Span)
    str(span)

    # Span is not an IR node, so structural equality can't verify the
    # round trip; compare the reloaded fields one by one instead.
    back = tvm.ir.load_json(tvm.ir.save_json(span))
    for attr in ("source_name", "line", "end_line", "column", "end_column"):
        assert getattr(back, attr) == getattr(span, attr)
def test_constant():
    """A Constant wraps an NDArray and carries no span until one is attached."""
    ndarray = tvm.nd.array(10)
    const = relay.Constant(ndarray)
    assert const.data == ndarray
    assert const.span == None
    str(const)
    check_json_roundtrip(const)
def test_tuple():
    """An empty Tuple keeps its (empty) field list and survives JSON round-trip."""
    members = tvm.runtime.convert([])
    tup = relay.Tuple(members)
    assert tup.fields == members
    assert tup.span == None
    str(tup)
    check_json_roundtrip(tup)
def test_local_var():
    """Var keeps its name hint, with and without a type annotation."""
    name = "s"
    untyped = relay.Var(name)
    assert untyped.name_hint == name
    assert untyped.type_annotation is None
    # assert lv.span == None todo(@jroesch): what do we do about spans
    str(untyped)
    check_json_roundtrip(untyped)

    scalar_ty = relay.ty.TensorType((), "float")
    typed = relay.Var(name, scalar_ty)
    assert typed.name_hint == name
    assert typed.type_annotation == scalar_ty
def test_global_var():
    """GlobalVar keeps its name hint and survives a JSON round-trip."""
    name_hint = "g"
    gv = relay.GlobalVar(name_hint)
    # Bug fix: this was previously a bare `gv.name_hint == name_hint`
    # comparison whose result was discarded, so the check never ran.
    assert gv.name_hint == name_hint
    # assert lv.span == None todo(@jroesch): what do we do about spans
    str(gv)
    check_json_roundtrip(gv)
def test_function():
    """Function fields and string attributes are preserved on the node."""
    params = tvm.runtime.convert([relay.Var(name) for name in ["a", "b", "c", "d"]])
    ret_type = relay.TupleType(tvm.runtime.convert([]))
    body = relay.Tuple(tvm.runtime.convert([]))
    type_params = tvm.runtime.convert([])
    fn = relay.Function(params, body, ret_type, type_params)
    attrs = {"test_attribute": "value", "test_attribute1": "value1"}
    for key, value in attrs.items():
        fn = fn.with_attr(key, value)
    assert fn.params == params
    assert fn.body == body
    assert fn.type_params == type_params
    assert fn.span == None
    for key, value in attrs.items():
        assert fn.attrs[key] == value
    str(fn)
    check_json_roundtrip(fn)
def test_function_attrs():
    """Constant model parameters stored in a function's "__params__" attribute
    must survive a JSON save/load round-trip (names and tensor contents)."""
    param_names = ["a", "b", "c", "d"]
    params = tvm.runtime.convert([relay.var(n, shape=(5, 2)) for n in param_names])
    ret_type = relay.TupleType(tvm.runtime.convert([]))
    body = relay.Tuple(tvm.runtime.convert([]))
    type_params = tvm.runtime.convert([])
    fn = relay.Function(params, body, ret_type, type_params)
    # Attach a constant tensor for the first parameter only.
    model_params = {}
    for param in params[:1]:
        cty = param.type_annotation
        tensor = np.random.rand(*[int(sh) for sh in cty.shape]).astype(cty.dtype)
        model_params[param] = relay.Constant(tvm.nd.array(tensor))

    fn = fn.with_attr("__params__", model_params)

    assert fn.params == params
    assert fn.body == body
    assert fn.type_params == type_params
    assert fn.span == None
    str(fn)
    check_json_roundtrip(fn)
    json_str = tvm.ir.save_json(fn)
    fn_after = tvm.ir.load_json(json_str)
    model_params_after = fn_after.attrs["__params__"]
    after_keys = [item[0] for item in model_params_after.items()]
    # NOTE(review): zipping the source dict against the reloaded map assumes
    # matching key order; safe here since only one entry exists — confirm
    # before extending model_params beyond a single parameter.
    for key1, key2 in zip(model_params, after_keys):
        assert key1.name_hint == key2.name_hint
        p1 = model_params[key1]
        p2 = model_params_after[key2]
        np.testing.assert_allclose(p1.data.numpy(), p2.data.numpy())
def test_call():
    """Call records its operator and argument list."""
    # Named `callee` rather than `op` to avoid shadowing the module import.
    callee = relay.Var("f")
    args = tvm.runtime.convert([relay.Var(name) for name in ["a", "b", "c", "d"]])
    call = relay.Call(callee, args, None, None)
    assert call.op == callee
    assert call.args == args
    assert call.span == None
    str(call)
    check_json_roundtrip(call)
def test_let():
    """Let binds a value to a variable within a body expression."""
    bound_var = relay.Var("x")
    value = relay.Constant(tvm.nd.array(10))
    # Argument order is (var, value, body), mirroring `let x = v in b`.
    let = relay.Let(bound_var, value, bound_var)
    assert let.var == bound_var
    assert let.value == value
    assert let.body == bound_var
    assert let.span == None
    str(let)
    check_json_roundtrip(let)
def test_if():
    """If stores its condition, true branch, and false branch."""
    cond, left, right = (relay.Var(name) for name in ("cond", "left", "right"))
    ife = relay.If(cond, left, right)
    for actual, wanted in ((ife.cond, cond), (ife.true_branch, left), (ife.false_branch, right)):
        assert actual == wanted
    assert ife.span == None
    str(ife)
    check_json_roundtrip(ife)
def test_tuple_get_item():
    """TupleGetItem keeps both the tuple expression and the selected index."""
    tup = relay.Var("tuple")
    projection = relay.TupleGetItem(tup, 1)
    assert projection.tuple_value == tup
    assert projection.index == 1
    str(projection)
    check_json_roundtrip(projection)
def test_op():
    """A primitive operator node survives a JSON round-trip."""
    check_json_roundtrip(op.op.get("add"))
def test_conv2d_attrs():
    """A conv2d call with explicit attributes serializes and reloads cleanly."""
    data = relay.var("data", shape=(1, 3, 224, 224))
    weight = relay.var("param", shape=(64, 3, 7, 7))
    conv = op.nn.conv2d(
        data,
        weight,
        strides=(2, 2),
        padding=(3, 3),
        channels=64,
        kernel_size=(7, 7),
    )
    check_json_roundtrip(conv)
# Commented due to weird memory allocation issue
# def test_large_grpah():
# Test large graphs to avoid stack overflow in serialize/deserialize
# size = int(1e5)
# var = [relay.var("var_" + str(i), shape=(2, 3)) for i in range(size)]
# body = var[-1]
# for i in range(size, 1, -1):
# body = relay.Let(var[i - 1], op.add(var[i - 2], var[i - 2]), body)
# func = relay.Function([var[0]], body)
# check_json_roundtrip(func)
if __name__ == "__main__":
    # This file predates tvm.testing.main(); invoke each test directly.
    test_span()
    test_constant()
    test_tuple()
    test_local_var()
    test_global_var()
    test_function()
    test_function_attrs()
    test_call()
    test_let()
    test_if()
    test_tuple_get_item()
    test_op()
    test_conv2d_attrs()
    # Commented due to weird memory allocation issue
    # test_large_grpah()
| 7,121 | 28.799163 | 100 | py |
tvm | tvm-main/tests/python/relay/test_ir_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for module functionality."""
import tvm
from tvm import te
from tvm import relay
from tvm.relay.prelude import Prelude
def constructor_list(p):
    """Collect the constructors (every get_type entry past the first, which is
    the type itself) of the prelude's List, Option, nat, and Tree ADTs."""
    ctors = []
    for type_name in ("List", "Option", "nat", "Tree"):
        ctors += p.mod.get_type(type_name)[1:]
    return ctors
def adt_list(p):
    """Collect the type nodes (the first get_type entry) of the prelude's
    List, Option, nat, and Tree ADTs."""
    adts = []
    for type_name in ("List", "Option", "nat", "Tree"):
        adts += p.mod.get_type(type_name)[:1]
    return adts
def test_constructor_tag_round_trip():
    """Constructor tags must be stable across independently built prelude modules."""
    mod1 = tvm.IRModule()
    p1 = Prelude(mod1)
    p1.mod.import_from_std("nat.rly")
    mod2 = tvm.IRModule()
    p2 = Prelude(mod2)
    p2.mod.import_from_std("nat.rly")

    # A tag computed in module 1 must resolve to the matching ctor in module 2.
    for ctor1, ctor2 in zip(constructor_list(p1), constructor_list(p2)):
        resolved = mod2.get_constructor(ctor1.tag)
        assert resolved == ctor2
        assert resolved.name_hint == ctor1.name_hint
def test_constructor_tag_differences():
    """Within one ADT, consecutive constructor tags differ by exactly 1, and
    the tag is not just the raw constructor index (high bits are set)."""
    # ensure that if we have the type data for a given ADT, the tags
    # for the constructors of the *same ADT* are simple offsets from
    # each other
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")

    adts = adt_list(p)
    for adt in adts:
        data = mod[adt]
        for i in range(len(data.constructors) - 1):
            ctor1 = data.constructors[i]
            ctor2 = data.constructors[i + 1]
            assert ctor2.tag - ctor1.tag == 1
            # make sure there is something present at the MSB
            assert ctor1.tag - i != 0
            assert ctor2.tag - (i + 1) != 0
| 2,639 | 32.846154 | 79 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_requantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
roundings = ["UPWARD", "TONEAREST"]
compute_dtypes = ["float32", "float64", "int64"]
out_dtypes = ["int8", "int16"]
def verify(mod, goldens, target="llvm"):
    """Build *mod* for *target*, run it on the golden input, and require the
    output to match the golden output exactly (integer semantics)."""
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, target, params=None)
    golden_data, golden_output = goldens
    rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
    rt_mod.set_input("input_data", golden_data)
    rt_mod.set_input(**params)
    rt_mod.run()
    res = rt_mod.get_output(0).numpy()
    # Exact equality: requantize is an integer op, no tolerance allowed.
    np.testing.assert_equal(res, golden_output)
def get_mod(
    data_shape,
    data_dtype,
    out_dtype,
    input_scale,
    output_scale,
    input_zero_point=0,
    output_zero_point=0,
    rounding="None",
    compute_dtype="None",
    axis=0,
):
    """Build an IRModule containing a single qnn.requantize call.

    input_scale / input_zero_point may be scalars or lists (per-channel case);
    lists are turned into constant tensors.  rounding/compute_dtype default to
    the string "None", which lets the requantize config supply the value.
    """
    input_data = relay.var("input_data", shape=data_shape, dtype=data_dtype)
    if isinstance(input_scale, float):
        input_scale_expr = relay.const(input_scale, "float32")
    else:
        # List (per-channel) or int scale: go through numpy to get a tensor const.
        input_scale_expr = relay.const(np.array(input_scale).astype("float32"))

    if isinstance(input_zero_point, float):
        input_zero_point_expr = relay.const(input_zero_point, "int32")
    else:
        input_zero_point_expr = relay.const(np.array(input_zero_point).astype("int32"))

    mod = relay.qnn.op.requantize(
        input_data,
        input_scale=input_scale_expr,
        input_zero_point=input_zero_point_expr,
        output_scale=relay.const(output_scale, "float32"),
        output_zero_point=relay.const(output_zero_point, "int32"),
        axis=axis,
        rounding=rounding,
        compute_dtype=compute_dtype,
        out_dtype=out_dtype,
    )

    mod = relay.Function(relay.analysis.free_vars(mod), mod)
    mod = tvm.IRModule.from_expr(mod)
    return mod
def test_same_scale():
    """Requantize with identical input/output scales must be the identity map
    and must lower without any right-shift operations."""
    # Have same scales, everything within range
    golden_data = np.arange(-100, 100, 1).astype("int32")
    golden_output = golden_data
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(200,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=0.5,
                    output_scale=0.5,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                assert "right_shift" not in mod.astext()
                verify(mod, (golden_data, golden_output))
def test_scalar_same_scale():
    """Same-scale requantize of a rank-0 (scalar) tensor is also the identity."""
    # Have same scales, everything within range
    golden_data = np.array(-10).astype("int32")
    golden_output = golden_data
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=0.5,
                    output_scale=0.5,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                assert "right_shift" not in mod.astext()
                verify(mod, (golden_data, golden_output))
def test_downscale():
    """Downscaling requantize (output_scale > input_scale) for power-of-two
    ratios, across rounding modes, signed/unsigned in/out dtypes."""
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=16,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                # Try positive values
                # 8 corresponds to 0.5, resulting in 1
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.repeat([0, 1, 2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))

                # Try negative values
                # -8 corresponds to -0.5. For UPWARD, this is 0
                golden_data = np.arange(0, -32, -1).astype("int32")
                if rounding == "UPWARD":
                    golden_output = np.repeat([0, -1, -2], [9, 16, 7])
                else:
                    golden_output = np.repeat([0, -1, -2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))

                # Try a different scale
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=4,
                    rounding=rounding,
                )
                # Try positive values
                # 2I corresponds to 0.5, resulting in 1
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.repeat([0, 1, 2, 3, 4, 5, 6, 7, 8], [2, 4, 4, 4, 4, 4, 4, 4, 2])
                verify(mod, (golden_data, golden_output))

                # Try negative values
                # -8 corresponds to -0.5. For UPWARD, this is 0
                golden_data = np.arange(0, -32, -1).astype("int32")
                if rounding == "UPWARD":
                    golden_output = np.repeat(
                        [0, -1, -2, -3, -4, -5, -6, -7, -8], [3, 4, 4, 4, 4, 4, 4, 4, 1]
                    )
                else:
                    golden_output = np.repeat(
                        [0, -1, -2, -3, -4, -5, -6, -7, -8], [2, 4, 4, 4, 4, 4, 4, 4, 2]
                    )
                verify(mod, (golden_data, golden_output))

                # Try uint8 out_dtype
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype="uint8",
                    input_scale=1,
                    output_scale=16,
                    rounding=rounding,
                )
                # Try positive values
                # 8 corresponds to 0.5, resulting in 1
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.repeat([0, 1, 2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))

                # Try uint8 in_dtyope and uint8 out_dtype
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="uint8",
                    out_dtype="uint8",
                    input_scale=1,
                    output_scale=16,
                    rounding=rounding,
                )
                # Try positive values
                # 8 corresponds to 0.5, resulting in 1
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.repeat([0, 1, 2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))
def test_upscale():
    """Upscaling requantize (input_scale=2, output_scale=1) doubles each value
    exactly for both positive and negative inputs."""
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=2,
                    output_scale=1,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                # Try positive values
                # 8 corresponds to 0.5, resulting in 1
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.multiply(2, golden_data)
                verify(mod, (golden_data, golden_output))

                # Try negative values
                # -8 corresponds to -0.5. For UPWARD, this is 0
                golden_data = np.arange(0, -32, -1).astype("int32")
                golden_output = np.multiply(2, golden_data)
                verify(mod, (golden_data, golden_output))
def test_non_power_of_two():
    """Requantize with a non-power-of-two scale ratio (3) in both directions:
    divide by 3 (output_scale=3) and multiply by 3 (input_scale=3)."""
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=3,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                # Try positive values
                golden_data = np.multiply(np.arange(0, 32, 1).astype("int32"), 3)
                golden_output = np.arange(0, 32, 1)
                verify(mod, (golden_data, golden_output))

                # Try negative values
                golden_data = np.multiply(np.arange(0, -32, -1).astype("int32"), 3)
                golden_output = np.arange(0, -32, -1)
                verify(mod, (golden_data, golden_output))

                # Try a different scale
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=3,
                    output_scale=1,
                    rounding=rounding,
                )
                # Try positive values
                golden_data = np.arange(0, 32, 1).astype("int32")
                golden_output = np.multiply(golden_data, 3)
                verify(mod, (golden_data, golden_output))

                # Try negative values
                golden_data = np.arange(0, -32, -1).astype("int32")
                golden_output = np.multiply(golden_data, 3)
                verify(mod, (golden_data, golden_output))
def test_saturation_int8():
    """Values outside the int8 range must clamp to [-128, 127] on output."""
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(16,),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=0.5,
                output_scale=0.5,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            # Inputs 120..135: everything above 127 saturates to 127.
            golden_data = np.arange(0, 16, 1).astype("int32")
            golden_data = np.add(120, golden_data)
            output = np.array(
                [120, 121, 122, 123, 124, 125, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127]
            )
            golden_output = output
            verify(mod, (golden_data, golden_output))

            # Try negative numbers
            golden_data = np.arange(0, -16, -1).astype("int32")
            golden_data = np.add(-120, golden_data)
            output = np.array(
                [
                    -120,
                    -121,
                    -122,
                    -123,
                    -124,
                    -125,
                    -126,
                    -127,
                    -128,
                    -128,
                    -128,
                    -128,
                    -128,
                    -128,
                    -128,
                    -128,
                ]
            )
            golden_output = output
            verify(mod, (golden_data, golden_output))
def test_saturation_int16():
    """Values outside the int16 range must clamp to [-32768, 32767] on output."""
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(16,),
                data_dtype="int32",
                out_dtype="int16",
                input_scale=0.5,
                output_scale=0.5,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            # Inputs 32760..32775: everything above 32767 saturates to 32767.
            golden_data = np.arange(0, 16, 1).astype("int32")
            golden_data = np.add(32760, golden_data)
            output = np.array(
                [
                    32760,
                    32761,
                    32762,
                    32763,
                    32764,
                    32765,
                    32766,
                    32767,
                    32767,
                    32767,
                    32767,
                    32767,
                    32767,
                    32767,
                    32767,
                    32767,
                ]
            )
            golden_output = output
            verify(mod, (golden_data, golden_output))

            # Try negative numbers
            golden_data = np.arange(0, -16, -1).astype("int32")
            golden_data = np.add(-32760, golden_data)
            output = np.array(
                [
                    -32760,
                    -32761,
                    -32762,
                    -32763,
                    -32764,
                    -32765,
                    -32766,
                    -32767,
                    -32768,
                    -32768,
                    -32768,
                    -32768,
                    -32768,
                    -32768,
                    -32768,
                    -32768,
                ]
            )
            golden_output = output
            verify(mod, (golden_data, golden_output))
def test_zero_point():
    """Output zero point is added after rescaling; input zero point is
    subtracted before rescaling."""
    # Output zero point
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(32,),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=1,
                output_scale=16,
                output_zero_point=1,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            # Try positive values
            # 8 corresponds to 0.5, resulting in 1
            golden_data = np.arange(0, 32, 1).astype("int32")
            golden_output = np.repeat([0, 1, 2], [8, 16, 8])
            golden_output = np.add(1, golden_output)
            verify(mod, (golden_data, golden_output))

            # Try negative values
            # -8 corresponds to -0.5. For UPWARD, this is 0
            golden_data = np.arange(-32, -64, -1).astype("int32")
            if rounding == "UPWARD":
                golden_output = np.repeat([-2, -3, -4], [9, 16, 7])
            else:
                golden_output = np.repeat([-2, -3, -4], [8, 16, 8])
            golden_output = np.add(1, golden_output)
            verify(mod, (golden_data, golden_output))

    # Input zero point
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=16,
                    input_zero_point=16,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                # Try positive values
                golden_data = np.arange(32, 64, 1).astype("int32")
                golden_output = np.repeat([2, 3, 4], [8, 16, 8])
                golden_output = np.subtract(golden_output, 1)
                verify(mod, (golden_data, golden_output))

                # Try negative values
                golden_data = np.arange(-32, -64, -1).astype("int32")
                if rounding == "UPWARD":
                    golden_output = np.repeat([-2, -3, -4], [9, 16, 7])
                else:
                    golden_output = np.repeat([-2, -3, -4], [8, 16, 8])
                golden_output = np.subtract(golden_output, 1)
                verify(mod, (golden_data, golden_output))
def test_per_channel_same_scale():
    """Per-channel input scales that all equal the output scale act as the
    identity, for both a trailing and a middle channel axis."""
    # Have same scales, everything within range
    golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
    golden_output = golden_data
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            for qnn_out_dtype in out_dtypes:
                mod = get_mod(
                    data_shape=(5, 2),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=[0.5, 0.5],
                    output_scale=0.5,
                    axis=1,
                    rounding=rounding,
                    compute_dtype=compute_dtype,
                )
                verify(mod, (golden_data, golden_output))

    # Change axis
    golden_data = np.arange(-10, 10, 1).astype("int32").reshape((2, 2, 5))
    golden_output = golden_data
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(2, 2, 5),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=[0.5, 0.5],
                output_scale=0.5,
                axis=1,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            verify(mod, (golden_data, golden_output))
def test_per_channel_different_scale():
    """Per-channel requantize where channels carry different input scales,
    covering a trailing axis, a middle axis, and input scale > output scale."""
    # Have same scales, everything within range
    golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
    golden_output = np.array([-5, -2, -3, -1, -1, 0, 1, 1, 3, 2]).reshape((5, 2))
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(5, 2),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=[0.5, 0.25],
                output_scale=0.5,
                axis=1,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            verify(mod, (golden_data, golden_output))

    # Change axis
    golden_data = np.arange(-20, 20, 2).astype("int32").reshape((2, 2, 5))
    golden_output = np.array(
        [-20, -18, -16, -14, -12, -5, -4, -3, -2, -1, 0, 2, 4, 6, 8, 5, 6, 7, 8, 9]
    ).reshape((2, 2, 5))
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(2, 2, 5),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=[0.5, 0.25],
                output_scale=0.5,
                axis=1,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            verify(mod, (golden_data, golden_output))

    # Have input scale > output scale
    golden_data = np.arange(-5, 5, 1).astype("int32").reshape((5, 2))
    golden_output = np.array([-10, -2, -6, -1, -2, 0, 2, 1, 6, 2]).reshape((5, 2))
    for compute_dtype in compute_dtypes:
        for rounding in roundings:
            mod = get_mod(
                data_shape=(5, 2),
                data_dtype="int32",
                out_dtype="int8",
                input_scale=[1.0, 0.25],
                output_scale=0.5,
                axis=1,
                rounding=rounding,
                compute_dtype=compute_dtype,
            )
            verify(mod, (golden_data, golden_output))
def test_default_cfg_and_no_args():
    """With no explicit rounding argument or config, the default behaves like
    UPWARD rounding (the [9, 16, 7] split for negative inputs)."""
    for qnn_out_dtype in out_dtypes:
        mod = get_mod(
            data_shape=(32,),
            data_dtype="int32",
            out_dtype=qnn_out_dtype,
            input_scale=1,
            output_scale=16,
        )
        golden_data = np.arange(0, -32, -1).astype("int32")
        golden_output = np.repeat([0, -1, -2], [9, 16, 7])
        verify(mod, (golden_data, golden_output))
def test_non_default_cfg_and_no_args():
    """When no rounding argument is given, the requantize_config rounding is
    picked up and determines the result."""
    for rounding_cfg in roundings:
        for qnn_out_dtype in out_dtypes:
            with relay.qnn.op.requantize_config(rounding=rounding_cfg):
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=16,
                )

                golden_data = np.arange(0, -32, -1).astype("int32")
                if rounding_cfg == "UPWARD":
                    golden_output = np.repeat([0, -1, -2], [9, 16, 7])
                else:
                    golden_output = np.repeat([0, -1, -2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))
def test_default_cfg_and_args():
    """An explicit rounding argument overrides the (default) config rounding."""
    for rounding in roundings:
        for qnn_out_dtype in out_dtypes:
            with relay.qnn.op.requantize_config(rounding="UPWARD"):
                mod = get_mod(
                    data_shape=(32,),
                    data_dtype="int32",
                    out_dtype=qnn_out_dtype,
                    input_scale=1,
                    output_scale=16,
                    rounding=rounding,
                )

                golden_data = np.arange(0, -32, -1).astype("int32")
                # Result follows the *argument*, not the config.
                if rounding == "UPWARD":
                    golden_output = np.repeat([0, -1, -2], [9, 16, 7])
                else:
                    golden_output = np.repeat([0, -1, -2], [8, 16, 8])
                verify(mod, (golden_data, golden_output))
def test_non_default_cfg_and_args():
    """An explicit rounding argument overrides any config rounding, for every
    combination of argument and config values."""
    for rounding_arg in roundings:
        for rounding_cfg in roundings:
            for qnn_out_dtype in out_dtypes:
                with relay.qnn.op.requantize_config(rounding=rounding_cfg):
                    mod = get_mod(
                        data_shape=(32,),
                        data_dtype="int32",
                        out_dtype=qnn_out_dtype,
                        input_scale=1,
                        output_scale=16,
                        rounding=rounding_arg,
                    )

                    golden_data = np.arange(0, -32, -1).astype("int32")
                    # Result follows the *argument*, not the config.
                    if rounding_arg == "UPWARD":
                        golden_output = np.repeat([0, -1, -2], [9, 16, 7])
                    else:
                        golden_output = np.repeat([0, -1, -2], [8, 16, 8])
                    verify(mod, (golden_data, golden_output))
if __name__ == "__main__":
    # This file predates tvm.testing.main(); invoke each test directly.
    test_same_scale()
    test_scalar_same_scale()
    test_downscale()
    test_upscale()
    test_non_power_of_two()
    test_saturation_int8()
    test_saturation_int16()
    test_zero_point()
    test_per_channel_same_scale()
    test_per_channel_different_scale()
    test_default_cfg_and_no_args()
    test_non_default_cfg_and_no_args()
    test_default_cfg_and_args()
    test_non_default_cfg_and_args()
| 23,003 | 34.4453 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_merge_compiler_regions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for merge compiler regions."""
import tvm
from tvm import relay
import tvm.relay.transform as transform
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.testing import run_opt_pass
def test_diamond_graph_fanouts():
    """
    This tests that the data dependencies present in a diamond-shaped
    graph are correctly resolved by the merging pass.
    O = supported by target
    X = not supported by target
       O         O
      / \\      / \\
     O   X --> O + + X
      \\ /      \\ /
       O         O
    Note that we can't just merge the three supported operators together,
    otherwise both subgraphs would depend on the other.
    """

    def diamond_graph_fanouts():
        # Input graph: every op individually wrapped in begin/end annotations.
        data = relay.var("data", shape=(10, 10))
        cb_1 = compiler_begin(data, "test")
        O_1 = relay.abs(cb_1)
        ce_1 = compiler_end(O_1, "test")
        ce_2 = compiler_end(O_1, "test")
        cb_2 = compiler_begin(ce_1, "test")
        cb_3 = compiler_begin(ce_2, "default")
        O_2 = relay.nn.relu(cb_2)
        ce_3 = compiler_end(O_2, "test")

        X = relay.tanh(cb_3)
        ce_4 = compiler_end(X, "default")

        cb_4 = compiler_begin(ce_3, "test")
        cb_5 = compiler_begin(ce_4, "test")
        O_3 = relay.add(cb_4, cb_5)
        ce_5 = compiler_end(O_3, "test")

        diamond = relay.Function([data], ce_5)
        return diamond

    def expected():
        # After merging: abs and relu share one "test" region (no begin/end
        # between them); tanh stays in its own "default" region; add remains
        # a separate "test" region so neither region depends on the other.
        data = relay.var("data", shape=(10, 10))
        cb_1 = compiler_begin(data, "test")
        O_1 = relay.abs(cb_1)
        ce_2 = compiler_end(O_1, "test")
        O_2 = relay.nn.relu(O_1)
        ce_3 = compiler_end(O_2, "test")

        cb_3 = compiler_begin(ce_2, "default")
        X = relay.tanh(cb_3)
        ce_4 = compiler_end(X, "default")

        cb_4 = compiler_begin(ce_3, "test")
        cb_5 = compiler_begin(ce_4, "test")
        O_3 = relay.add(cb_4, cb_5)
        ce_5 = compiler_end(O_3, "test")

        func = relay.Function([data], ce_5)
        return func

    result = run_opt_pass(diamond_graph_fanouts(), relay.transform.MergeCompilerRegions())
    golden = run_opt_pass(expected(), relay.transform.InferType())
    assert tvm.ir.structural_equal(result, golden)
def test_example_graph():
    """This tests the merging algorithm on the example used in the RFC.

    See the RFC here: https://discuss.tvm.apache.org/t/relay-improved-graph-partitioning-algorithm/5830
    Blue nodes are adds (target: test), red nodes are subtracts (target: default).
    """

    def annotated():
        # Fully annotated input: every operator carries its own
        # compiler_begin/compiler_end pair for its target.
        in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
        in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
        in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
        in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
        in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
        in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
        in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
        in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
        in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
        in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")
        begin0 = compiler_begin(in_1, "test")
        begin1 = compiler_begin(in_2, "test")
        begin2 = compiler_begin(in_3, "test")
        begin3 = compiler_begin(in_4, "test")
        node0 = relay.add(begin0, begin1)
        node1 = relay.add(begin2, begin3)
        end0 = compiler_end(node0, "test")
        end1 = compiler_end(node1, "test")
        begin4 = compiler_begin(end0, "test")
        begin5 = compiler_begin(end1, "test")
        node2 = relay.add(begin4, begin5)
        end2 = compiler_end(node2, "test")
        dbegin0 = compiler_begin(in_5, "default")
        dbegin1 = compiler_begin(in_6, "default")
        node3 = relay.subtract(dbegin0, dbegin1)
        dbegin2 = compiler_begin(in_7, "default")
        dend1 = compiler_end(node3, "default")
        dbegin3 = compiler_begin(dend1, "default")
        node4 = relay.subtract(dbegin2, dbegin3)
        dend2 = compiler_end(node4, "default")
        begin6 = compiler_begin(end2, "test")
        begin7 = compiler_begin(dend2, "test")
        node5 = relay.add(begin6, begin7)
        end3 = compiler_end(node5, "test")
        end4 = compiler_end(node5, "test")
        dbegin4 = compiler_begin(in_8, "default")
        dbegin5 = compiler_begin(end3, "default")
        node6 = relay.subtract(dbegin4, dbegin5)
        begin8 = compiler_begin(in_9, "test")
        begin9 = compiler_begin(end4, "test")
        node7 = relay.add(begin8, begin9)
        end5 = compiler_end(node7, "test")
        dend3 = compiler_end(node6, "default")
        begin10 = compiler_begin(dend3, "test")
        begin11 = compiler_begin(end5, "test")
        node8 = relay.add(begin10, begin11)
        end6 = compiler_end(node8, "test")
        begin12 = compiler_begin(in_10, "test")
        begin13 = compiler_begin(end6, "test")
        node9 = relay.add(begin12, begin13)
        end7 = compiler_end(node9, "test")
        f = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end7)
        mod = tvm.IRModule.from_expr(f)
        return mod

    def expected():
        # Expected result after merging: adjacent same-target operators share
        # a single region, so most intermediate begin/end pairs disappear.
        in_1 = relay.var("in_1", shape=(10, 10), dtype="float32")
        in_2 = relay.var("in_2", shape=(10, 10), dtype="float32")
        in_3 = relay.var("in_3", shape=(10, 10), dtype="float32")
        in_4 = relay.var("in_4", shape=(10, 10), dtype="float32")
        in_5 = relay.var("in_5", shape=(10, 10), dtype="float32")
        in_6 = relay.var("in_6", shape=(10, 10), dtype="float32")
        in_7 = relay.var("in_7", shape=(10, 10), dtype="float32")
        in_8 = relay.var("in_8", shape=(10, 10), dtype="float32")
        in_9 = relay.var("in_9", shape=(10, 10), dtype="float32")
        in_10 = relay.var("in_10", shape=(10, 10), dtype="float32")
        begin0 = compiler_begin(in_1, "test")
        begin1 = compiler_begin(in_2, "test")
        begin2 = compiler_begin(in_3, "test")
        begin3 = compiler_begin(in_4, "test")
        node0 = relay.add(begin0, begin1)
        node1 = relay.add(begin2, begin3)
        node2 = relay.add(node0, node1)
        dbegin0 = compiler_begin(in_5, "default")
        dbegin1 = compiler_begin(in_6, "default")
        dbegin2 = compiler_begin(in_7, "default")
        node3 = relay.subtract(dbegin0, dbegin1)
        node4 = relay.subtract(dbegin2, node3)
        dend0 = compiler_end(node4, "default")
        begin4 = compiler_begin(dend0, "test")
        begin5 = compiler_begin(in_9, "test")
        node5 = relay.add(node2, begin4)
        end1 = compiler_end(node5, "test")
        dbegin4 = compiler_begin(end1, "default")
        dbegin5 = compiler_begin(in_8, "default")
        node6 = relay.subtract(dbegin5, dbegin4)
        dend1 = compiler_end(node6, "default")
        node7 = relay.add(begin5, node5)
        end2 = compiler_end(node7, "test")
        begin6 = compiler_begin(end2, "test")
        begin7 = compiler_begin(dend1, "test")
        node8 = relay.add(begin7, begin6)
        begin8 = compiler_begin(in_10, "test")
        node9 = relay.add(begin8, node8)
        end3 = compiler_end(node9, "test")
        f = relay.Function([in_1, in_2, in_3, in_4, in_5, in_6, in_7, in_8, in_9, in_10], end3)
        mod = tvm.IRModule.from_expr(f)
        return mod

    # Merge regions, then infer types on both sides before the structural
    # comparison (MergeCompilerRegions does not produce typed IR itself).
    mod = annotated()
    mod = relay.transform.MergeCompilerRegions()(mod)
    mod = relay.transform.InferType()(mod)
    ref_mod = expected()
    ref_mod = relay.transform.InferType()(ref_mod)
    assert tvm.ir.structural_equal(mod, ref_mod)
def test_if_else():
    """
    This tests that the restriction regions propagate successfully through
    if_else control flow.

    O = supported by target
    X = not supported by target

     O1 - - - |          O1 --|
      |       |           |
      X       |           X
      |       |    -->    |
     If cond ? O1: X   + + If cond ? O1: X +
      |       |           |
     O2 <- - -|          O2 <-|

    Avoid O1 merge to O2.
    """
    target = "test_if_else"

    # Mark sigmoid, erf and add as supported by this test's target.  NOTE:
    # these registrations are process-global and persist after the test.
    @tvm.ir.register_op_attr("sigmoid", "target." + target)
    def sigmoid(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("erf", "target." + target)
    def erf(expr):  # pylint: disable=unused-variable
        return True

    @tvm.ir.register_op_attr("add", "target." + target)
    def add(expr):  # pylint: disable=unused-variable
        return True

    """Test that If-else nodes merges regions correctly."""

    def get_mod():
        # Build a module whose If condition and both branches consume values
        # produced by the supported add0 / unsupported subtract.
        data = relay.var("data", shape=(1, 32))
        add0 = relay.add(data, data)
        sub0 = relay.subtract(add0, data)
        eq = relay.equal(relay.sum(add0), relay.sum(sub0))
        true_branch = relay.sigmoid(add0)
        false_branch = relay.sigmoid(sub0)
        ife = relay.If(eq, true_branch, false_branch)
        erf = relay.erf(ife)
        out = relay.add(add0, erf)
        func = relay.Function([data], out)
        mod = tvm.IRModule.from_expr(func)
        return mod

    for annotate_non_call_ops in [True, False]:
        result = transform.AnnotateTarget(target, annotate_non_call_ops)(get_mod())
        merge = transform.MergeCompilerRegions()(result)
        # Ensure partition finished without segment fault.
        partition = transform.PartitionGraph()(merge)
if __name__ == "__main__":
    # Execute each merge-region test in order when run as a script.
    for _case in (test_diamond_graph_fanouts, test_example_graph, test_if_else):
        _case()
| 10,492 | 36.20922 | 103 | py |
tvm | tvm-main/tests/python/relay/test_auto_scheduler_layout_rewrite_networks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test layout rewrite support for whole neural networks"""
import sys
import tempfile
import pytest
import numpy as np
import tvm
from tvm import relay, auto_scheduler
from tvm.contrib import graph_executor
import tvm.testing
def get_np_array(var, dtype):
    """Return a random numpy array whose shape matches the relay variable's
    type annotation, cast to `dtype`."""
    dims = tuple(int(dim) for dim in var.type_annotation.shape)
    return np.random.randn(*dims).astype(dtype)
def get_relay_conv2d(
    outc=32,
    inc=32,
    height=14,
    width=14,
    kh=3,
    kw=3,
    batch=1,
    pad=0,
    stride=1,
    dilation=1,
    layout="NHWC",
):
    """Build a single-conv2d relay module plus random data/weight arrays.

    Returns (mod, data, weight) where `mod` contains one conv2d in the
    requested `layout` and `data`/`weight` are random float32 numpy arrays
    matching the relay variables.

    Raises:
        ValueError: if `layout` is neither "NHWC" nor "NCHW".  Previously an
            unsupported layout fell through the if/elif chain and crashed
            later with a confusing NameError on `kernel_layout`.
    """
    dtype = "float32"
    if layout == "NHWC":
        kernel_layout = "HWIO"
        d = relay.var("data", shape=(batch, height, width, inc), dtype=dtype)
        w = relay.var("weight", shape=(kh, kw, inc, outc), dtype=dtype)
    elif layout == "NCHW":
        kernel_layout = "OIHW"
        d = relay.var("data", shape=(batch, inc, height, width), dtype=dtype)
        w = relay.var("weight", shape=(outc, inc, kh, kw), dtype=dtype)
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError(f"Unsupported layout: {layout!r}; expected 'NHWC' or 'NCHW'")
    y = relay.nn.conv2d(
        d,
        w,
        padding=pad,
        kernel_size=(kh, kw),
        strides=(stride, stride),
        dilation=(dilation, dilation),
        channels=outc,
        groups=1,
        data_layout=layout,
        kernel_layout=kernel_layout,
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d, w], y)
    data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
    return mod, data, weight
def get_relay_conv3d(
    outc=8,
    inc=8,
    depth=8,
    height=7,
    width=7,
    kd=1,
    kh=1,
    kw=1,
    batch=1,
    pad=0,
    stride=1,
    dilation=1,
    layout="NDHWC",
):
    """Build a single-conv3d relay module plus random data/weight arrays.

    Returns (mod, data, weight) where `mod` contains one conv3d in the
    requested `layout` and `data`/`weight` are random float32 numpy arrays
    matching the relay variables.

    Raises:
        ValueError: if `layout` is neither "NDHWC" nor "NCDHW".  Previously an
            unsupported layout fell through the if/elif chain and crashed
            later with a confusing NameError on `kernel_layout`.
    """
    dtype = "float32"
    if layout == "NDHWC":
        kernel_layout = "DHWIO"
        d = relay.var("data", shape=(batch, depth, height, width, inc), dtype=dtype)
        w = relay.var("weight", shape=(kd, kh, kw, inc, outc), dtype=dtype)
    elif layout == "NCDHW":
        kernel_layout = "OIDHW"
        d = relay.var("data", shape=(batch, inc, depth, height, width), dtype=dtype)
        w = relay.var("weight", shape=(outc, inc, kd, kh, kw), dtype=dtype)
    else:
        # Fail fast with a clear message instead of a NameError below.
        raise ValueError(f"Unsupported layout: {layout!r}; expected 'NDHWC' or 'NCDHW'")
    y = relay.nn.conv3d(
        d,
        w,
        padding=pad,
        kernel_size=(kd, kh, kw),
        strides=(stride, stride, stride),
        dilation=(dilation, dilation, dilation),
        channels=outc,
        groups=1,
        data_layout=layout,
        kernel_layout=kernel_layout,
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([d, w], y)
    data, weight = get_np_array(d, dtype), get_np_array(w, dtype)
    return mod, data, weight
def get_relay_dense(m=128, n=128, k=128):
    """Build a single-dense (m,k)x(n,k) relay module plus random arrays."""
    dtype = "float32"
    data_var = relay.var("data", shape=(m, k), dtype=dtype)
    weight_var = relay.var("weight", shape=(n, k), dtype=dtype)
    out = relay.nn.dense(data_var, weight_var)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data_var, weight_var], out)
    data = get_np_array(data_var, dtype)
    weight = get_np_array(weight_var, dtype)
    return mod, data, weight
def get_relay_batchmm(batch=4, m=128, n=128, k=128):
    """Build a single-batch_matmul relay module plus random arrays."""
    dtype = "float32"
    data_var = relay.var("data", shape=(batch, m, k), dtype=dtype)
    weight_var = relay.var("weight", shape=(batch, n, k), dtype=dtype)
    out = relay.nn.batch_matmul(data_var, weight_var)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data_var, weight_var], out)
    data = get_np_array(data_var, dtype)
    weight = get_np_array(weight_var, dtype)
    return mod, data, weight
def tune_and_check(mod, data, weight, target, dev):
    """Auto-tune `mod` for one measurement trial, build it with and without
    the tuned log, and check both builds produce the same output."""
    # Extract tasks from a relay program
    tasks, task_weights = auto_scheduler.extract_tasks(
        mod, target=target, params={"weight": weight}
    )
    with tempfile.NamedTemporaryFile() as fp:
        log_file = fp.name
        # Tune tasks
        tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=1,
            num_measures_per_round=1,
            builder=auto_scheduler.LocalBuilder(timeout=60),
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
        )
        tuner.tune(tune_option, search_policy="sketch.random")
        # Compile with the tuning records applied (this is what exercises the
        # layout-rewrite path under test).
        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=3,
                config={"relay.backend.use_auto_scheduler": True},
            ):
                lib = relay.build(mod, target=target, params={"weight": weight})
        # Compile without auto-scheduler for correctness check
        with tvm.transform.PassContext(opt_level=0):
            lib2 = relay.build(mod, target=target, params={"weight": weight})

    def get_output(data, lib):
        # Run one inference through the graph executor and return the result.
        module = graph_executor.GraphModule(lib["default"](dev))
        module.set_input("data", data)
        module.run()
        return module.get_output(0).numpy()

    # Check correctness
    actual_output = get_output(data, lib)
    expected_output = get_output(data, lib2)
    tvm.testing.assert_allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
# layout rewriting only works on CPU targets
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d(target, dev):
    """Tune and verify a 1x1 conv2d workload."""
    workload = get_relay_conv2d(kh=1, kw=1)
    tune_and_check(*workload, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv2d_winograd(target, dev):
    """Tune and verify a 3x3 conv2d (winograd-eligible) workload."""
    workload = get_relay_conv2d(outc=128, kh=3, kw=3)
    tune_and_check(*workload, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_conv3d(target, dev):
    """Tune and verify a conv3d workload."""
    workload = get_relay_conv3d()
    tune_and_check(*workload, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_dense(target, dev):
    """Tune and verify a dense workload."""
    workload = get_relay_dense()
    tune_and_check(*workload, target, dev)
@tvm.testing.parametrize_targets("llvm", "llvm -device=arm_cpu")
def test_batch_matmul(target, dev):
    """Tune and verify a batch_matmul workload."""
    workload = get_relay_batchmm()
    tune_and_check(*workload, target, dev)
if __name__ == "__main__":
    # Let tvm.testing discover and run all tests in this file.
    tvm.testing.main()
| 6,838 | 30.37156 | 89 | py |
tvm | tvm-main/tests/python/relay/test_op_level3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import sys
from typing import Callable, Optional
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.error import TVMError
from tvm.relay import create_executor, transform
from tvm.relay.testing import check_grad, run_infer_type
from utils import ref_funcs
executor_kind = tvm.testing.parameter("graph", "vm")
class TestZerosOnes:
    """Check relay.zeros / relay.ones against their numpy equivalents."""

    config = {"zeros": (relay.zeros, np.zeros), "ones": (relay.ones, np.ones)}
    op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())

    def test_zeros_ones(self, op, ref):
        y = op(shape=(124, 50), dtype="float64")
        yy = run_infer_type(y)
        # Type inference must yield exactly the requested shape and dtype.
        assert yy.checked_type == relay.TensorType((124, 50), "float64")
        intrp_res = create_executor().evaluate(y).numpy()
        np.testing.assert_allclose(intrp_res, ref((124, 50), "float64"))
class TestUnaryIdentity:
    """Elementwise unary ops whose output type equals the input type."""

    config = {
        "zeros_like": (relay.zeros_like, np.zeros_like),
        "ones_like": (relay.ones_like, np.ones_like),
        "ceil": (relay.ceil, np.ceil),
        "floor": (relay.floor, np.floor),
        "trunc": (relay.trunc, np.trunc),
        "round": (relay.round, np.round),
        "abs": (relay.abs, np.abs),
        "copy": (relay.copy, None),  # np.copy
        "negative": (relay.negative, np.negative),
        "sign": (relay.sign, np.sign),
    }
    op, ref = tvm.testing.parameters(*config.values(), ids=config.keys())

    def test_unary_identity(self, op, ref):
        shape = (8, 9, 4)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = op(x)
        yy = run_infer_type(y)
        # The op must preserve the input shape and dtype exactly.
        assert yy.checked_type == relay.TensorType(shape, "float32")

        # `ref` is None for ops (e.g. copy) with no numpy reference to check.
        if ref is not None:
            data = np.random.rand(*shape).astype("float32")
            op_res = create_executor().evaluate(y, {x: relay.const(data)})
            ref_res = ref(data)
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_cast():
    """Both `expr.astype` and `relay.cast` should infer the target dtype."""
    for make_cast in (lambda e: e.astype("int32"), lambda e: relay.cast(e, "int32")):
        inp = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
        out = make_cast(inp)
        inferred = run_infer_type(out)
        assert "dtype=" in inferred.astext()
        assert inferred.checked_type == relay.TensorType((8, 9, 4), "int32")
def test_sliding_window():
    """Check relay.sliding_window's inferred type and windowed values."""
    # Slide a window of shape (3, 4, 5) over the x tensor, beginning with
    # dimension 1, which slides the window over the two subtensors of shape (3,
    # 32, 32).
    x = relay.var("x", relay.TensorType((2, 3, 32, 32), "float32"))
    y = relay.sliding_window(x, 1, [3, 4, 5], [1, 2, 3])

    # The resulting shape still has batch size 2. Each dimension in (1, 15, 10)
    # represents the locations where we were able to form a window; that is, we
    # were able to place the window in one place along the dimension of length
    # 3, 15 places along the dimension of length 32 (when striding by 2), and 10
    # places along the second dimension of length 32 (when striding by 3). The
    # remaining dimensions (3, 4, 5) represent the formed windows.
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((2, 1, 15, 10, 3, 4, 5), "float32")

    data = np.random.rand(2, 3, 32, 32).astype("float32")
    intrp = create_executor()
    result = intrp.evaluate(y, {x: relay.const(data)})
    result_np = result.numpy()
    assert result_np.shape == (2, 1, 15, 10, 3, 4, 5)
    # Spot-check three window positions against direct slices of the input.
    assert np.array_equal(result_np[0, 0, 0, 0, :, :, :], data[0, :, 0:4, 0:5])
    assert np.array_equal(result_np[1, 0, 7, 3, :, :, :], data[1, :, 14:18, 9:14])
    assert np.array_equal(result_np[1, 0, 14, 9, :, :, :], data[1, :, 28:32, 27:32])
def test_clip():
    """relay.clip should preserve the input type and match np.clip."""
    inp = relay.var("a", relay.TensorType((10, 4), "float32"))
    clipped = relay.clip(inp, 1.0, 4.0)
    inferred = run_infer_type(clipped)
    assert inferred.checked_type == relay.TensorType((10, 4), "float32")

    samples = np.random.rand(10, 4).astype("float32")
    op_res = create_executor().evaluate(clipped, {inp: relay.const(samples)})
    np.testing.assert_allclose(op_res.numpy(), np.clip(samples, 1.0, 4.0), rtol=0.01)
def test_fixed_point_multiply():
    """Multiply by the fixed-point constant 1/16 encoded as (M, s) and check
    23 * 1/16 rounds to 1 (see the derivation below)."""
    # Test 23 * 1/16
    # [m,s] = [0.5, -3] = frexp(1/16)
    # M = 0.5*2^31 = 1073741824
    # so M = 1073741824 and s = -3
    a = relay.var("a", relay.TensorType((10, 4), "int32"))
    y = relay.fixed_point_multiply(a, 1073741824, -3)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((10, 4), "int32")

    data = 23 * np.ones((10, 4)).astype("int32")
    op_res = create_executor().evaluate(y, {a: relay.const(data)})
    # atol=1 tolerates rounding-mode differences around 23/16 ~ 1.4.
    ref_res = np.ones((10, 4)).astype("int32")
    np.testing.assert_allclose(op_res.numpy(), ref_res, atol=1)
def test_reinterpret():
    """relay.reinterpret should match numpy's view-based bit reinterpretation."""
    inp = relay.var("a", relay.TensorType((1000, 4), "float32"))
    out = relay.reinterpret(inp, "int32")
    inferred = run_infer_type(out)
    assert inferred.checked_type == relay.TensorType((1000, 4), "int32")

    samples = np.random.randn(1000, 4).astype("float32") * 1000
    op_res = create_executor().evaluate(out, {inp: relay.const(samples)})
    np.testing.assert_equal(op_res.numpy(), samples.view("int32"))
def test_approximate_transcendental():
    """Build polynomial/bit-trick approximations of exp, sigmoid and tanh out
    of primitive relay ops and compare them against numpy references."""

    def C(x):
        # Shorthand for a float32 relay constant.
        return relay.expr.const(x, "float32")

    def approx_exp(x):
        # An approximation derived from Opus,
        # https://github.com/xiph/opus/blob/c1c247/celt/mathops.h#L147-L165
        x = relay.minimum(relay.maximum(x, C(-88.0)), C(88.0))
        x = C(127.0) + x * C(1.44269504)
        xf = relay.floor(x)
        i = relay.cast(xf, "int32")
        x = x - xf
        Y = C(0.99992522) + x * (C(0.69583354) + x * (C(0.22606716) + x * C(0.078024523)))
        # Shift the integer part into the float32 exponent field, then
        # reinterpret the bits as a float to get 2^i.
        exponent = relay.left_shift(i, relay.expr.const(23, "int32"))
        exponent = relay.reinterpret(exponent, "float32")
        return exponent * Y

    def approximate_sigmoid(x):
        y = approx_exp(x)
        return y / (y + C(1.0))

    def approximate_tanh(x):
        x = x * C(2.0)
        y = approx_exp(x)
        return (y - C(1.0)) / (y + C(1.0))

    a = relay.var("a", relay.TensorType((1000,), "float32"))
    y = approximate_sigmoid(a)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1000,), "float32")
    data = np.linspace(-5, 5, 1000).astype("float32")
    op_res = create_executor().evaluate(y, {a: relay.const(data)})

    def reference_sigmoid(x):
        # Numerically stable sigmoid via logaddexp.
        return np.exp(-np.logaddexp(0, -x))

    np.testing.assert_allclose(op_res.numpy(), reference_sigmoid(data), atol=2e-5, rtol=1e-9)

    y = approximate_tanh(a)
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((1000,), "float32")
    data = np.linspace(-5, 5, 1000).astype("float32")
    op_res = create_executor().evaluate(y, {a: relay.const(data)})

    def reference_tanh(x):
        return np.tanh(x)

    np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9)
class TestSqueeze:
    """Run relay.squeeze with various axis specs and compare to np.squeeze."""

    # axis may be None (squeeze all unit dims), an int, a list, or [] (no-op).
    shape, dtype, axis = tvm.testing.parameters(
        ((1, 3, 2, 5), "float32", None),
        ((1, 3, 1), "float32", [0]),
        ((1, 2, 1, 2, 1), "float32", [0, 2]),
        ((1, 3, 1), "float32", 2),
        ((1, 3, 1), "float32", []),
    )

    def test_squeeze(self, shape, dtype, axis):
        x = relay.var("x", relay.TensorType(shape, dtype))
        squeeze = relay.squeeze(x, axis=axis)

        # np.squeeze wants a tuple (or None), so normalize the axis spec.
        if isinstance(axis, int):
            np_axis = (axis,)
        else:
            np_axis = tuple(axis) if axis is not None else None

        data = np.random.random_sample(shape).astype(dtype)
        op_res = create_executor().evaluate(squeeze, {x: relay.const(data)})
        ref_res = np.squeeze(data, axis=np_axis)
        np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_transpose_infer_type():
    """Transpose type inference with explicit axes and with the default
    (full reversal), including symbolic dimensions."""
    n, t, d = te.size_var("n"), te.size_var("t"), 100
    x = relay.var("x", relay.TensorType((n, t, d), "float32"))
    cases = [
        ((1, 0, 2), (t, n, 100)),
        (None, (100, t, n)),
    ]
    for axes, expected_shape in cases:
        out = relay.transpose(x) if axes is None else relay.transpose(x, axes=axes)
        assert "axes=" in out.astext()
        inferred = run_infer_type(out)
        assert inferred.checked_type == relay.TensorType(expected_shape, "float32")
def test_transpose(target, dev, executor_kind):
    """Execute a (0, 2, 1) transpose and compare against np.transpose."""
    dshape = (2, 3, 4)
    axes = (0, 2, 1)
    inp = relay.var("x", relay.TensorType(dshape, "float32"))
    func = relay.Function([inp], relay.transpose(inp, axes=axes))
    x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
    executor = relay.create_executor(executor_kind, device=dev, target=target)
    op_res = executor.evaluate(func)(x_data)
    tvm.testing.assert_allclose(op_res.numpy(), np.transpose(x_data, axes=axes), rtol=1e-5)
def test_squeeze_infer_type():
    """Squeeze with an explicit axis, and with all unit axes removed."""
    # Explicit axis: only dimension 2 is dropped.
    x = relay.var("x", relay.TensorType((1, 4, 1), "float32"))
    explicit = relay.squeeze(x, axis=(2,))
    assert "axis=" in explicit.astext()
    assert run_infer_type(explicit).checked_type == relay.TensorType((1, 4), "float32")

    # No axis: every unit dimension is dropped.
    x = relay.var("x", relay.TensorType((1, 4, 1), "float32"))
    implicit = relay.squeeze(x)
    assert "axis=" not in implicit.astext()
    assert run_infer_type(implicit).checked_type == relay.TensorType((4,), "float32")
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_squeeze_bad_axes_infer_type():
    """Squeezing a non-unit axis must fail type inference."""
    x = relay.var("x", relay.TensorType((1, 4, 1), "float32"))
    run_infer_type(relay.squeeze(x, axis=(1,)))
def test_reshape_infer_type():
    """Reshape with a concrete newshape infers the expected tensor type."""
    n, t, d1, d2 = 10, 20, 100, 20
    x = relay.var("x", relay.TensorType((n, t, d1, d2), "float32"))
    reshaped = relay.reshape(x, newshape=(n, t, 2000))
    assert "newshape=" in reshaped.astext()
    inferred = run_infer_type(reshaped)
    assert inferred.checked_type == relay.TensorType((n, t, 2000), "float32")
class TestReshape:
    """Run relay.reshape end-to-end and compare to np.reshape.

    The newshape entries use relay's special codes: 0 copies the input dim,
    -1 infers a dim, -2 copies the remaining dims, -3 merges two dims, and
    -4 splits a dim (see the relay.reshape operator documentation).
    """

    shape, newshape, oshape = tvm.testing.parameters(
        ((2, 3, 4), (8, 3), (8, 3)),
        ((4, 7), (2, 7, 2), (2, 7, 2)),
        ((2, 3, 4), (4, 0, 2), (4, 3, 2)),
        ((2, 3, 4), (2, 0, 0), (2, 3, 4)),
        ((2, 3, 4), (0, -1), (2, 12)),
        ((2, 3, 4), (-1, 0), (8, 3)),
        ((2, 3, 4), (2, -2), (2, 3, 4)),
        ((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1)),
        ((2, 3, 4), (-3, 4), (6, 4)),
        ((2, 3, 4, 5), (-3, -3), (6, 20)),
        ((2, 3, 4), (0, -3), (2, 12)),
        ((2, 3, 4), (-3, -2), (6, 4)),
        ((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4)),
        ((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4)),
        ((1,), (), ()),
    )

    def test_reshape(self, target, dev, executor_kind, shape, newshape, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        z = relay.reshape(x, newshape=newshape)
        zz = run_infer_type(z)
        assert "newshape=" in z.astext()
        assert zz.checked_type == relay.ty.TensorType(oshape, "float32")

        func = relay.Function([x], z)
        # Also verify the gradient of the reshape is well-formed.
        check_grad(func)
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        ref_res = np.reshape(x_data, oshape)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_reshape_fail():
    """Reshaping 6 elements into shape [7] must raise a TVMError."""
    with pytest.raises(TVMError):
        bad = relay.reshape(relay.var("x", relay.TensorType([2, 3], "float32")), [7])
        run_infer_type(bad)
def test_reshape_like_infer_type():
    """Type inference for reshape_like: concrete, symbolic and partial
    (lhs/rhs begin-end windowed) reshaping."""
    # concrete shape
    x = relay.var("x", relay.TensorType((1, 2, 3), "float32"))
    y = relay.var("y", relay.TensorType((1, 6), "float32"))
    z = relay.reshape_like(x, y)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((1, 6), "float32")

    # symbolic shape
    n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.var("y", relay.TensorType((1, 8, 8), "float32"))
    z = relay.reshape_like(x, y)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((1, 8, 8), "float32")

    # partial reshaping: only dims [lhs_begin, lhs_end) of x are replaced by
    # dims [rhs_begin, rhs_end) of y.
    x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
    y = relay.var("y", relay.TensorType((1, 6, 5), "float32"))
    z = relay.reshape_like(x, y, lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((1, 6, 4), "float32")

    x = relay.var("x", relay.TensorType((1, 2, 3, 4), "float32"))
    y = relay.var("y", relay.TensorType((2, 3, 4, 1, 6), "float32"))
    z = relay.reshape_like(x, y, rhs_end=3)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((2, 3, 4), "float32")
    z = relay.reshape_like(x, y, rhs_begin=2)
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((4, 1, 6), "float32")

    # symbolic partial reshaping
    n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.var("y", relay.TensorType((5, 6), "float32"))
    z = relay.var("z", relay.TensorType((4,), "float32"))
    w = relay.reshape_like(x, y, lhs_end=3)
    w = relay.reshape_like(w, z, lhs_begin=2)
    w = run_infer_type(w)
    assert w.checked_type == relay.TensorType((5, 6, 4), "float32")
class TestReshapeLike:
    """Run relay.reshape_like end-to-end and compare against np.reshape."""

    shape, oshape, shape_like, reshape_like_kwargs = tvm.testing.parameters(
        ((2, 3, 4), (1, 8, 3), None, {}),
        ((4, 7), (2, 7, 2), None, {}),
        ((1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)),
    )

    def test_reshape_like(
        self, target, dev, executor_kind, shape, oshape, shape_like=None, reshape_like_kwargs=None
    ):
        """Check both the inferred type and the numeric output of reshape_like.

        `reshape_like_kwargs` previously defaulted to a shared mutable `{}`;
        use None to avoid the mutable-default-argument pitfall (behavior is
        unchanged for all existing callers).
        """
        if reshape_like_kwargs is None:
            reshape_like_kwargs = {}
        if shape_like is None:
            shape_like = oshape
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=shape_like).astype("float32")
        ref_res = np.reshape(x_data, oshape)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        # NOTE(review): the second relay var is also named "x" — looks like a
        # copy-paste slip, but harmless since relay vars are identified by
        # object rather than name.
        y = relay.var("x", relay.TensorType(shape_like, "float32"))
        z = relay.reshape_like(x, y, **reshape_like_kwargs)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.ty.TensorType(ref_res.shape, "float32")
        func = relay.Function([x, y], z)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, y_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestTakeInferType:
    """Type inference for relay.take over symbolic and concrete shapes."""

    d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3")
    d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6")
    # axis=None flattens the input; otherwise the indexed axis is replaced by
    # the indices' shape in the output.
    dshape, indices_shape, oshape, axis = tvm.testing.parameters(
        ((d1,), (1,), (1,), 0),
        ((4,), (d1, d2), (d1, d2), None),
        ((3, 3, 3), (1, d2), (1, d2), None),
        ((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0),
        ((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1),
        ((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2),
    )

    def test_take(self, dshape, indices_shape, oshape, axis):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
        y = relay.take(x, indices, axis=axis)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType(oshape, "float32")
class TestTake:
    """Execute relay.take with clip/wrap/fast out-of-bounds modes and several
    index dtypes, comparing against np.take."""

    src_shape, indices_src, axis, mode, indices_dtype = tvm.testing.parameters(
        ((4,), [1], None, "clip", "int32"),
        ((4,), [[0, 1, 2, 3]], None, "clip", "int32"),
        ((3, 3, 3), [[11, 25]], None, "clip", "int32"),
        ((4,), [[0, 1], [2, 3]], None, "clip", "int32"),
        ((4,), [1], 0, "clip", "int32"),
        ((2, 2), [[[1, 0], [0, 1]]], 0, "clip", "int32"),
        ((2, 2), [[[1, 0], [0, 1]]], 1, "clip", "int32"),
        ((4, 3, 5, 6), [[2, 1, 0, 0]], -2, "clip", "int32"),
        ((3, 4), [-5, 20], None, "clip", "int32"),
        ((3, 4), [-5, 20], None, "wrap", "int32"),
        ((3, 4), [-1, 2], 0, "clip", "int32"),
        ((3, 4), [-1, 2], 0, "wrap", "int32"),
        ((3, 4), [-1, 2], 1, "clip", "int32"),
        ((3, 4), [-1, 2], 1, "wrap", "int32"),
        ((3, 3, 3), [[11, 25]], None, "fast", "int32"),
        ((3, 4), [0, 2], 0, "fast", "int32"),
        ((3, 4), [0, 2], 1, "fast", "int32"),
        ((3, 4), [1, 2], 1, "clip", "uint32"),
        ((3, 4), [1, 2], 1, "wrap", "uint16"),
        ((3, 3, 3), [1, 2], None, "fast", "uint16"),
        ((3, 4), [0, 2], 0, "fast", "uint8"),
    )

    # Incorrect numeric output in some cases on vulkan
    @tvm.testing.known_failing_targets("vulkan")
    def test_take(
        self, target, dev, executor_kind, src_shape, indices_src, axis, mode, indices_dtype
    ):
        src_dtype = "float32"
        indices_src = np.array(indices_src, dtype=indices_dtype)
        x = relay.var("x", relay.TensorType(src_shape, src_dtype))
        indices = relay.var("indices", relay.TensorType(indices_src.shape, indices_dtype))
        z = relay.take(x, indices, axis=axis, mode=mode)

        func = relay.Function([x, indices], z)
        x_data = np.random.uniform(low=-1, high=1, size=src_shape).astype(src_dtype)
        # relay's "fast" mode has no numpy equivalent; use "raise" since the
        # "fast" cases above all stay in bounds.
        np_mode = "raise" if mode == "fast" else mode

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, indices_src
        )
        # Old versions of numpy has take internally cast inside take which may violate
        # safety rules. We have such version in i386 CI image.
        indices_src = indices_src.astype("int32")
        ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode)
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestSplitInferType:
    """Type inference for relay.split with section counts and index tuples,
    over concrete and symbolic input shapes."""

    idxd = tvm.tir.indexdiv

    d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
    # NOTE: this class-level `axis` is immediately shadowed by the
    # tvm.testing.parameters assignment below.
    axis = te.var("axis")

    dshape, indices_or_sections, ret_type, axis = tvm.testing.parameters(
        (
            (5, 5, 2, 2),
            5,
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((5, 1, 2, 2), "float32"),
                        relay.ty.TensorType((5, 1, 2, 2), "float32"),
                        relay.ty.TensorType((5, 1, 2, 2), "float32"),
                        relay.ty.TensorType((5, 1, 2, 2), "float32"),
                        relay.ty.TensorType((5, 1, 2, 2), "float32"),
                    ]
                )
            ),
            1,
        ),
        (
            (5, 5, 2, 2),
            5,
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((1, 5, 2, 2), "float32"),
                        relay.ty.TensorType((1, 5, 2, 2), "float32"),
                        relay.ty.TensorType((1, 5, 2, 2), "float32"),
                        relay.ty.TensorType((1, 5, 2, 2), "float32"),
                        relay.ty.TensorType((1, 5, 2, 2), "float32"),
                    ]
                )
            ),
            0,
        ),
        (
            (d1, d2, d3, d4),
            4,
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
                        relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
                        relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
                        relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"),
                    ]
                )
            ),
            2,
        ),
        (
            (d1, d2, d3, d4),
            2,
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
                        relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"),
                    ]
                )
            ),
            0,
        ),
        (
            (d1, d2, d3, d4),
            (2, 4, 7),
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((d1, 2, d3, d4), "float32"),
                        relay.ty.TensorType((d1, 2, d3, d4), "float32"),
                        relay.ty.TensorType((d1, 3, d3, d4), "float32"),
                        relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
                    ]
                )
            ),
            1,
        ),
        (
            (d1, d2, d3, d4),
            tuple(np.array([2, 4, 7]).astype(np.int64)),
            relay.ty.TupleType(
                tvm.runtime.convert(
                    [
                        relay.ty.TensorType((d1, 2, d3, d4), "float32"),
                        relay.ty.TensorType((d1, 2, d3, d4), "float32"),
                        relay.ty.TensorType((d1, 3, d3, d4), "float32"),
                        relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"),
                    ]
                )
            ),
            1,
        ),
    )

    def test_split(self, dshape, indices_or_sections, ret_type, axis):
        x = relay.var("x", relay.ty.TensorType(dshape, "float32"))
        y = relay.split(x, indices_or_sections, axis=axis)
        # split returns a TupleWrapper; infer types on the underlying tuple.
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == ret_type
def test_full_infer_type():
    """Type inference for relay.full with default and explicit dtype/shape."""
    # With no explicit dtype, the output dtype follows the input scalar.
    fill = relay.var("x", relay.TensorType((), "int8"))
    call = relay.full(fill, ())
    inferred = run_infer_type(call)
    assert inferred.checked_type == relay.TensorType((), "int8")

    # An explicit shape and dtype override the defaults.
    fill = relay.var("x", relay.TensorType((), "float32"))
    call = relay.full(fill, (1, 2), "int8")
    assert "shape=" in call.astext()
    inferred = run_infer_type(call)
    assert inferred.checked_type == relay.TensorType((1, 2), "int8")
class TestFull:
    """Numerical tests for relay.full and relay.full_like against numpy."""

    fill_value, arr_shape, dtype = tvm.testing.parameters(
        (4, (1, 3, 4, 4), "int32"),
        (4, (1, 3, 4, 4), "int64"),
        (4.0, (1, 4), "float32"),
    )

    def test_full(self, target, dev, executor_kind, fill_value, arr_shape, dtype):
        """full(scalar, shape, dtype) should match np.full."""
        x = relay.var("x", relay.scalar_type(dtype))
        z = relay.full(x, arr_shape, dtype)
        func = relay.Function([x], z)
        ref_res = np.full(arr_shape, fill_value, dtype=dtype)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            np.array(fill_value, dtype)
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    def test_full_like(self, target, dev, executor_kind, arr_shape, fill_value, dtype):
        """full_like(tensor, scalar) should match np.full_like."""
        x_data = np.random.uniform(low=-1, high=1, size=arr_shape).astype(dtype)
        x = relay.var("x", relay.TensorType(arr_shape, dtype))
        y = relay.var("y", relay.scalar_type(dtype))
        z = relay.full_like(x, y)
        func = relay.Function([x, y], z)
        ref_res = np.full_like(x_data, fill_value)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, np.array(fill_value, dtype)
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
def test_full_like_infer_type():
    """relay.full_like output type should mirror the reference tensor's type."""
    # Fully static shape.
    ref = relay.var("base", relay.TensorType((1, 2, 3), "float32"))
    value = relay.var("fill", relay.TensorType((), "float32"))
    inferred = run_infer_type(relay.full_like(ref, value))
    assert inferred.checked_type == relay.TensorType((1, 2, 3), "float32")

    # Partially symbolic shape: size_vars must be carried through.
    n, c, h, w = te.size_var("n"), 2, 3, te.size_var("w")
    ref = relay.var("base", relay.TensorType((n, c, h, w), "float32"))
    value = relay.var("fill", relay.TensorType((), "float32"))
    inferred = run_infer_type(relay.full_like(ref, value))
    assert inferred.checked_type == relay.TensorType((n, c, h, w), "float32")
def test_infer_type_leaky_relu(target, dev, executor_kind):
    """Type inference (symbolic and static shapes) and numerics for leaky_relu."""
    # Symbolic shape: size_vars must propagate through inference.
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
    y = relay.nn.leaky_relu(x, alpha=0.1)
    # Fix: this membership check was a bare expression (a no-op); assert it.
    assert "alpha=0.1" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((n, c, h, w), "float32")

    # Static shape: verify the computed result against a numpy reference.
    shape = (1, 5, 10, 10)
    dtype = "float32"
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = relay.nn.leaky_relu(x, alpha=0.1)
    assert "alpha=0.1" in z.astext()
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType(shape, dtype)
    func = relay.Function([x], z)
    x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype)
    # Reference: identity for positives, slope alpha=0.1 for negatives.
    ref_res = np.where(x_data > 0, x_data, x_data * 0.1)
    op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_data)
    tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestInferTypePrelu:
    """Type-inference and numerical tests for relay.nn.prelu."""

    dtype = tvm.testing.parameter("float32")

    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    data, alpha, axis, output = tvm.testing.parameters(
        ((n, c, h, w), (c,), 1, (n, c, h, w)),
        ((n, h, w, c), (c,), 3, (n, h, w, c)),
        ((n, c, h, w), None, 1, (n, c, h, w)),
        ((n, h, w, c), None, 3, (n, h, w, c)),
        ((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2)),
        ((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3)),
        ((1, 3, 2, 2), None, 1, (1, 3, 2, 2)),
        ((1, 2, 2, 3), None, 3, (1, 2, 2, 3)),
    )

    def test_infer_type_prelu(self, target, dev, executor_kind, data, alpha, axis, output, dtype):
        """Infer prelu's type (alpha given or left incomplete) and, for fully
        concrete shapes with an explicit alpha, check numerics against numpy."""
        x = relay.var("data", relay.TensorType(data, dtype))
        if alpha:
            y = relay.var("alpha", relay.TensorType(alpha, dtype))
        else:
            # Leave alpha's type to be inferred from `data` and `axis`.
            y = relay.var("alpha", relay.IncompleteType())
        z = relay.nn.prelu(x, y, axis=axis)
        zz = run_infer_type(z)
        if axis != 1:
            assert "axis" in z.astext()
        assert zz.checked_type == relay.ty.TensorType(output, dtype)
        if not alpha:
            # The inferred alpha shape is the extent of the channel axis.
            axis = axis if axis else 1
            alpha_shape = (data[axis],)
            assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32")
        # Skip the numerical check for symbolic shapes or inferred alpha.
        # (Fix: compare the isinstance result directly rather than `== 1`.)
        if all(isinstance(v, tvm.tir.Var) for v in data) or not alpha:
            return

        func = relay.Function([x, y], z)
        x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype)
        a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype)
        if axis == 1:
            # Channel-first layout: broadcast alpha over H and W.
            ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data
        else:
            # Channel-last layout.
            ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, a_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestArange:
    """Numerical tests for relay.arange against np.arange."""

    dtype = tvm.testing.parameter("float32")

    start, stop, step = tvm.testing.parameters(
        (None, 20, None),
        (None, 20, 2),
        (1, 20, None),
        (1, 20, 2),
        # arange doesnt' support floating point right now, see type relation
        # (1, 20, 1.5),
        (1, 20.5, None),
        (1, 20, 3),
        (20, 1, -1),
        # arange doesnt' support floating point right now, see type relation
        # (20, 1, -1.5),
    )

    def test_arange(self, target, dev, executor_kind, start, stop, step, dtype):
        """Build the relay.arange form matching which of start/step are given."""
        # relay.arange mirrors np.arange's optional arguments, so each
        # combination of provided arguments uses a different call shape.
        if start is None and step is None:
            x = relay.arange(relay.const(stop, dtype=dtype))
            ref_res = np.arange(stop).astype(dtype)
        elif start is None:
            x = relay.arange(relay.const(stop, dtype=dtype), step=relay.const(step, dtype=dtype))
            ref_res = np.arange(stop, step=step).astype(dtype)
        elif step is None:
            x = relay.arange(relay.const(start, dtype=dtype), relay.const(stop, dtype=dtype))
            ref_res = np.arange(start, stop).astype(dtype)
        else:
            x = relay.arange(
                relay.const(start, dtype=dtype),
                relay.const(stop, dtype=dtype),
                relay.const(step, dtype=dtype),
            )
            ref_res = np.arange(start, stop, step).astype(dtype)
        # The function takes no inputs; the range is baked into constants.
        func = relay.Function([], x)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestMeshgrid:
    """Numerical tests for relay.meshgrid against np.meshgrid."""

    lengths, indexing = tvm.testing.parameters(
        ([3, 5], "ij"),
        ([4, 2], "xy"),
        ([3, 5, 2], "ij"),
        ([3, 1, 5], "xy"),
        # Length 0 signifies scalar.
        ([3, 5, 0], "ij"),
    )

    def test_meshgrid(self, target, dev, executor_kind, lengths, indexing="ij"):
        """Build 1-D (or scalar) inputs and compare each grid against numpy."""
        input_vars = []
        input_data = []
        for i, length in enumerate(lengths):
            input_name = "x_{}".format(i)
            if length == 0:
                # Scalar
                input_vars.append(relay.var(input_name, relay.scalar_type("float32")))
                input_data.append(np.array(1, "float32"))
            else:
                input_vars.append(relay.var(input_name, relay.TensorType((length,), "float32")))
                input_data.append(np.arange(length).astype("float32"))

        # meshgrid returns a tuple of grids, one per input.
        z = relay.meshgrid(input_vars, indexing=indexing).astuple()
        func = relay.Function(input_vars, z)
        # Get ref
        ref_res = np.meshgrid(*input_data, indexing=indexing)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            *input_data
        )
        assert len(op_res) == len(ref_res)
        for i in range(len(op_res)):
            tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5)
class TestTile:
    """Numerical tests for relay.tile against np.tile."""

    dshape, reps = tvm.testing.parameters(
        ((2, 3, 4), (3, 2, 1)),
        ((2, 3, 4), (1, 2)),
        ((2, 3), (3, 2, 1)),
    )

    def test_tile(self, target, dev, executor_kind, dshape, reps):
        """Tile a random tensor and compare with the numpy reference."""
        inp = relay.var("x", relay.TensorType(dshape, "float32"))
        tiled = relay.tile(inp, reps=reps)
        fn = relay.Function([inp], tiled)
        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        expected = np.tile(data, reps=reps)
        evaluator = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)
        tvm.testing.assert_allclose(evaluator(data).numpy(), expected, rtol=1e-5)
class TestRepeat:
    """Numerical tests for relay.repeat against np.repeat."""

    dshape, repeats, axis = tvm.testing.parameters(
        ((3,), 2, 0),
        ((3, 10), 2, -1),
        ((3, 2, 4), 3, 1),
    )

    def test_repeat(self, target, dev, executor_kind, dshape, repeats, axis):
        """Repeat a random tensor along `axis` and compare with numpy."""
        inp = relay.Var("x", relay.TensorType(dshape, "float32"))
        fn = relay.Function([inp], relay.repeat(inp, repeats, axis))
        samples = np.random.uniform(size=dshape).astype("float32")
        expected = np.repeat(samples, repeats, axis)
        evaluator = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)
        tvm.testing.assert_allclose(evaluator(samples).numpy(), expected, rtol=1e-5)
class TestStack:
    """Tests relay.stack over tuples, lists, and tuple-valued expressions."""

    dshapes, axis = tvm.testing.parameters(
        ([(2,), (2,), (2,)], -1),
        ([(2,), (2,), (2,)], 0),
        ([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1),
        ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1),
        ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4),
    )

    expr_type = tvm.testing.parameter("tuple", "list", "tuple_expr")

    @tvm.testing.fixture
    def ref_data(self, dshapes, axis):
        # Random inputs plus the numpy reference result.
        np_in = [np.random.normal(size=shape).astype("float32") for shape in dshapes]
        np_out = np.stack(np_in, axis=axis)
        return np_in, np_out

    @tvm.testing.fixture
    def input_expr(self, dshapes, axis, expr_type, ref_data):
        # Build the stack input in one of three equivalent representations.
        input_vars = [relay.var("input", relay.TensorType(shape, "float32")) for shape in dshapes]
        if expr_type == "tuple":
            input_expr = relay.Tuple(input_vars)
        elif expr_type == "list":
            input_expr = input_vars
        elif expr_type == "tuple_expr":
            # expression that evaluates to a tuple
            # but is not a tuple literal
            np_in, np_out = ref_data
            x = relay.Var("x")
            input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in np_in]), x)
        else:
            raise ValueError(f"Unknown expr_type '{expr_type}'")
        return input_expr

    def test_stack(self, target, dev, executor_kind, input_expr, ref_data, axis):
        """Stack the inputs and compare against np.stack."""
        z = relay.stack(input_expr, axis=axis)
        inp_vars = relay.analysis.free_vars(z)
        func = relay.Function(inp_vars, z)
        np_in, np_out = ref_data
        # The tuple_expr form embeds the data as constants, so it has no
        # free vars and takes no runtime arguments.
        relay_args = np_in if inp_vars else []
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            *relay_args
        )
        tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5)
class TestReverse:
    """Numerical tests for relay.reverse against np.flip."""

    dshape, axis = tvm.testing.parameters(
        ((2, 3, 4), 1),
        ((4, 7), 0),
        ((2, 3, 4), -1),
    )

    def test_reverse(self, target, dev, executor_kind, dshape, axis):
        """Reverse a random tensor along `axis` and compare with np.flip."""
        inp = relay.var("x", relay.TensorType(dshape, "float32"))
        flipped = relay.reverse(inp, axis=axis)
        # Run type inference, as the original test did, to ensure it succeeds.
        zz = run_infer_type(flipped)
        fn = relay.Function([inp], flipped)
        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        expected = np.flip(data, axis)
        evaluator = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)
        tvm.testing.assert_allclose(evaluator(data).numpy(), expected, rtol=1e-5)
def test_reverse_sequence(target, dev, executor_kind):
    """relay.reverse_sequence: per-slice reversal along seq_axis, sliced by
    batch_axis, with lengths clamped/skipped per element; plus an error case."""

    def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res):
        # The output type must be identical to the input type.
        seq_lengths_data = np.array(seq_lengths).astype("int32")
        x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype)))
        z = relay.reverse_sequence(x, relay.const(seq_lengths_data), seq_axis, batch_axis)
        zz = run_infer_type(z)
        assert zz.checked_type == x.type_annotation
        func = relay.Function([x], z)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    # Reverse along axis 0, batched along axis 1 (negative axis equivalent).
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
    verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
    verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
    verify_reverse_sequence(
        indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
    )

    # Reverse along axis 1, batched along axis 0.
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
    verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
    verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
    verify_reverse_sequence(
        indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
    )

    # Lengths <= 1 leave rows untouched; lengths beyond the axis extent clamp.
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
    verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))

    # 4-D input, reversing axis 1 with full lengths.
    indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
    result = [
        [
            [[18, 19, 20], [21, 22, 23], [24, 25, 26]],
            [[9, 10, 11], [12, 13, 14], [15, 16, 17]],
            [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
        ],
        [
            [[45, 46, 47], [48, 49, 50], [51, 52, 53]],
            [[36, 37, 38], [39, 40, 41], [42, 43, 44]],
            [[27, 28, 29], [30, 31, 32], [33, 34, 35]],
        ],
    ]
    verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))

    # 4-D input with a per-batch-slice partial reversal (batch axis 2).
    indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
    result = [
        [
            [[9, 10, 11], [21, 22, 23], [15, 16, 17]],
            [[0, 1, 2], [12, 13, 14], [6, 7, 8]],
            [[18, 19, 20], [3, 4, 5], [24, 25, 26]],
        ],
        [
            [[36, 37, 38], [48, 49, 50], [42, 43, 44]],
            [[27, 28, 29], [39, 40, 41], [33, 34, 35]],
            [[45, 46, 47], [30, 31, 32], [51, 52, 53]],
        ],
    ]
    verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))

    # Error case: seq_lengths size must match the batch-axis extent.
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = []
    with pytest.raises(Exception) as execinfo:
        verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))

    # NOTE(review): "reverse_sequnece" is misspelled, but it must match the
    # actual error message emitted by the op implementation — fix both together.
    assert (
        "For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
        " but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
    )
def ref_scatter(data, indices, updates, axis=0):
    """Numpy reference for scatter: copies `data` and writes each element of
    `updates` to the position whose coordinate along `axis` is taken from the
    corresponding element of `indices`."""
    # Coordinates of every element of `indices`/`updates`, one row per dim.
    src_idx = np.indices(indices.shape).reshape(indices.ndim, -1)
    dst_idx = np.copy(src_idx)
    # Replace the coordinate along `axis` with the scatter indices.
    dst_idx[axis] = indices.reshape(-1)
    out = np.copy(data)
    out[tuple(dst_idx)] = updates[tuple(src_idx)]
    return out
def test_scatter(target, dev, executor_kind):
    """relay.op.scatter_elements (replace mode) against the numpy reference."""

    def verify_scatter(dshape, ishape, axis=0, indices_dtype="int64"):
        d = relay.var("d", relay.TensorType(dshape, "float32"))
        i = relay.var("i", relay.TensorType(ishape, indices_dtype))
        u = relay.var("u", relay.TensorType(ishape, "float32"))
        z = relay.op.scatter_elements(d, i, u, axis)

        func = relay.Function([d, i, u], z)

        data_np = np.random.uniform(size=dshape).astype("float32")
        updates_np = np.random.uniform(size=ishape).astype("float32")
        # Indices stay within the axis extent of `dshape`.
        indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indices_dtype)

        ref_res = ref_scatter(data_np, indices_np, updates_np, axis)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data_np, indices_np, updates_np
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    # Ranks 1-4, positive and negative axes, full and partial index shapes.
    verify_scatter((10,), (10,), 0)
    verify_scatter((10, 5), (10, 5), -2)
    verify_scatter((10, 5), (10, 5), -1)
    verify_scatter((10, 5), (3, 5), 0)
    verify_scatter((12, 4), (7, 2), 1)
    verify_scatter((2, 3, 4), (1, 3, 4), 0)
    verify_scatter((2, 3, 4), (2, 1, 4), 1)
    verify_scatter((2, 3, 4), (2, 3, 1), 2)
    verify_scatter((4, 2, 1), (1, 1, 1), 0)
    verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0)
    verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1)
    verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2)
    verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3)
    verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3, indices_dtype="uint32")
class TestDynamicScatter:
    """scatter_elements with fully dynamic (relay.Any) input shapes; VM only."""

    dshape, ishape, axis = tvm.testing.parameters(
        ((10,), (10,), 0),
        ((10, 5), (10, 5), -2),
        ((10, 5), (10, 5), -1),
        ((10, 5), (3, 5), 0),
        ((12, 4), (7, 2), 1),
        ((2, 3, 4), (1, 3, 4), 0),
        ((2, 3, 4), (2, 1, 4), 1),
        ((2, 3, 4), (2, 3, 1), 2),
        ((4, 2, 1), (1, 1, 1), 0),
        ((2, 3, 4, 5), (1, 3, 4, 5), 0),
        ((6, 3, 4, 5), (2, 3, 4, 5), 1),
        ((2, 3, 8, 5), (2, 3, 1, 1), 2),
        ((16, 16, 4, 5), (16, 16, 4, 5), 3),
    )

    @pytest.mark.parametrize("executor_kind", ["vm"])
    def test_dynamic_scatter(self, target, dev, executor_kind, dshape, ishape, axis):
        """Compile with relay.Any dims and compare against ref_scatter."""
        d = relay.var("d", relay.TensorType([relay.Any() for i in range(len(dshape))], "float32"))
        i = relay.var("i", relay.TensorType([relay.Any() for i in range(len(ishape))], "int64"))
        u = relay.var("u", relay.TensorType([relay.Any() for i in range(len(ishape))], "float32"))
        z = relay.op.scatter_elements(d, i, u, axis)

        func = relay.Function([d, i, u], z)

        data_np = np.random.uniform(size=dshape).astype("float32")
        updates_np = np.random.uniform(size=ishape).astype("float32")
        # Negative indices are included here to exercise wrap-around indexing.
        indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64")

        ref_res = ref_scatter(data_np, indices_np, updates_np, axis)

        mod = tvm.ir.IRModule.from_expr(func)
        op_res = relay.create_executor(
            executor_kind, mod=mod, device=dev, target=target
        ).evaluate()(data_np, indices_np, updates_np)
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
class TestScatterAdd:
    """scatter_elements in "add" (accumulate) mode with dynamic shapes."""

    dshape, ishape, axis, dtype, indice_dtype = tvm.testing.parameters(
        ((10,), (10,), 0, "int32", "int64"),
        ((1000,), (1000,), 0, "int32", "int64"),
        ((10, 5), (10, 5), -2, "float32", "int64"),
        ((10, 5), (10, 5), -1, "float32", "int64"),
        ((10, 5), (3, 5), 0, "float32", "int64"),
        ((12, 4), (7, 2), 1, "float32", "int64"),
        ((2, 3, 4), (1, 3, 4), 0, "float32", "int64"),
        ((2, 3, 4), (2, 1, 4), 1, "float32", "int64"),
        ((2, 3, 4), (2, 3, 1), 2, "float32", "int64"),
        ((2, 3, 4, 5), (1, 3, 4, 5), 0, "float32", "int64"),
        ((6, 3, 4, 5), (2, 3, 4, 5), 1, "float32", "int64"),
        ((2, 3, 8, 5), (2, 3, 1, 1), 2, "float32", "int64"),
        ((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "int64"),
        ((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32", "uint32"),
    )

    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(self, dshape, ishape, axis, dtype, indice_dtype):
        # Reference computed by accumulating updates element-by-element so
        # duplicate indices add up, matching scatter-add semantics.
        data_np = np.random.uniform(size=dshape).astype(dtype)
        updates_np = np.random.uniform(size=ishape).astype(dtype)
        indices_np = np.random.randint(0, dshape[axis] - 1, ishape).astype(indice_dtype)

        out_np = np.copy(data_np)
        for index in np.ndindex(*indices_np.shape):
            new_index = list(index)
            new_index[axis] = indices_np[index]
            out_np[tuple(new_index)] += updates_np[index]
        return data_np, updates_np, indices_np, out_np

    # Optimization can produce tir.atomic_add, not currently supported
    # on vulkan runtime.
    @tvm.testing.known_failing_targets("vulkan")
    def test_scatter_add(self, target, dev, ref_data, dshape, ishape, axis, dtype, indice_dtype):
        """Compile with relay.Any dims and compare against the fixture ref."""
        d = relay.var("d", relay.TensorType(shape=[relay.Any() for _ in dshape], dtype=dtype))
        i = relay.var(
            "i", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=indice_dtype)
        )
        u = relay.var("u", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=dtype))
        z = relay.op.scatter_elements(d, i, u, axis, "add")

        func = relay.Function([d, i, u], z)

        data_np, updates_np, indices_np, out_np = ref_data

        verify_func(target, dev, func, [data_np, indices_np, updates_np], out_np)
# relay.gather test cases: each tuple is (data, axis, indices, expected).
# Positive/negative axes are paired to check they are equivalent.
@pytest.mark.parametrize(
    "data, axis, indices, ref_res",
    [
        ([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
        ([[1, 2], [3, 4]], -1, [[0, 0], [1, 0]], [[1, 1], [4, 3]]),
        (
            [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
            0,
            [[[1, 0, 1], [1, 1, 0]]],
            [[[6, 1, 8], [9, 10, 5]]],
        ),
        (
            [[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]]],
            -3,
            [[[1, 0, 1], [1, 1, 0]]],
            [[[6, 1, 8], [9, 10, 5]]],
        ),
        (
            [
                [
                    [-0.2321, -0.2024, -1.7624],
                    [-0.3829, -0.4246, 0.2448],
                    [0.1822, 0.2360, -0.8965],
                    [0.4497, -0.2224, 0.6103],
                ],
                [
                    [0.0408, -0.7667, -0.4303],
                    [-0.3216, 0.7489, -0.1502],
                    [0.0144, -0.4699, -0.0064],
                    [-0.0768, -1.6064, 1.3390],
                ],
            ],
            1,
            [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
            [
                [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
                [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
            ],
        ),
        (
            [
                [
                    [-0.2321, -0.2024, -1.7624],
                    [-0.3829, -0.4246, 0.2448],
                    [0.1822, 0.2360, -0.8965],
                    [0.4497, -0.2224, 0.6103],
                ],
                [
                    [0.0408, -0.7667, -0.4303],
                    [-0.3216, 0.7489, -0.1502],
                    [0.0144, -0.4699, -0.0064],
                    [-0.0768, -1.6064, 1.3390],
                ],
            ],
            -2,
            [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
            [
                [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
                [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
            ],
        ),
        (
            [
                [
                    [-0.2321, -0.2024, -1.7624],
                    [-0.3829, -0.4246, 0.2448],
                    [0.1822, 0.2360, -0.8965],
                    [0.4497, -0.2224, 0.6103],
                ],
                [
                    [0.0408, -0.7667, -0.4303],
                    [-0.3216, 0.7489, -0.1502],
                    [0.0144, -0.4699, -0.0064],
                    [-0.0768, -1.6064, 1.3390],
                ],
            ],
            -2,
            [[[2, 2, 0], [1, 0, 3]], [[3, 2, 0], [1, 0, 0]]],
            [
                [[0.1822, 0.2360, -1.7624], [-0.3829, -0.2024, 0.6103]],
                [[-0.0768, -0.4699, -0.4303], [-0.3216, -0.7667, -0.4303]],
            ],
        ),
        (
            [
                [
                    [0.3050, 1.6986, 1.1034],
                    [0.7020, -0.6960, -2.1818],
                    [0.3116, -0.5773, -0.9912],
                    [0.0835, -1.3915, -1.0720],
                ],
                [
                    [0.1694, -0.6091, -0.6539],
                    [-0.5234, -0.1218, 0.5084],
                    [0.2374, -1.9537, -2.0078],
                    [-0.5700, -1.0302, 0.1558],
                ],
            ],
            2,
            [
                [[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
                [[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
            ],
            [
                [
                    [1.6986, 1.6986, 0.3050, 1.6986],
                    [0.7020, 0.7020, -2.1818, -2.1818],
                    [-0.5773, -0.9912, -0.5773, -0.9912],
                    [-1.0720, -1.0720, -1.3915, 0.0835],
                ],
                [
                    [0.1694, 0.1694, -0.6091, -0.6539],
                    [0.5084, 0.5084, -0.1218, -0.5234],
                    [-1.9537, -2.0078, 0.2374, 0.2374],
                    [-0.5700, 0.1558, -0.5700, 0.1558],
                ],
            ],
        ),
        (
            [
                [
                    [0.3050, 1.6986, 1.1034],
                    [0.7020, -0.6960, -2.1818],
                    [0.3116, -0.5773, -0.9912],
                    [0.0835, -1.3915, -1.0720],
                ],
                [
                    [0.1694, -0.6091, -0.6539],
                    [-0.5234, -0.1218, 0.5084],
                    [0.2374, -1.9537, -2.0078],
                    [-0.5700, -1.0302, 0.1558],
                ],
            ],
            -1,
            [
                [[1, 1, 0, 1], [0, 0, 2, 2], [1, 2, 1, 2], [2, 2, 1, 0]],
                [[0, 0, 1, 2], [2, 2, 1, 0], [1, 2, 0, 0], [0, 2, 0, 2]],
            ],
            [
                [
                    [1.6986, 1.6986, 0.3050, 1.6986],
                    [0.7020, 0.7020, -2.1818, -2.1818],
                    [-0.5773, -0.9912, -0.5773, -0.9912],
                    [-1.0720, -1.0720, -1.3915, 0.0835],
                ],
                [
                    [0.1694, 0.1694, -0.6091, -0.6539],
                    [0.5084, 0.5084, -0.1218, -0.5234],
                    [-1.9537, -2.0078, 0.2374, 0.2374],
                    [-0.5700, 0.1558, -0.5700, 0.1558],
                ],
            ],
        ),
    ],
)
def test_gather(target, dev, executor_kind, data, axis, indices, ref_res):
    """relay.gather on concrete float32 data against precomputed results."""

    def verify_gather(data, axis, indices, ref_res):
        data = np.asarray(data, dtype="float32")
        indices = np.asarray(indices, dtype="int32")
        ref_res = np.asarray(ref_res)
        d = relay.var("x", relay.TensorType(data.shape, "float32"))
        i = relay.var("y", relay.TensorType(indices.shape, "int32"))
        z = relay.gather(d, axis, i)

        func = relay.Function([d, i], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data, indices
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    verify_gather(data, axis, indices, ref_res)
def test_gather_nd(target, dev, executor_kind):
    """relay.gather_nd with and without batch_dims, against a reference impl."""

    def verify_gather_nd(xshape, yshape, y_data, batch_dims=0, indices_dtype="int32"):
        x = relay.var("x", relay.TensorType(xshape, "float32"))
        y = relay.var("y", relay.TensorType(yshape, indices_dtype))
        z = relay.gather_nd(x, y, batch_dims)

        func = relay.Function([x, y], z)

        x_data = np.random.uniform(size=xshape).astype("float32")

        # y_data may be None, in which case random in-range indices are drawn.
        if y_data:
            y_data = np.array(y_data, dtype=indices_dtype)
        else:
            y_data = np.random.randint(low=0, high=2, size=yshape, dtype=indices_dtype)

        ref_res = ref_funcs.gather_nd(x_data, y_data, batch_dims)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, y_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
    verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
    verify_gather_nd((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
    verify_gather_nd((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])

    # Examples from tensorflow gather_nd doc
    # https://www.tensorflow.org/api_docs/python/tf/gather_nd
    verify_gather_nd((2, 2, 2), (1, 2), [[1, 0]], 1)
    verify_gather_nd((2, 2, 2), (1, 2, 1), [[[1], [0]]], 1)
    verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1)

    # Test cases from tensorflow gather_nd tests kernel_tests/array_ops_test.py
    verify_gather_nd((2, 2, 2), (1, 2), None, 1)
    verify_gather_nd((2, 2, 2), (2, 2), None, 1)
    verify_gather_nd((2, 2, 3, 2), (3, 2), None, 1)
    verify_gather_nd((2, 2, 3, 2), (2, 2), None, 1)
    verify_gather_nd((2, 2, 3, 2), (1, 2), None, 1)
    verify_gather_nd((2, 2, 3, 2), (3, 2, 1), None, 1)
    verify_gather_nd((2, 2, 3, 2), (2, 2, 2), None, 1)
    verify_gather_nd((2, 2, 3, 2), (1, 2, 3), None, 1)
    verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (3, 3, 2, 1), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (2, 3, 2, 2), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2)
    verify_gather_nd((3, 2, 2, 3, 4), (1, 3, 2, 3), None, 2, indices_dtype="uint8")
    verify_gather_nd((2, 2, 2), (2, 2, 1), [[[1], [0]], [[0], [1]]], 1, indices_dtype="uint32")
def _verify_infiniteness_ops(relay_op, ref_op, target="llvm", dev=None):
    """Check a relay infiniteness predicate (e.g. isinf/isfinite) against its
    numpy reference over several dtypes.

    For float dtypes, inf and nan are injected into random positions so both
    predicate outcomes are exercised; integer dtypes are tested as-is.
    """
    # Fix: "float16" was listed twice in the original dtype list.
    for dtype in ["float32", "float16", "int32", "int16"]:
        shape = (2, 8, 8)
        x = relay.var("x", relay.TensorType(shape, dtype))
        y = relay_op(x)
        yy = run_infer_type(y)
        # The predicate always returns a boolean tensor of the same shape.
        assert yy.checked_type == relay.TensorType(shape, "bool")

        data = np.random.uniform(size=shape).astype(dtype)
        if dtype.startswith("float"):
            # Fix: np.infty was removed in NumPy 2.0; np.inf is the alias kept.
            data.ravel()[
                np.random.choice(data.size, int(data.size * 0.5), replace=False)
            ] = np.inf
            data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan

        op_res = create_executor(target=target, device=dev).evaluate(y, {x: data})
        ref_res = ref_op(data)
        np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
@tvm.testing.requires_gpu
def test_isfinite():
    """Run the isfinite check on each enabled llvm/cuda target."""
    for target, dev in tvm.testing.enabled_targets():
        # Only llvm and cuda are supported by this check.
        if target in ("llvm", "cuda"):
            _verify_infiniteness_ops(relay.isfinite, np.isfinite, target=target, dev=dev)
@tvm.testing.requires_gpu
def test_isinf():
    """Run the isinf check on each enabled llvm/cuda target."""
    for target, dev in tvm.testing.enabled_targets():
        # Only llvm and cuda are supported by this check.
        if target in ("llvm", "cuda"):
            _verify_infiniteness_ops(relay.isinf, np.isinf, target=target, dev=dev)
def test_unravel_index(target, dev, executor_kind):
    """relay.unravel_index against np.unravel_index for int32/int64 inputs."""

    def verify_unravel_index(indices, shape, dtype):
        x_data = np.array(indices).astype(dtype)
        y_data = np.array(shape).astype(dtype)
        x = relay.var("x", relay.TensorType(x_data.shape, dtype))
        y = relay.var("y", relay.TensorType(y_data.shape, dtype))

        z = relay.unravel_index(x, y)
        zz = run_infer_type(z)

        # 1-D indices yield a (rank, num_indices) result; a scalar index
        # yields a (rank,) result.
        if len(x_data.shape) == 1:
            out_shape = [y_data.shape[0], x_data.shape[0]]
        else:
            out_shape = [y_data.shape[0]]
        assert zz.checked_type == relay.ty.TensorType(out_shape, dtype)

        func = relay.Function([x, y], z)
        ref_res = np.unravel_index(x_data, y_data)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, y_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    for dtype in ["int64", "int32"]:
        verify_unravel_index([0, 1, 2, 3], [2, 2], dtype)
        verify_unravel_index([144], [5, 5, 5, 2], dtype)
        verify_unravel_index(144, [5, 5, 5, 2], dtype)
        verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype)

        # In below example, 5 is out of bound for array of size 4.
        # Numpy implementation throws error for it
        # TVM implementation does not throw error instead it produces
        # output which is inline with Tensorflow
        # verify_unravel_index([0, 1, 2, 5], [2, 2], dtype)
def test_sparse_to_dense(target, dev, executor_kind):
    """relay.sparse_to_dense for scalar, 1-D, and N-D indices, with and
    without an explicit default value."""

    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
        sparse_indices_data = np.array(sparse_indices)
        sparse_values_data = np.array(sparse_values)
        default_value_data = np.array(default_value)

        a = relay.var(
            "a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
        )
        b = relay.var(
            "b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
        )
        # default_value=None exercises the op's implicit-default path.
        if default_value is None:
            args = [a, b]
            d = relay.sparse_to_dense(a, output_shape, b)
        else:
            c = relay.var(
                "c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
            )
            args = [a, b, c]
            d = relay.sparse_to_dense(a, output_shape, b, c)

        zz = run_infer_type(d)
        # The output dtype follows the sparse values' dtype.
        assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype))

        func = relay.Function(args, d)
        f = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)
        if default_value is None:
            op_res = f(sparse_indices_data, sparse_values_data)
        else:
            op_res = f(sparse_indices_data, sparse_values_data, default_value_data)
        tvm.testing.assert_allclose(op_res.numpy(), xpected, rtol=1e-5)

    verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])  # scalar
    verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])  # vector
    verify_sparse_to_dense(
        [[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
    )  # nXd
    verify_sparse_to_dense(
        [[0, 0, 0], [1, 2, 3]],
        [1, 2],
        4,
        [2, 3, 4],
        [[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
    )  # nXd
    verify_sparse_to_dense(
        [0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
    )  # floats
    # default value not specified
    verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])

    # negative test cases
    # sparse indices should be ints
    # verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_values should be 0d or 1d only
    # verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_indices should not be > 2d tensor
    # verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
class TestSparseReshape:
sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np = tvm.testing.parameters(
(
np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([2, 3, 6], dtype=np.int32),
np.array([9, -1], dtype=np.int32),
),
(
np.array(
[[0, 0, 0, 0], [0, 0, 1, 2], [0, 1, 0, 3], [1, 0, 0, 4], [1, 2, 3, 6]],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array(
[
[0, 0, 0, 0, 0],
[0, 0, 1, 2, 3],
[0, 1, 0, 3, 5],
[1, 0, 0, 4, 6],
[1, 2, 3, 6, 8],
],
dtype=np.int64,
),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([2, 3, 6, 7, 9], dtype=np.int64),
np.array([9, -1, 7], dtype=np.int64),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([9, 4], dtype=np.int32),
np.array([2, -1, 6], dtype=np.int32),
),
(
np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([9, 4], dtype=np.int64),
np.array([-1], dtype=np.int64),
),
(
np.array([[0], [5], [10], [20], [24]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([25], dtype=np.int32),
np.array([5, 5], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int32),
np.array([7, 5, 6, 3, 9], dtype=np.int32),
np.array([500, 20], dtype=np.int32),
np.array([500, -1], dtype=np.int32),
),
(
np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
np.array([7, 5, 6, 3, 9], dtype=np.int64),
np.array([500, 20], dtype=np.int64),
np.array([250, 40], dtype=np.int64),
),
(
np.ones((0, 1), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([4], dtype=np.int32),
np.array([2, -1], dtype=np.int32),
),
(
np.ones((0, 1), dtype=np.int64),
np.array([], dtype=np.int64),
np.array([4], dtype=np.int64),
np.array([2, 2], dtype=np.int64),
),
(
np.ones((0, 2), dtype=np.int32),
np.array([], dtype=np.int32),
np.array([3, 6], dtype=np.int32),
np.array([-1, 2], dtype=np.int32),
),
)
use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
    @tvm.testing.fixture(cache_return_value=True)
    def ref_res(
        self,
        sparse_indices_np: np.ndarray,
        prev_shape_np: np.ndarray,
        new_shape_np: np.ndarray,
    ):
        """NumPy reference for sparse_reshape.

        Each row of COO indices is flattened to a linear offset under
        `prev_shape_np` and re-expanded under `new_shape_np`.  A single -1
        entry in `new_shape_np` is resolved so the element count is preserved.
        Returns (new_sparse_indices, resolved_new_shape).
        """
        new_sparse_indices = np.ones(
            (sparse_indices_np.shape[0], new_shape_np.shape[0]), dtype=sparse_indices_np.dtype
        )
        # Row-major strides for the old shape (multipliers) and new shape (dividers).
        multipliers = np.ones(prev_shape_np.shape[0])
        dividers = np.ones(new_shape_np.shape[0])
        total_ele = np.prod(prev_shape_np)
        # Product of all known (non -1) new dimensions, used to resolve the -1.
        division_total_ele = 1
        for i in range(new_shape_np.shape[0]):
            if new_shape_np[i] == -1:
                continue
            division_total_ele *= new_shape_np[i]
        for i in range(prev_shape_np.shape[0] - 2, -1, -1):
            multipliers[i] = prev_shape_np[i + 1] * multipliers[i + 1]
        # NOTE(review): this resolves -1 by mutating `new_shape_np` IN PLACE;
        # with cache_return_value=True the parameter array is shared, so other
        # users of the same parametrization see the resolved shape too —
        # confirm this is intended before changing it.
        for i in range(len(new_shape_np)):
            if new_shape_np[i] == -1:
                new_shape_np[i] = total_ele // division_total_ele
        # No-op reshape: indices pass through unchanged.
        if np.array_equal(prev_shape_np, new_shape_np):
            return sparse_indices_np, prev_shape_np
        for i in range(new_shape_np.shape[0] - 2, -1, -1):
            dividers[i] = new_shape_np[i + 1] * dividers[i + 1]
        for row_num, sparse_row in enumerate(sparse_indices_np):
            # Flatten the old multi-dimensional index to a linear offset...
            flat_idx = 0
            if len(sparse_indices_np.shape) != 1:
                for i, ele in enumerate(sparse_row):
                    flat_idx += sparse_row[i] * multipliers[i]
            else:
                flat_idx += sparse_row
            # ...then expand it against the new shape's strides.
            if len(new_sparse_indices.shape) != 1:
                for i in range(new_sparse_indices.shape[1]):
                    new_sparse_indices[row_num][i] = flat_idx // dividers[i]
                    flat_idx = flat_idx % dividers[i]
            else:
                new_sparse_indices[row_num] = flat_idx
        return new_sparse_indices, new_shape_np
@tvm.testing.known_failing_targets("vulkan")
def test_sparse_reshape(
self,
target,
dev,
ref_res,
sparse_indices_np,
sparse_values_np,
prev_shape_np,
new_shape_np,
use_dyn,
):
if use_dyn:
sparse_indices = relay.var(
"sparse_indices",
shape=[relay.Any(), relay.Any()],
dtype=str(sparse_indices_np.dtype),
)
prev_shape = relay.var(
"prev_shape",
shape=[relay.Any()],
dtype=str(prev_shape_np.dtype),
)
new_shape = relay.var(
"new_shape",
shape=[relay.Any()],
dtype=str(new_shape_np.dtype),
)
else:
sparse_indices = relay.var(
"sparse_indices",
relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
)
prev_shape = relay.var(
"prev_shape", relay.TensorType(prev_shape_np.shape, str(prev_shape_np.dtype))
)
new_shape = relay.var(
"new_shape", relay.TensorType(new_shape_np.shape, str(new_shape_np.dtype))
)
z = relay.op.sparse_reshape(sparse_indices, prev_shape, new_shape).astuple()
func = relay.Function([sparse_indices, prev_shape, new_shape], z)
outputs = run_infer_type(z)
new_sparse_indices_infer_type, new_shape_infer_type = (
outputs.checked_type.fields[0].dtype,
outputs.checked_type.fields[1].dtype,
)
assert new_sparse_indices_infer_type == sparse_indices_np.dtype
assert new_shape_infer_type == new_shape_np.dtype
verify_func(
target,
dev,
func,
[sparse_indices_np, prev_shape_np, new_shape_np],
ref_res,
)
class TestSegmentSum:
    """Tests for relay.op.segment_sum against a pure-NumPy reference."""

    # Each tuple is (data, segment_ids, num_segments); num_segments=None means
    # "infer it from the number of distinct ids".
    data_np, segment_ids_np, num_segments = tvm.testing.parameters(
        (
            np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
            np.array([0, 0, 1, 1, 0, 1], dtype=np.int32),
            None,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 0, 1], dtype=np.int32),
            None,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([2, 0, 1, 0, 3, 2], dtype=np.int64),
            None,
        ),
        (
            np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
            np.array([0, 0, 1], dtype=np.int32),
            None,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([5, 0, 1, 0, 3, 6, 8, 7, 7], dtype=np.int64),
            9,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 2], dtype=np.int32),
            4,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 0, 1, 5, 5], dtype=np.int32),
            100,
        ),
    )
    use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])

    @tvm.testing.fixture(cache_return_value=True)
    def ref_res(
        self,
        data_np: np.ndarray,
        segment_ids_np: np.ndarray,
        num_segments: Optional[int],
    ):
        """NumPy reference for segment_sum.

        Rows of `data_np` sharing a segment id are summed together; segments
        that receive no rows stay zero.
        """
        # BUGFIX: was `if not num_segments:`, which would also treat an
        # explicit num_segments == 0 as "infer from the ids".
        if num_segments is None:
            num_segments = np.unique(segment_ids_np).shape[0]
        result = np.zeros((num_segments,) + data_np.shape[1:], data_np.dtype)
        for i, index in enumerate(segment_ids_np):
            result[index] += data_np[i]
        return result

    # Optimization can produce tir.atomic_add, not currently supported
    # on vulkan runtime.
    @tvm.testing.known_failing_targets("vulkan")
    def test_segment_sum(
        self,
        target,
        dev,
        ref_res: np.ndarray,
        data_np: np.ndarray,
        segment_ids_np: np.ndarray,
        num_segments: Optional[int],
        use_dyn: bool,
    ):
        """
        This function verifies the relay output of segment_sum with its expected output.
        """
        if use_dyn:
            # Dynamic case: only the rank of each input is known to relay.
            data = relay.var(
                "data",
                shape=[relay.Any() for _ in data_np.shape],
                dtype=str(data_np.dtype),
            )
            segment_ids = relay.var(
                "segment_ids",
                shape=[relay.Any()],
                dtype=str(segment_ids_np.dtype),
            )
        else:
            data = relay.var(
                "data",
                relay.TensorType(data_np.shape, str(data_np.dtype)),
            )
            segment_ids = relay.var(
                "segment_ids", relay.TensorType(segment_ids_np.shape, str(segment_ids_np.dtype))
            )
        z = relay.op.segment_sum(data, segment_ids, num_segments)
        func = relay.Function([data, segment_ids], z)
        segment_sum_result = run_infer_type(z)
        # The output dtype must follow the input data dtype.
        assert segment_sum_result.checked_type.dtype == data_np.dtype
        verify_func(
            target,
            dev,
            func,
            [data_np, segment_ids_np],
            ref_res,
        )
def verify_func(target, dev, func, data, ref_res, rtol=1e-5, atol=1e-7, kinds=("vm",)):
    """Evaluate `func` under each executor kind and compare against `ref_res`.

    Parameters
    ----------
    data : list
        Positional inputs for the compiled function.
    ref_res : np.ndarray or sequence of np.ndarray
        Expected output(s); a sequence is matched field-by-field against a
        tuple-valued (ADT) result.
    kinds : iterable of str
        Executor kinds to run ("vm", "graph", ...).

    Note: the default was changed from the mutable list ``["vm"]`` to the
    equivalent immutable tuple ``("vm",)`` to avoid the shared-mutable-default
    pitfall; callers passing their own list are unaffected.
    """
    assert isinstance(data, list)
    for kind in kinds:
        mod = tvm.ir.IRModule.from_expr(func)
        op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(*data)
        if isinstance(op_res, tvm.runtime.container.ADT):
            # Tuple-valued result: compare field by field.
            assert len(op_res) == len(
                ref_res
            ), "Outputs from TVM and Python implementation must be equal "
            for op_result, ref_result in zip(op_res, ref_res):
                tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=rtol, atol=atol)
        else:
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
        # Clear the compile-engine cache so repeated runs do not accumulate state.
        relay.backend.te_compiler.get().clear()
def test_adv_index(target, dev, executor_kind):
    """Compare relay.op.adv_index with NumPy advanced indexing."""

    def check(data_shape, index_shapes):
        dtype = "float32"
        relay_args = [relay.var("data", relay.TensorType(data_shape, dtype))]
        np_data = np.random.uniform(size=data_shape).astype(dtype)
        np_indices = []
        for dim, index_shape in enumerate(index_shapes):
            # Keep every generated index within the bounds of dimension `dim`.
            upper = data_shape[dim]
            np_indices.append(np.random.uniform(0, upper - 1, size=index_shape).astype("int64"))
            relay_args.append(
                relay.var("index_{}".format(dim), relay.TensorType(index_shape, "int64"))
            )
        expected = np_data[tuple(np_indices)]
        func = relay.Function(relay_args, relay.op.adv_index(relay_args))
        actual = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            np_data, *np_indices
        )
        tvm.testing.assert_allclose(actual.numpy(), expected, rtol=1e-5)

    check((10, 5), [(3, 4), (3, 1)])
    check((10, 5), [(1, 4), (3, 1)])
    check((10, 5), [(2,)])
    check((10, 5, 15), [(1, 2, 1), (1, 2, 7)])
# Helper for testing binop functions
# Maps the scan-op name accepted by run_binop_tests to its relay implementation.
scanops_supported = {"cumsum": relay.op.cumsum, "cumprod": relay.op.cumprod}
def run_binop_tests(
    target,
    dev,
    executor_kind,
    binop_type: str,
    gt_func: Callable[..., np.array],
    identity_value: int,
):
    """Shared driver for scan ops (cumsum/cumprod).

    `binop_type` selects the relay op from `scanops_supported`, `gt_func` is
    the NumPy ground-truth (np.cumsum / np.cumprod), and `identity_value` is
    the op's identity element (0 / 1), used for the exclusive-scan cases.
    """
    def assert_relay_scanop(
        data_np: np.array,
        np_out: np.array,
        axis: int = None,
        out_dtype: str = None,
        rtol: float = 1e-5,
        atol: float = 1e-5,
        exclusive: bool = False,
    ):
        # Build a one-input relay function applying the scan op and compare
        # its output with the precomputed NumPy result.
        inp = relay.var("data", relay.TensorType(data_np.shape, str(data_np.dtype)))
        if binop_type not in scanops_supported.keys():
            raise ValueError(f"Unknown function {binop_type}. Options: {scanops_supported.keys()}")
        out = scanops_supported[binop_type](inp, axis, out_dtype, exclusive=exclusive)
        func = relay.Function([inp], out)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data_np
        )
        tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol)
    data = np.array([2, 3, 0])
    assert_relay_scanop(data, gt_func(data))
    assert_relay_scanop(data, gt_func(data), out_dtype="int64")
    data = np.random.randn(10, 10)
    assert_relay_scanop(data, gt_func(data))
    assert_relay_scanop(data, gt_func(data, axis=0), axis=0)
    assert_relay_scanop(data, gt_func(data, axis=1), axis=1)
    data = np.random.randn(10, 5, 10).astype("float32")
    assert_relay_scanop(data, gt_func(data), rtol=1e-4, atol=1e-4)
    assert_relay_scanop(data, gt_func(data, axis=0), axis=0, rtol=1e-4, atol=1e-4)
    assert_relay_scanop(data, gt_func(data, axis=1), axis=1, rtol=1e-4, atol=1e-4)
    assert_relay_scanop(data, gt_func(data, axis=-1), axis=-1, rtol=1e-4, atol=1e-4)
    # Boolean-ish int input with explicit output dtypes.
    data = np.random.rand(10) > 0.5
    data = data.astype(np.int32)
    assert_relay_scanop(data, gt_func(data, dtype=np.int32))
    assert_relay_scanop(data, gt_func(data, dtype="int64"), out_dtype="int64")
    # Test exclusivity operations
    # (exclusive scan == inclusive scan shifted by one with the identity
    # element written into the first slot along the scan axis).
    data = np.random.randint(-100, 100, size=(10, 10)).astype("int64")
    expected_result = np.roll(gt_func(data), 1)
    expected_result[0] = identity_value
    assert_relay_scanop(data, expected_result, exclusive=True)
    expected_result = np.roll(gt_func(data, axis=0), 1, axis=0)
    expected_result[0, :] = identity_value
    assert_relay_scanop(data, expected_result, exclusive=True, axis=0)
    expected_result = np.roll(gt_func(data, axis=1), 1, axis=1)
    expected_result[:, 0] = identity_value
    assert_relay_scanop(data, expected_result, exclusive=True, axis=1)
@tvm.testing.parametrize_targets
def test_cumsum(target, dev, executor_kind):
    """Exercise relay cumsum against np.cumsum (identity element 0)."""
    run_binop_tests(
        target,
        dev,
        executor_kind,
        binop_type="cumsum",
        gt_func=np.cumsum,
        identity_value=0,
    )
@tvm.testing.parametrize_targets
def test_cumprod(target, dev, executor_kind):
    """Exercise relay cumprod against np.cumprod (identity element 1)."""
    run_binop_tests(
        target,
        dev,
        executor_kind,
        binop_type="cumprod",
        gt_func=np.cumprod,
        identity_value=1,
    )
@tvm.testing.parametrize_targets
def test_scatter_nd(target, dev, executor_kind):
    """Tests for relay.op.scatter_nd: compile-time smoke tests plus
    runtime comparisons against a NumPy reference for every update mode."""
    def test_scatter_nd_large_shape():
        # Compile-only check: constant folding over a large scatter_nd must
        # not crash or exhaust memory.
        def before():
            data = relay.const(np.zeros((1, 900, 300), dtype="float32"), dtype="float32")
            indices = relay.const(np.ones((3, 1, 900, 300), dtype="int64"), dtype="int64")
            update = relay.const(np.ones((1, 900, 300), dtype="float32"), dtype="float32")
            b = relay.op.scatter_nd(data, indices, update)
            return relay.Function(relay.analysis.free_vars(b), b)
        passes = tvm.transform.Sequential(
            [
                relay.transform.InferType(),
                relay.transform.FoldConstant(),
            ]
        )
        before_mod = tvm.IRModule.from_expr(before())
        with tvm.transform.PassContext(opt_level=3):
            after_mod = passes(before_mod)
    test_scatter_nd_large_shape()
    def test_scatter_nd_inequal_m_k():
        # Compile-only check for indices whose leading dim M differs from the
        # data rank K.
        def before():
            data = relay.const(np.zeros((1, 1, 10), dtype="float32"), dtype="float32")
            indices = relay.const(np.zeros((2, 1, 1, 1), dtype="float32"), dtype="int64")
            update = relay.const(np.ones((1, 1, 1, 10), dtype="float32"), dtype="float32")
            b = relay.op.scatter_nd(data, indices, update)
            return relay.Function(relay.analysis.free_vars(b), b)
        passes = tvm.transform.Sequential(
            [
                relay.transform.InferType(),
                relay.transform.FoldConstant(),
            ]
        )
        before_mod = tvm.IRModule.from_expr(before())
        with tvm.transform.PassContext(opt_level=3):
            after_mod = passes(before_mod)
    test_scatter_nd_inequal_m_k()
    def verify_scatter_nd(
        data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
    ):
        # Run scatter_nd with indices passed directly as an input tensor.
        data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
        indices = relay.var("indices", shape=indices_np.shape, dtype=str(indices_np.dtype))
        updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
        out = relay.op.scatter_nd(data, indices, updates, mode)
        func = relay.Function([data, indices, updates], out)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data_np, indices_np, updates_np
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
    def verify_scatter_nd_with_stack(
        data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5
    ):
        data = relay.var("data", shape=data_np.shape, dtype=str(data_np.dtype))
        indices_vars = [
            relay.var("ind%d" % i, shape=v.shape, dtype=str(v.dtype))
            for i, v in enumerate(indices_np)
        ]
        updates = relay.var("updates", shape=updates_np.shape, dtype=str(updates_np.dtype))
        # test if scatter_nd works in case indices are prepared by another Relay operator
        indices = relay.op.stack(indices_vars, axis=0)
        out = relay.op.scatter_nd(data, indices, updates, mode)
        func = relay.Function(
            [data, updates] + indices_vars,
            out,
        )
        fargs = [data_np, updates_np]
        for a in indices_np:
            fargs.append(a)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            *fargs
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol)
    # TODO(vcchernov): check frameworks' int type requirements. ONNX expects int64 only
    for indice_dtype in ["uint8", "uint16", "uint32"]:
        data = np.zeros((2, 2)).astype("int64")
        indices = np.array([[1, 1, 0], [0, 1, 0]]).astype(indice_dtype)
        updates = np.array([2, 3, 0])
        out = np.array([[0, 0], [2, 3]])
        verify_scatter_nd(data, indices, updates, out)
        verify_scatter_nd_with_stack(data, indices, updates, out)
        data = np.zeros((2, 2, 2, 2)).astype("int64")
        indices = np.array([[0, 1], [1, 1]]).astype(indice_dtype)
        updates = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
        out = np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]])
        verify_scatter_nd(data, indices, updates, out)
        verify_scatter_nd_with_stack(data, indices, updates, out)
        # Repeated index rows: "add" mode accumulates all matching updates.
        indices = np.array([[1, 0, 0]]).astype(indice_dtype)
        updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
        shape = (2, 1560)
        data = np.zeros(shape).astype("float32")
        out = data.copy()
        out[1, :] += updates[0, :]
        out[0, :] += updates[1, :]
        out[0, :] += updates[2, :]
        verify_scatter_nd(data, indices, updates, out, mode="add")
        verify_scatter_nd_with_stack(data, indices, updates, out)
        # Every supported reduction mode against a scalar-loop reference.
        for mode in ["update", "add", "mul", "min", "max"]:
            indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
                indice_dtype
            )
            updates = np.ones((5, 3)).astype("float64")
            shape = (2, 7, 3)
            data = np.random.random(shape).astype("float64")
            out = data.copy()
            for i in range(indices.shape[1]):
                for j in range(updates.shape[1]):
                    if mode == "update":
                        out[indices[0, i], indices[1, i], j] = updates[i, j]
                    elif mode == "add":
                        out[indices[0, i], indices[1, i], j] += updates[i, j]
                    elif mode == "mul":
                        out[indices[0, i], indices[1, i], j] *= updates[i, j]
                    elif mode == "min":
                        out[indices[0, i], indices[1, i], j] = min(
                            out[indices[0, i], indices[1, i], j], updates[i, j]
                        )
                    elif mode == "max":
                        out[indices[0, i], indices[1, i], j] = max(
                            out[indices[0, i], indices[1, i], j], updates[i, j]
                        )
            verify_scatter_nd(data, indices, updates, out, mode)
            verify_scatter_nd_with_stack(data, indices, updates, out, mode)
def test_unique(target, dev):
    """Compare relay.unique against np.unique for every combination of
    dynamic/static shape, sorted/unsorted output, and counts on/off."""
    def calc_numpy_unique(data, is_sorted=False):
        # NumPy always returns sorted uniques; for the unsorted variant we
        # reorder everything by first occurrence.
        uniq, index, inverse, counts = np.unique(
            data, return_index=True, return_inverse=True, return_counts=True
        )
        num_uniq = np.array([len(uniq)]).astype("int32")
        if not is_sorted:
            order = np.argsort(index)
            reverse_order = np.argsort(order)
            uniq = uniq[order].astype(data.dtype)
            inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
            counts = counts[order].astype("int32")
            # In unsorted case, need to sort the index of first occurrence
            index = np.sort(index)
        return [
            uniq.astype(data.dtype),
            index.astype("int32"),
            inverse.astype("int32"),
            num_uniq,
            counts,
        ]
    def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False):
        if is_dyn:
            x = relay.var("x", relay.TensorType([relay.Any()], dtype))
        else:
            x = relay.var("x", relay.TensorType([n], dtype))
        outs = relay.unique(x, is_sorted, return_counts)
        outs = outs.astuple()
        func = relay.Function([x], outs)
        x_data = np.random.randint(50, size=n).astype(dtype)
        # Dynamic shapes require the VM executor; static can use graph.
        if is_dyn:
            backend = "vm"
        else:
            backend = "graph"
        mod = tvm.ir.IRModule.from_expr(func)
        tvm_res = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
            x_data
        )  # unique, indices, inverse_indices, num_unique, (counts)
        np_res = calc_numpy_unique(
            x_data, is_sorted
        )  # unique, indices, inverse_indices, num_unique, counts
        num_unique = np_res[3][0]
        # Only the first num_unique entries of the TVM buffers are meaningful.
        # num_unique
        assert num_unique == tvm_res[3].numpy()[0]
        # unique
        tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5)
        # indices
        tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5)
        # inverse_indices
        tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5)
        # counts
        if return_counts:
            tvm.testing.assert_allclose(tvm_res[4].numpy()[:num_unique], np_res[4], rtol=1e-5)
    for dtype in ["int32", "int64"]:
        # Bits of i enumerate all 8 combinations of the three flags.
        for i in range(8):
            is_dyn, is_sorted, return_counts = bool(i & 1), bool(i & 2), bool(i & 4)
            verify_unique(10, dtype, is_dyn, is_sorted, return_counts)
class TestSTFT:
    """Tests for relay.op.stft against a scalar-loop DFT reference."""
    # (signal, n_fft, hop_length, win_length, window, normalized, onesided)
    (
        data_np,
        n_fft,
        hop_length,
        win_length,
        window_np,
        normalized,
        onesided,
    ) = tvm.testing.parameters(
        (
            np.array([[1, 2, 3, 4, 5, 6]], dtype=np.float32),
            3,
            3,
            3,
            np.array([4, 3, 2], dtype=np.int32),
            False,
            True,
        ),
        (
            np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
            2,
            1,
            2,
            np.array([1, 3], dtype=np.int32),
            False,
            True,
        ),
        (
            np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
            2,
            1,
            2,
            np.array([1, 3], dtype=np.int32),
            True,
            True,
        ),
        (
            np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9], [2, 5, 7, 8, 5, 6, 7, 3, 2]], dtype=np.float32),
            2,
            1,
            2,
            np.array([1, 3], dtype=np.int32),
            False,
            False,
        ),
    )
    @tvm.testing.fixture(cache_return_value=True)
    def ref_res(
        self,
        data_np: np.ndarray,
        n_fft: int,
        hop_length: int,
        win_length: int,
        window_np,
        normalized,
        onesided,
    ):
        """
        This function calculates the expected output of the stft operator given the inputs.
        """
        def pad_window(window_np, n_fft):
            # Center-pad the window with zeros up to n_fft samples.
            shape = window_np.shape[-1]
            lpad = int((n_fft - shape) // 2)
            lengths = [(0, 0)] * len(window_np.shape)
            lengths[-1] = (lpad, int(n_fft - shape - lpad))
            # NOTE(review): a window longer than n_fft only prints instead of
            # raising; np.pad would then fail on negative lengths — consider
            # raising a ValueError here.
            if lpad < 0:
                print("ERROR Padding")
            return np.pad(window_np, lengths, mode="constant")
        import math
        # onesided=True keeps only the non-redundant half of the spectrum.
        if not onesided:
            n_rows = n_fft
        else:
            n_rows = n_fft // 2 + 1
        if window_np is None:
            window_np = np.ones(win_length, dtype=np.int32)
        window_np = pad_window(window_np, n_fft)
        n_cols = (data_np.shape[-1] - n_fft) // hop_length + 1
        # Last axis holds (real, imaginary) components.
        np_result = np.zeros((data_np.shape[0], n_rows, n_cols, 2))
        for batch in range(data_np.shape[0]):
            for w in range(n_rows):
                for m in range(n_cols):
                    # Naive windowed DFT for frequency bin w, frame m.
                    for k in range(n_fft):
                        np_result[batch][w][m][0] += (
                            window_np[k]
                            * data_np[batch][m * hop_length + k]
                            * math.cos(2 * math.pi * w * k / n_fft)
                        )
                        np_result[batch][w][m][1] -= (
                            window_np[k]
                            * data_np[batch][m * hop_length + k]
                            * math.sin(2 * math.pi * w * k / n_fft)
                        )
                    if normalized:
                        np_result[batch][w][m][0] /= math.sqrt(n_fft)
                        np_result[batch][w][m][1] /= math.sqrt(n_fft)
        return np_result
    use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"])
    @tvm.testing.parametrize_targets("llvm", "cuda")
    def test_stft(
        self,
        target,
        dev,
        ref_res: np.ndarray,
        data_np: np.ndarray,
        n_fft: int,
        hop_length: int,
        win_length: int,
        window_np: np.ndarray,
        normalized: bool,
        onesided: bool,
        use_dyn,
    ):
        if use_dyn:
            # Dynamic shapes are only supported by the VM executor.
            data = relay.var(
                "data",
                relay.TensorType([relay.Any(), relay.Any()], str(data_np.dtype)),
            )
            window = relay.var(
                "window",
                relay.TensorType([relay.Any()], str(window_np.dtype)),
            )
            backends = ["vm"]
        else:
            data = relay.var(
                "data",
                relay.TensorType(data_np.shape, str(data_np.dtype)),
            )
            window = relay.var(
                "window",
                relay.TensorType(window_np.shape, str(window_np.dtype)),
            )
            backends = ["graph", "vm"]
        z = relay.op.stft(data, n_fft, hop_length, win_length, window, normalized, onesided)
        func = relay.Function([data, window], z)
        verify_func(
            target, dev, func, [data_np, window_np], ref_res, rtol=1e-3, atol=1e-3, kinds=backends
        )
def test_trilu(target="llvm", dev=tvm.cpu()):
    """Compare relay.trilu with numpy.triu/tril over shapes, offsets, batches."""

    def check(shape, upper, k):
        inp = relay.var("data", relay.TensorType(shape, "float32"))
        mod = tvm.ir.IRModule.from_expr(relay.trilu(inp, k, upper))
        values = np.random.normal(size=shape).astype("float32")
        got = (
            relay.create_executor("graph", mod=mod, device=dev, target=target)
            .evaluate()(values)
            .numpy()
        )
        expected = np.triu(values, k) if upper else np.tril(values, k)
        tvm.testing.assert_allclose(got, expected)

    # Upper and lower triangle without offset.
    check((3, 3), True, 0)
    check((3, 3), False, 0)
    # Larger matrices with positive and negative offsets.
    check((6, 6), True, 1)
    check((6, 6), False, 2)
    check((6, 6), False, -2)
    # Batched input.
    check((8, 6, 6), False, -2)
def test_trilu_shape_i64():
    """trilu applied to a tensor broadcast to an int64-constant shape must
    still produce the expected values."""
    # NOTE(review): data_x is int32 while x is declared float32 — presumably
    # relies on an implicit conversion by the executor; confirm intended.
    data_x = np.ones((2, 1), dtype="int32")
    x = relay.var("x", shape=[2, 1], dtype="float32")
    v0 = relay.broadcast_to(x, shape=relay.const([2, 1], dtype="int64"))
    v2 = relay.add(relay.const([[1.0]]), v0)
    v3 = relay.trilu(v0, k=0)
    f = relay.Function([x], relay.Tuple([v2, v3]))
    tvm_res = relay.create_executor("graph", device=tvm.cpu(), target="llvm").evaluate(f)(data_x)
    np_res = (
        np.array([[2.0], [2.0]], dtype=np.float32),
        np.array([[1.0], [0.0]], dtype=np.float32),
    )
    tvm.testing.assert_allclose(tvm_res[0].numpy(), np_res[0])
    tvm.testing.assert_allclose(tvm_res[1].numpy(), np_res[1])
def test_trilu_reduce():
    """trilu followed by a reduction (argmin) must still match numpy."""
    ones = np.ones((2, 2), dtype="int32")
    diag_k = 0
    in0 = relay.var("i0", shape=[2, 2], dtype="int32")
    in1 = relay.var("i1", shape=(), dtype="int64")
    reduced = relay.argmin(relay.trilu(in0, in1), axis=[0])
    fn = relay.Function([in0, in1], reduced)
    got = (
        relay.create_executor("graph", device=tvm.cpu(), target="llvm")
        .evaluate(fn)(ones, diag_k)
        .numpy()
    )
    tvm.testing.assert_allclose(got, np.triu(ones, diag_k).argmin(axis=0))
# Standard TVM entry point: discovers and runs all tests in this file.
if __name__ == "__main__":
    tvm.testing.main()
| 89,394 | 37.137799 | 116 | py |
tvm | tvm-main/tests/python/relay/test_type_solver.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay import testing
import pytest
import numpy as np
def make_rel(name, args, num_inputs=None, attrs=None):
    """Build a TypeRelation for the registered relation `name` over `args`."""
    env_func = tvm.ir.EnvFunc.get("tvm.relay.type_relation." + name)
    # By default every argument except the last (the output) is an input.
    n_in = len(args) - 1 if num_inputs is None else num_inputs
    return relay.ty.TypeRelation(env_func, args, n_in, attrs)
def make_solver():
    """Create a test type solver with its FFI entry points bound as attributes."""
    solver = relay.analysis._ffi_api._test_type_solver()
    # The FFI object is itself a callable that looks up its methods by name.
    for method in ("Solve", "Unify", "Resolve", "AddConstraint"):
        setattr(solver, method, solver(method))

    def gen_type(name, args, out=None):
        # A fresh IncompleteType stands in for the output unless one is given.
        out = out if out else relay.ty.IncompleteType()
        solver.AddConstraint(make_rel(name, args + [out]))
        return out

    solver.gen_type = gen_type
    return solver
def test_bcast():
    """Broadcast relations propagate forward through an Identity relation."""
    solver = make_solver()
    lhs = relay.ty.TensorType((10, 20), "float32")
    rhs = relay.ty.TensorType((10, 1), "float32")
    extra = relay.ty.TensorType((10, 1, 1), "float32")
    bcast_out = solver.gen_type("Broadcast", [lhs, rhs])
    ident_out = solver.gen_type("Identity", [bcast_out])
    final = solver.gen_type("Broadcast", [ident_out, extra])
    assert solver.Solve()
    assert solver.Resolve(bcast_out) == relay.ty.TensorType((10, 20), "float32")
    assert solver.Resolve(final) == relay.ty.TensorType((10, 10, 20), "float32")
def test_backward_solving():
    """An incomplete input type is solved from a downstream constraint."""
    solver = make_solver()
    known = relay.ty.TensorType((10, 20), "float32")
    concrete = relay.ty.TensorType((10, 1, 1), "float32")
    hole = relay.ty.IncompleteType()
    bcast_out = solver.gen_type("Broadcast", [known, hole])
    # Pinning the hole via Identity lets the Broadcast output be resolved.
    solver.gen_type("Identity", [hole], out=concrete)
    assert solver.Solve()
    assert solver.Resolve(bcast_out) == relay.ty.TensorType((10, 10, 20), "float32")
def test_unify_tuple():
    """Unifying a tuple of holes with a concrete tuple yields the concrete one."""
    solver = make_solver()
    hole_a = relay.ty.IncompleteType()
    hole_b = relay.ty.IncompleteType()
    concrete = relay.ty.TensorType((10, 20), "float32")
    tup_holes = relay.ty.TupleType([hole_a, hole_b])
    tup_concrete = relay.ty.TupleType([concrete, concrete])
    assert solver.Unify(tup_holes, tup_concrete) == tup_concrete
def test_unify_global_type_var():
    """A global type var unifies only with itself."""
    solver = make_solver()
    gtv = relay.GlobalTypeVar("gtv")
    assert solver.Unify(gtv, gtv) == gtv
def test_unify_typecall():
    """Type calls unify argument-wise, exactly like tuples."""
    solver = make_solver()
    ctor = relay.GlobalTypeVar("gtv")
    hole_a = relay.ty.IncompleteType()
    hole_b = relay.ty.IncompleteType()
    concrete = relay.ty.TensorType((10, 20), "float32")
    call_holes = relay.ty.TypeCall(ctor, [hole_a, hole_b])
    call_concrete = relay.ty.TypeCall(ctor, [concrete, concrete])
    assert solver.Unify(call_holes, call_concrete) == call_concrete
def test_unify_functype():
    """A function type of holes unifies with a fully concrete function type."""
    solver = make_solver()
    holes = [relay.ty.IncompleteType() for _ in range(3)]
    unit = relay.ty.TupleType([])
    arg0 = relay.ty.TensorType((10, 20), "float32")
    arg1 = relay.ty.TensorType((10,), "float32")
    ft_holes = relay.ty.FuncType(holes[:2], holes[2])
    ft_concrete = relay.ty.FuncType([arg0, arg1], unit)
    assert solver.Unify(ft_holes, ft_concrete) == ft_concrete
def test_recursive_unify():
    """Unification recurses through nested tuples inside function types."""
    solver = make_solver()
    hole_a = relay.ty.IncompleteType()
    hole_b = relay.ty.IncompleteType()
    hole_c = relay.ty.IncompleteType()
    ten3d = relay.ty.TensorType((10, 10, 20), "float32")
    ten2d = relay.ty.TensorType((10, 20), "float32")
    ten1d = relay.ty.TensorType((10,), "float32")
    # hole_b appears twice, so it must resolve consistently to ten2d.
    tup_holes = relay.ty.TupleType([relay.ty.TupleType([hole_a, hole_b]), hole_b])
    tup_concrete = relay.ty.TupleType([relay.ty.TupleType([ten3d, ten2d]), ten2d])
    ft_holes = relay.ty.FuncType([tup_holes, hole_c], hole_c)
    ft_concrete = relay.ty.FuncType([tup_concrete, ten1d], ten1d)
    assert solver.Unify(ft_holes, ft_concrete) == ft_concrete
def test_unify_vars_under_tuples():
    """Holes under tuples collapse to a single variable consistently."""
    solver = make_solver()
    hole_a = relay.ty.IncompleteType()
    tup_aa = relay.ty.TupleType([hole_a, hole_a])
    assert solver.Unify(tup_aa, tup_aa) == tup_aa
    hole_b = relay.ty.IncompleteType()
    tup_bb = relay.ty.TupleType([hole_b, hole_b])
    # (a, b) against (b, a) forces a == b, so the result is one of the
    # single-variable tuples — either representative is acceptable.
    tup_ab = relay.ty.TupleType([hole_a, hole_b])
    tup_ba = relay.ty.TupleType([hole_b, hole_a])
    unified = solver.Unify(tup_ab, tup_ba)
    assert unified == tup_aa or unified == tup_bb
def test_binding_over_typevars():
    """Unifying over quantified type vars resolves the holes to those vars."""
    solver = make_solver()
    hole_in = relay.ty.IncompleteType()
    hole_out = relay.ty.IncompleteType()
    a = relay.ty.TypeVar("a")
    b = relay.ty.TypeVar("b")
    c = relay.ty.TypeVar("c")
    d = relay.ty.TypeVar("d")
    # Left side quantifies over unused vars (c, d); right side over used ones.
    ft_holes = relay.ty.FuncType([hole_in], hole_out, [c, d])
    ft_vars = relay.ty.FuncType([a], b, [a, b])
    assert solver.Unify(ft_holes, ft_vars) == solver.Resolve(ft_holes)
def test_recursive_backward_solving():
    """An Identity constraint with out= solves holes nested inside tuples."""
    solver = make_solver()
    ten2d = relay.ty.TensorType((10, 20), "float32")
    ten3d = relay.ty.TensorType((10, 1, 1), "float32")
    ten1d = relay.ty.TensorType((10,), "float32")
    holes = [relay.ty.IncompleteType() for _ in range(3)]
    tup_concrete = relay.ty.TupleType([relay.ty.TupleType([ten2d, ten3d]), ten1d])
    tup_holes = relay.ty.TupleType([relay.ty.TupleType([holes[0], holes[1]]), holes[2]])
    solver.gen_type("Identity", [tup_concrete], out=tup_holes)
    assert solver.Solve()
    assert solver.Resolve(tup_holes) == tup_concrete
def test_backward_solving_after_child_update():
    """Resolving a hole nested in a tuple must propagate to types that were
    unified with that tuple earlier (constraint order is significant here)."""
    solver = make_solver()
    tensor1 = relay.ty.TensorType((10, 20), "float32")
    tensor2 = relay.ty.TensorType((10, 1, 1), "float32")
    t1 = relay.ty.IncompleteType()
    t2 = relay.ty.IncompleteType()
    t3 = relay.ty.IncompleteType()
    tup1 = relay.ty.TupleType([t1, t2])
    tup2 = relay.ty.TupleType([t1, t3])
    tup_concrete = relay.ty.TupleType([tensor1, tensor2])
    t4 = solver.gen_type("Identity", [tup1])
    t5 = solver.gen_type("Identity", [tup2])
    solver.gen_type("Identity", [t4], out=t5)
    assert solver.Solve()
    # t2 and t3 have been unified; either representative may be returned.
    assert solver.Resolve(t3) == t3 or solver.Resolve(t3) == t2
    assert solver.Resolve(t4) == tup1 or solver.Resolve(t4) == tup2
    assert solver.Resolve(t5) == tup1 or solver.Resolve(t5) == tup2
    # updating the variables *inside* tup1 and tup2 should update t4 and t5
    solver.gen_type("Identity", [t1], out=tensor1)
    solver.gen_type("Identity", [t2], out=tensor2)
    assert solver.Solve()
    assert solver.Resolve(t4) == tup_concrete
    assert solver.Resolve(t5) == tup_concrete
def test_unify_quantified_funcs():
    """Quantified function types unify to the more specific quantification."""
    solver = make_solver()
    a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
    general = relay.FuncType([a, b], c, [a, b, c])
    specific = relay.FuncType([a, a], a, [a])
    assert solver.Unify(general, specific) == specific
    ident = relay.FuncType([a], a, [a])
    renamed = relay.FuncType([b], c, [b, c])
    assert solver.Unify(ident, renamed) == ident
def test_unify_quantified_func_and_concrete():
    """A fully generic function unifies with one whose return is pinned."""
    solver = make_solver()
    a, b = relay.TypeVar("a"), relay.TypeVar("b")
    generic = relay.FuncType([a], b, [a, b])
    # The right-hand side fixes the return type to the unit tuple.
    to_unit = relay.FuncType([b], relay.TupleType([]), [b])
    assert solver.Unify(generic, to_unit) == to_unit
def test_unify_quantified_funcs_nesting():
    """Quantified vars unify through nested tuples in args and results."""
    solver = make_solver()
    a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
    three_var = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])
    one_var = relay.FuncType([a, relay.TupleType([a, a])], relay.TupleType([a, a, a]), [a])
    assert solver.Unify(three_var, one_var) == one_var
def test_unify_quantified_funcs_var_order():
    """Unification should be insensitive to quantified-var order.

    The actual Unify call is disabled below because it currently crashes the
    solver (known defect); only the type construction is exercised.
    """
    solver = make_solver()
    a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
    ft1 = relay.FuncType([a, relay.TupleType([b, c])], relay.TupleType([a, b, c]), [a, b, c])
    ft2 = relay.FuncType([a, relay.TupleType([a, c])], relay.TupleType([a, a, c]), [a, c])
    # unified = solver.Unify(ft1, ft2)  # crashes here but it shouldn't
    # assert unified == ft2
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_tuple_unification():
    """Tuples whose members cannot be unified must raise."""
    solver = make_solver()
    hole = relay.ty.IncompleteType()
    other_hole = relay.ty.IncompleteType()
    ten_a = relay.ty.TensorType((1, 2, 3), "float32")
    ten_b = relay.ty.TensorType((2, 3), "float32")
    ten_c = relay.ty.TensorType((3,), "float32")
    # The inner tuple forces hole == ten_a and hole == ten_b simultaneously.
    lhs = relay.ty.TupleType([relay.ty.TupleType([hole, hole]), other_hole])
    rhs = relay.ty.TupleType([relay.ty.TupleType([ten_a, ten_b]), ten_c])
    solver.Unify(lhs, rhs)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_bad_recursive_unification():
    """A hole must not unify with a type containing itself (occurs check)."""
    solver = make_solver()
    hole = relay.ty.IncompleteType()
    solver.Unify(hole, relay.ty.TupleType([hole, hole]))
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_unify_invalid_global_typevars():
    """Distinct global type vars never unify."""
    solver = make_solver()
    first = relay.GlobalTypeVar("gtv1")
    second = relay.GlobalTypeVar("gtv2")
    solver.Unify(first, second)
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_typecall_var_unification():
    """Type calls with different constructors never unify."""
    solver = make_solver()
    ctor_a = relay.GlobalTypeVar("gtv1")
    ctor_b = relay.GlobalTypeVar("gtv2")
    solver.Unify(
        relay.TypeCall(ctor_a, [relay.IncompleteType()]),
        relay.TypeCall(ctor_b, [relay.IncompleteType()]),
    )
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_typecall_args_unification():
    """Same constructor but incompatible argument types must raise."""
    solver = make_solver()
    ctor = relay.GlobalTypeVar("gtv1")
    hole_a = relay.IncompleteType()
    hole_b = relay.IncompleteType()
    shape_a = relay.TensorType((1, 2, 3), "float32")
    shape_b = relay.TensorType((2, 3), "float32")
    shape_c = relay.TensorType((3,), "float32")
    # hole_a is forced to equal two differently-shaped tensors at once.
    solver.Unify(
        relay.TypeCall(ctor, [relay.TupleType([hole_a, hole_a]), hole_b]),
        relay.TypeCall(ctor, [relay.TupleType([shape_a, shape_b]), shape_c]),
    )
@pytest.mark.xfail(raises=tvm._ffi.base.TVMError)
def test_incompatible_quantified_func_unification():
    """Quantified function types with clashing structure must raise."""
    solver = make_solver()
    a, b, c = relay.TypeVar("a"), relay.TypeVar("b"), relay.TypeVar("c")
    lhs = relay.FuncType([a, b], c, [a, b, c])
    rhs = relay.FuncType([b, c], relay.TupleType([a]), [a, b, c])
    solver.Unify(lhs, rhs)
def test_integer_compatibility_in_layout_transform():
    """Regression test: CanonicalizeOps/AlterOpLayout must handle the int64
    shape constant fed to broadcast_to without raising."""
    x = relay.var("data", shape=(2, 3, 48, 48), dtype="float32")
    conv_out = relay.nn.conv2d(
        x,
        relay.var("weight", shape=(1, 3, 1, 1), dtype="float32"),
        strides=[47, 47],
        channels=1,
        kernel_size=[1, 1],
    )
    bias_out = relay.nn.bias_add(conv_out, relay.var("bias"))
    # int64 shape constant: this is what exercises integer compatibility.
    broadcast_out = relay.op.broadcast_to(bias_out, relay.const([2, 1, 2, 2], dtype="int64"))
    y = relay.add(bias_out, broadcast_out)
    mod, _ = testing.create_workload(y)
    # The test passes if these transforms run to completion without raising.
    with tvm.transform.PassContext(opt_level=3):
        with tvm.target.Target("llvm"):
            mod = relay.transform.CanonicalizeOps()(mod)
            mod = relay.transform.AlterOpLayout()(mod)
if __name__ == "__main__":
test_bcast()
test_backward_solving()
test_unify_tuple()
test_unify_typecall()
test_unify_functype()
test_recursive_unify()
test_unify_vars_under_tuples()
test_recursive_backward_solving()
test_backward_solving_after_child_update()
test_unify_quantified_funcs()
test_unify_quantified_func_and_concrete()
test_unify_quantified_funcs_nesting()
test_unify_quantified_funcs_var_order()
test_incompatible_tuple_unification()
test_bad_recursive_unification()
test_incompatible_typecall_var_unification()
test_incompatible_typecall_args_unification()
test_incompatible_quantified_func_unification()
test_integer_compatibility_in_layout_transform()
| 12,436 | 31.388021 | 93 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_unary_elementwise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, List
import numpy as np
import pytest
import scipy.special
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.qnn.op.legalizations import hardswish_func
def dequantize(data, scale, zp):
    """Map quantized values back to real space: scale * (data - zero_point)."""
    return (np.asarray(data) - zp) * scale
def generate_golden_output(
    floating_point_golden_func, dequantized_x, output_scale, output_zero_point, dtype
):
    """Quantize the float reference result into *dtype*'s representable range."""
    real = floating_point_golden_func(dequantized_x)
    quantized = np.around(real / output_scale + output_zero_point)
    info = np.iinfo({"int8": np.int8, "uint8": np.uint8}[dtype])
    return np.clip(quantized, info.min, info.max)
def run_qnn_func(func: relay.Function, args: List[relay.Expr]):
    """Legalize + canonicalize *func* and execute it on CPU via the graph executor."""
    mod = tvm.IRModule.from_expr(func)
    for xform in (
        relay.transform.InferType(),
        relay.qnn.transform.Legalize(),
        relay.qnn.transform.CanonicalizeOps(),
    ):
        mod = xform(mod)
    evaluator = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(
        mod["main"]
    )
    return evaluator(*args).numpy()
def create_qnn_func(
    qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
    x_data: np.ndarray,
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
    input_dtype: str = "uint8",
):
    """Wrap a unary qnn op in a single-input relay.Function with constant qparams."""
    data = relay.var("x", shape=x_data.shape, dtype=input_dtype)
    call = qnn_op(
        x=data,
        scale=relay.const(input_scale, "float32"),
        zero_point=relay.const(input_zero_point, "int32"),
        output_scale=relay.const(output_scale, "float32"),
        output_zero_point=relay.const(output_zero_point, "int32"),
    )
    return relay.Function([data], call)
def run_condition(
    qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
    floating_point_golden_func: Callable[[np.ndarray], np.ndarray],
    x_data: np.ndarray,
    input_scale: float,
    input_zero_point: int,
    output_scale: float,
    output_zero_point: int,
    input_dtype: str = "uint8",
):
    """Execute *qnn_op* on *x_data* and compare against the float reference.

    The reference path dequantizes the input, applies
    *floating_point_golden_func*, then requantizes with the output qparams.
    """
    func = create_qnn_func(
        qnn_op,
        x_data,
        input_scale=input_scale,
        input_zero_point=input_zero_point,
        output_scale=output_scale,
        output_zero_point=output_zero_point,
        input_dtype=input_dtype,
    )
    golden = generate_golden_output(
        floating_point_golden_func,
        dequantize(x_data, input_scale, input_zero_point),
        output_scale,
        output_zero_point,
        dtype=input_dtype,
    )
    actual = run_qnn_func(func, [x_data])
    np.testing.assert_equal(actual, golden.astype(input_dtype))
def generic_test(
    qnn_op: Callable[[relay.Expr, relay.Expr, relay.Expr, relay.Expr, relay.Expr], relay.Call],
    floating_point_golden_func: Callable[[np.ndarray], np.ndarray],
    input_dtype: str = "uint8",
    x_data: np.ndarray = None,
):
    """Run *qnn_op* over every representable input value with 0.125 scales.

    Parameters
    ----------
    qnn_op : the qnn operator under test.
    floating_point_golden_func : float reference implementation.
    input_dtype : "uint8" or "int8"; *x_data* is reinterpreted (viewed) as it.
    x_data : optional raw input values; defaults to all 256 byte values.

    Bug fix: the default used to be a ``np.arange`` evaluated once at
    definition time (a mutable default argument shared across all calls);
    it is now constructed fresh per call.
    """
    if x_data is None:
        x_data = np.arange(0, 256, dtype="uint8")
    x_data = x_data.view(input_dtype)
    return run_condition(
        qnn_op,
        floating_point_golden_func,
        x_data,
        input_scale=0.125,
        input_zero_point=0,
        output_scale=0.125,
        output_zero_point=0,
        input_dtype=input_dtype,
    )
class TestRSqrt:
    """qnn.rsqrt against the float reference 1/sqrt(x)."""

    @staticmethod
    def _golden(x):
        return 1 / np.sqrt(x)

    def test_saturation(self):
        x_data = np.array((255, 133, 0, 9)).reshape((1, 4))
        # First config keeps the qparams identical in and out; the second
        # doubles the output scale.
        for out_scale in (0.125, 0.25):
            run_condition(
                relay.qnn.op.rsqrt,
                self._golden,
                x_data,
                input_scale=0.125,
                input_zero_point=0,
                output_scale=out_scale,
                output_zero_point=0,
                input_dtype="uint8",
            )

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.rsqrt, self._golden, input_dtype="uint8")

    def test_all_numbers_int8(self):
        generic_test(
            relay.qnn.op.rsqrt,
            self._golden,
            input_dtype="int8",
            x_data=np.arange(1, 128, dtype="int8"),
        )
class TestSqrt:
    """qnn.sqrt against np.sqrt.

    Bug fix: this class was previously named ``Sqrt``, which does not match
    pytest's ``Test*`` discovery convention, so its tests were silently
    never collected or run.
    """

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.sqrt, np.sqrt, input_dtype="uint8")

    def test_all_numbers_int8(self):
        generic_test(
            relay.qnn.op.sqrt,
            np.sqrt,
            input_dtype="int8",
            x_data=np.arange(1, 128, dtype="int8"),
        )


# Backward-compatible alias for any external reference to the old name.
Sqrt = TestSqrt
class TestExp:
    """qnn.exp against np.exp over every representable input value."""

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.exp, np.exp, "uint8")

    def test_all_numbers_int8(self):
        generic_test(relay.qnn.op.exp, np.exp, "int8")
class TestTanh:
    """qnn.tanh against np.tanh over every representable input value."""

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.tanh, np.tanh, "uint8")

    def test_all_numbers_int8(self):
        generic_test(relay.qnn.op.tanh, np.tanh, "int8")
class TestErf:
    """qnn.erf against scipy.special.erf over every representable input value."""

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.erf, scipy.special.erf, "uint8")

    def test_all_numbers_int8(self):
        generic_test(relay.qnn.op.erf, scipy.special.erf, "int8")
class TestSigmoid:
    """qnn.sigmoid against the logistic function 1/(1+exp(-x))."""

    @staticmethod
    def _sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.sigmoid, self._sigmoid, "uint8")

    def test_all_numbers_int8(self):
        generic_test(relay.qnn.op.sigmoid, self._sigmoid, "int8")
class TestHardswish:
    """qnn.hardswish against the legalization reference hardswish_func."""

    def test_all_numbers_uint8(self):
        generic_test(relay.qnn.op.hardswish, hardswish_func, "uint8")

    def test_all_numbers_int8(self):
        generic_test(relay.qnn.op.hardswish, hardswish_func, "int8")
if __name__ == "__main__":
tvm.testing.main()
| 6,785 | 29.430493 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_fast_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.relay.transform import FastMath
def _check_fast_unary(op, fast_name):
    """Build a one-op module, run FastMath, and verify *op* was rewritten.

    Checks both the standalone FastMath pass and the relay.build pipeline
    with the FastMath pass explicitly required.  Factored out of the three
    previously-duplicated test bodies (exp / tanh / erf).
    """
    x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
    func = relay.Function([x], op(x))
    mod = tvm.IRModule.from_expr(func)
    fast_mod = FastMath()(mod)
    assert fast_name in fast_mod.astext()
    # Check that the FastMath option also works through relay.optimize.
    with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
        fast_mod = relay.optimize(mod, target="llvm", params=None)
    assert fast_name in fast_mod[0].astext()


def test_exp():
    _check_fast_unary(relay.exp, "fast_exp")


def test_tanh():
    _check_fast_unary(relay.tanh, "fast_tanh")


def test_erf():
    _check_fast_unary(relay.erf, "fast_erf")
def test_softmax():
    """FastMath should rewrite nn.softmax into nn.fast_softmax during build."""
    data = relay.var("x", shape=(1, 16), dtype="float32")
    func = relay.Function([data], relay.nn.softmax(data))
    mod = tvm.IRModule.from_expr(func)
    with tvm.transform.PassContext(opt_level=3, required_pass=["FastMath"]):
        optimized = relay.optimize(mod, target="llvm")
    assert "nn.fast_softmax" in optimized[0].astext()
if __name__ == "__main__":
test_exp()
test_tanh()
test_erf()
test_softmax()
| 2,839 | 32.809524 | 76 | py |
tvm | tvm-main/tests/python/relay/test_pass_split_args.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing import run_infer_type, create_workload
def run_opt_pass(expr, opt_pass):
    """Apply *opt_pass* (after type inference) and return the rewritten main."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = opt_pass(relay.transform.InferType()(mod))
    main = mod["main"]
    # Functions come back whole; bare expressions come back as the body.
    return main if isinstance(expr, relay.Function) else main.body
def test_split_concat_metal():
    """SplitArgs must break a 100-input concatenate into chunks that respect
    Metal's max_function_args limit (one slot is reserved for the output)."""
    shape = (1, 1, 1, 3)
    dtype = "float32"
    axis = 1
    inputs = []
    for i in range(100):
        inputs.append(relay.var("p{}".format(i), shape=shape, dtype=dtype))
    def before():
        inp = relay.Tuple(inputs)
        return relay.op.concatenate(inp, axis)
    def expected():
        limit = tvm.target.Target("metal").max_function_args - 1  # one buffer with output
        # Ceiling division: number of chunks needed to fit under the limit.
        splitNum = int(len(inputs) / limit)
        if len(inputs) % limit > 0:
            splitNum += 1
        splitted = []
        for i in range(splitNum):
            startIdx = i * limit
            argsCount = min(limit, len(inputs) - startIdx)
            args = []
            for j in range(argsCount):
                args.append(inputs[j + startIdx])
            t = relay.Tuple(args)
            concat = relay.op.concatenate(t, axis)
            # stop_fusion keeps each chunk from being fused back together.
            splitted.append(relay.annotation.stop_fusion(concat))
        inp = relay.Tuple(splitted)
        return relay.op.concatenate(inp, axis)
    # the fold constant should work on any context.
    res = run_opt_pass(before(), transform.SplitArgs(tvm.target.Target("metal").max_function_args))
    exp = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(res, exp)
def test_split_concat_cuda():
    """CUDA's argument limit is large enough that SplitArgs leaves a
    100-input concatenate untouched (expected graph == input graph)."""
    shape = (1, 1, 1, 3)
    dtype = "float32"
    axis = 1
    inputs = [relay.var("p{}".format(i), shape=shape, dtype=dtype) for i in range(100)]

    def make_concat():
        return relay.op.concatenate(relay.Tuple(inputs), axis)

    limit = tvm.target.Target("cuda").max_function_args
    res = run_opt_pass(make_concat(), transform.SplitArgs(limit))
    exp = run_opt_pass(make_concat(), transform.InferType())
    assert tvm.ir.structural_equal(res, exp)
if __name__ == "__main__":
test_split_concat_metal()
test_split_concat_cuda()
| 3,306 | 33.092784 | 99 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
# We use llvm target for testing functionality. `llvm` points to an older Intel
# generation machine, that legalizes to a simple lowering. Therefore, the
# legalization is overwritten such that it can be skipped and we use the
# QNNCanonicalizeOps lowering for the testing.
def legalize_qnn_batch_matmul(attrs, inputs, types):
    # Returning None disables legalization so the QNNCanonicalizeOps
    # lowering is exercised instead (see the module comment above).
    return None
def make_requantize_params(input_scale, output_scale, output_zero_point, out_dtype):
    """Bundle requantize settings into the dict consumed by the test driver."""
    return {
        "input_scale": input_scale,
        "output_scale": output_scale,
        "output_zero_point": output_zero_point,
        "out_dtype": out_dtype,
    }
def make_configuration(
    quantized_x,
    quantized_y,
    dtype,
    x_shape,
    y_shape,
    x_zero_point,
    y_zero_point,
    x_scale,
    y_scale,
    output,
    out_dtype="int32",
    requantize=None,
):
    """Collect one batch_matmul test case into a flat configuration dict."""
    return {
        "quantized_x": quantized_x,
        "quantized_y": quantized_y,
        "dtype": dtype,
        "x_shape": x_shape,
        "y_shape": y_shape,
        "x_zero_point": x_zero_point,
        "y_zero_point": y_zero_point,
        "x_scale": x_scale,
        "y_scale": y_scale,
        "output": output,
        "out_dtype": out_dtype,
        "requantize": requantize,
    }
def make_int_configuration(
    xzero_point_zero=True,
    yzero_point_zero=True,
    requantize_output=False,
    per_channel=False,
    batch_size=1,
):
    """Build an int8 qnn.batch_matmul test case with precomputed outputs.

    Parameters
    ----------
    xzero_point_zero, yzero_point_zero : bool
        When False, the corresponding zero point is -123 instead of 0.
    requantize_output : bool
        Append requantize parameters for an int8 output; only supported
        with both zero points at zero.
    per_channel : bool
        Currently unused; kept for signature compatibility.
    batch_size : int
        Leading batch dimension for x, y and the expected output.
    """
    x_shape, y_shape, output_shape = (batch_size, 4, 5), (batch_size, 3, 5), (batch_size, 4, 3)
    # Idiom fix: boolean flags are used directly instead of "== True".
    x_zero_point = 0 if xzero_point_zero else -123
    y_zero_point = 0 if yzero_point_zero else -123
    in_dtype = "int8"
    out_dtype = "int8" if requantize_output else "int32"
    # x is 4 rows of 5; the row sums (25, -1, 25, 3) drive the zero-point
    # correction terms in the expected outputs below.
    x_values = [
        1, 3, 5, 7, 9,
        11, 13, 15, -19, -21,
        1, 3, 5, 7, 9,
        11, 13, -17, 17, -21,
    ]
    quantized_x_np = (
        np.array(x_values)[np.newaxis, np.newaxis, :]
        .repeat(batch_size, axis=1)
        .astype(in_dtype)
        .reshape(x_shape)
    )
    y_values = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 1, 3, 5, 7, 9]
    quantized_y_np = (
        np.array(y_values)[np.newaxis, np.newaxis, :]
        .repeat(batch_size, axis=1)
        .astype(in_dtype)
        .reshape(y_shape)
    )
    x_scale = 0.5
    y_scale = 0.5
    output_scale = 2.0
    if requantize_output:
        # The requantized reference values assume zero-centered inputs.
        assert xzero_point_zero and yzero_point_zero
        output = np.array([20, 51, 20, -26, -27, -26, 20, 51, 20, -14, -10, -14])
    elif not xzero_point_zero and not yzero_point_zero:
        output = np.array(
            [81960, 88360, 81960, 78400, 84540, 78400, 81960, 88360, 81960, 78984, 85164, 78984]
        )
    elif xzero_point_zero and not yzero_point_zero:
        output = np.array([3240, 3490, 3240, -320, -330, -320, 3240, 3490, 3240, 264, 294, 264])
    elif not xzero_point_zero and yzero_point_zero:
        output = np.array([3240, 9640, 3240, 2878, 9018, 2878, 3240, 9640, 3240, 2970, 9150, 2970])
    else:
        output = np.array([165, 415, 165, -197, -207, -197, 165, 415, 165, -105, -75, -105])
    requant_params = (
        make_requantize_params(x_scale * y_scale, output_scale, -1, "int8")
        if requantize_output
        else None
    )
    # The reference values above are for batch size 1; tile them out.
    output = (
        output[np.newaxis, np.newaxis, :]
        .repeat(batch_size, axis=1)
        .astype(out_dtype)
        .reshape(output_shape)
    )
    return make_configuration(
        quantized_x=quantized_x_np,
        quantized_y=quantized_y_np,
        dtype=in_dtype,
        x_shape=x_shape,
        y_shape=y_shape,
        x_zero_point=x_zero_point,
        y_zero_point=y_zero_point,
        x_scale=x_scale,
        y_scale=y_scale,
        output=output,
        requantize=requant_params,
    )
def qnn_batch_matmul_driver(test_configuration):
    """Build, compile and run qnn.batch_matmul from *test_configuration*.

    Constructs the op (optionally followed by a requantize stage), lowers
    it via QNN CanonicalizeOps, executes on LLVM/CPU, and checks both the
    output values and the output dtype against the configuration.
    """
    in_dtype = test_configuration["dtype"]
    out_dtype = test_configuration["out_dtype"]
    quantized_x_name = "quantized_x"
    quantized_y_name = "quantized_y"
    expected_out_dtype = test_configuration["out_dtype"]
    quantized_x = relay.var(quantized_x_name, shape=test_configuration["x_shape"], dtype=in_dtype)
    quantized_y = relay.var(quantized_y_name, shape=test_configuration["y_shape"], dtype=in_dtype)
    mod = relay.qnn.op.batch_matmul(
        quantized_x,
        quantized_y,
        relay.const(test_configuration["x_zero_point"], "int32"),
        relay.const(test_configuration["y_zero_point"], "int32"),
        relay.const(test_configuration["x_scale"], "float32"),
        relay.const(test_configuration["y_scale"], "float32"),
    )
    if test_configuration["requantize"] is not None:
        # When a requantize stage is configured, the final dtype comes from it.
        requantize_config = test_configuration["requantize"]
        mod = relay.qnn.op.requantize(
            mod,
            input_scale=relay.const(requantize_config["input_scale"], "float32"),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(requantize_config["output_scale"], "float32"),
            output_zero_point=relay.const(requantize_config["output_zero_point"], "int32"),
            out_dtype=requantize_config["out_dtype"],
        )
        expected_out_dtype = requantize_config["out_dtype"]
    mod = relay.Function(relay.analysis.free_vars(mod), mod)
    mod = tvm.IRModule.from_expr(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    with tvm.transform.PassContext(opt_level=2):
        graph, lib, params = relay.build(mod, "llvm", params=None)
        mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        mod.set_input(quantized_x_name, test_configuration[quantized_x_name])
        mod.set_input(quantized_y_name, test_configuration[quantized_y_name])
        mod.set_input(**params)
        mod.run()
        res = mod.get_output(0).numpy()
        np.testing.assert_equal(res, test_configuration["output"])
        assert res.dtype == expected_out_dtype
def test_qnn_batch_matmul_xzp0_yzp0():
    """Both zero points zero: plain int32 accumulation path."""
    with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
        for n in (1, 4, 7):
            qnn_batch_matmul_driver(
                make_int_configuration(xzero_point_zero=True, yzero_point_zero=True, batch_size=n)
            )
def test_qnn_batch_matmul_xzp0():
    """x zero point zero, y zero point nonzero."""
    with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
        for n in (1, 4, 7):
            qnn_batch_matmul_driver(
                make_int_configuration(xzero_point_zero=True, yzero_point_zero=False, batch_size=n)
            )
def test_qnn_batch_matmul_yzp0():
    """x zero point nonzero, y zero point zero."""
    with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
        for n in (1, 4, 7):
            qnn_batch_matmul_driver(
                make_int_configuration(xzero_point_zero=False, yzero_point_zero=True, batch_size=n)
            )
def test_qnn_batch_matmul():
    """Both zero points nonzero: full zero-point correction path."""
    with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
        for n in (1, 4, 7):
            qnn_batch_matmul_driver(
                make_int_configuration(xzero_point_zero=False, yzero_point_zero=False, batch_size=n)
            )
def test_qnn_batch_matmul_with_requantized_output():
    """int8 output via a trailing requantize stage.

    Bug fix: the legalization override was previously registered on
    "qnn.dense" (likely copy-pasted from the dense tests) instead of
    "qnn.batch_matmul", so batch_matmul legalization was not actually
    disabled for this test, unlike its four sibling tests above.
    """
    with TempOpAttr("qnn.batch_matmul", "FTVMQnnLegalize", legalize_qnn_batch_matmul):
        for batch_size in [1, 4, 7]:
            int8_requantized_output_params = make_int_configuration(
                requantize_output=True, batch_size=batch_size
            )
            qnn_batch_matmul_driver(int8_requantized_output_params)
if __name__ == "__main__":
test_qnn_batch_matmul_xzp0_yzp0()
test_qnn_batch_matmul_xzp0()
test_qnn_batch_matmul_yzp0()
test_qnn_batch_matmul()
test_qnn_batch_matmul_with_requantized_output()
| 9,461 | 33.532847 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_combine_parallel_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay import transform
def run_combine_parallel(expr, min_num_branches=3):
    """Wrap *expr* in a module, run CombineParallelConv2D, return the new main."""
    module = tvm.IRModule.from_expr(expr)
    module = transform.CombineParallelConv2D(min_num_branches)(module)
    return module["main"]
def run_opt_pass(expr, opt_pass):
    """Type-infer *expr*, apply *opt_pass*, and return the resulting main."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    module = tvm.IRModule.from_expr(expr)
    module = opt_pass(tvm.relay.transform.InferType()(module))
    return module["main"]
def test_combine_parallel_conv2d():
    """Simple testcase: combinable 1x1 convs merge; the 3x3 conv stays out."""
    def before(x, w1, w2, w3, w4):
        args = [x, w1, w2, w3, w4]
        # w1/w2/w4 are 1x1 convs on the same input and are combinable.
        y1 = relay.nn.conv2d(x, w1)
        y2 = relay.nn.conv2d(x, w2)
        # y3 cannot be combined
        y3 = relay.nn.conv2d(x, w3)
        y4 = relay.nn.conv2d(x, w4)
        y5 = relay.nn.max_pool2d(x)
        y = relay.Tuple((y1, y2, y3, y4, y5))
        return relay.Function(args, y)
    def expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4):
        # use a fixed order of args so alpha equal check can pass
        args = [x, w1, w2, w3, w4]
        # Combined conv: weights concatenated along the output-channel axis;
        # the original results are recovered with size-mode strided slices.
        w = relay.concatenate((w1, w2, w4), axis=0)
        y = relay.nn.conv2d(x, w, channels=channels1 + channels2 + channels4)
        y1 = relay.strided_slice(
            y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
        )
        y3 = relay.nn.conv2d(x, w3)
        y4 = relay.strided_slice(
            y,
            begin=[0, channels1 + channels2],
            end=[-1, channels4],
            strides=[1, 1],
            slice_mode="size",
        )
        y5 = relay.nn.max_pool2d(x)
        y = relay.Tuple((y1, y2, y3, y4, y5))
        return relay.Function(args, y)
    def check(x_shape, channels1, channels2, channels3, channels4):
        # Build before/expected graphs and compare structurally after the pass.
        x = relay.var("x", shape=x_shape)
        in_c = x_shape[1]
        w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
        w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
        w3 = relay.var("w3", shape=(channels3, in_c, 3, 3))
        w4 = relay.var("w4", shape=(channels4, in_c, 1, 1))
        y_before = before(x, w1, w2, w3, w4)
        y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
        y_expected = expected(x, w1, w2, w3, w4, channels1, channels2, channels3, channels4)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
    check((1, 4, 16, 16), 4, 4, 4, 4)
    check((1, 4, 16, 16), 4, 8, 4, 7)
def test_combine_parallel_conv2d_scale_relu():
    """Testcase of combining conv2d + scale + relu: per-channel multiplies
    and relus follow the combined conv; the bias add stays per-branch."""
    def before(x, w1, w2, scale1, scale2, bias):
        args = [x, w1, w2, scale1, scale2, bias]
        y1 = relay.nn.conv2d(x, w1)
        y1 = relay.multiply(y1, scale1)
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(x, w2)
        y2 = relay.multiply(y2, scale2)
        y2 = relay.nn.relu(y2)
        y2 = relay.add(y2, bias)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, scale1, scale2, bias, channels1, channels2):
        args = [x, w1, w2, scale1, scale2, bias]
        # Per-output-channel scales concatenate along axis 0 like the weights,
        # so multiply and relu can follow the combined conv.
        w = relay.concatenate((w1, w2), axis=0)
        scale = relay.concatenate((scale1, scale2), axis=0)
        y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
        y = relay.multiply(y, scale)
        y = relay.nn.relu(y)
        y1 = relay.strided_slice(
            y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
        )
        # The bias add only exists on the second branch; applied after the slice.
        y2 = relay.add(y2, bias)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(x_shape, channels1, channels2):
        x = relay.var("x", shape=x_shape)
        in_c = x_shape[1]
        w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
        w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
        scale1 = relay.var("scale1", shape=(channels1, 1, 1))
        scale2 = relay.var("scale2", shape=(channels2, 1, 1))
        bias = relay.var("bias", shape=(channels2, 1, 1))
        y_before = before(x, w1, w2, scale1, scale2, bias)
        y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
        y_expected = expected(x, w1, w2, scale1, scale2, bias, channels1, channels2)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
    check((1, 4, 16, 16), 4, 8)
def test_combine_parallel_conv2d_scale():
    """Testcase of un-combinable scale: scalar (shape (1,)) scales cannot be
    concatenated per-channel, so the multiplies stay after the slices."""
    def before(x, w1, w2, scale1, scale2):
        args = [x, w1, w2, scale1, scale2]
        y1 = relay.nn.conv2d(x, w1)
        y1 = relay.multiply(y1, scale1)
        y2 = relay.nn.conv2d(x, w2)
        y2 = relay.multiply(y2, scale2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, scale1, scale2, channels1, channels2):
        args = [x, w1, w2, scale1, scale2]
        # Only the convs combine; each branch keeps its own multiply.
        w = relay.concatenate((w1, w2), axis=0)
        y = relay.nn.conv2d(x, w, channels=channels1 + channels2)
        y1 = relay.strided_slice(
            y, begin=[0, 0], end=[-1, channels1], strides=[1, 1], slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, channels1], end=[-1, channels2], strides=[1, 1], slice_mode="size"
        )
        y1 = relay.multiply(y1, scale1)
        y2 = relay.multiply(y2, scale2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(x_shape, channels1, channels2):
        x = relay.var("x", shape=x_shape)
        in_c = x_shape[1]
        w1 = relay.var("w1", shape=(channels1, in_c, 1, 1))
        w2 = relay.var("w2", shape=(channels2, in_c, 1, 1))
        scale1 = relay.var("scale1", shape=(1,))
        scale2 = relay.var("scale2", shape=(1,))
        y_before = before(x, w1, w2, scale1, scale2)
        y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
        y_expected = expected(x, w1, w2, scale1, scale2, channels1, channels2)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
    check((1, 4, 16, 16), 4, 8)
def test_combine_parallel_conv2d_multiple_blocks():
    """The pass must combine each repeated block of two parallel convs
    independently, across several stacked blocks."""
    def before(x, w, repeat):
        args = [x, w]
        y = x
        # Each iteration creates a fresh pair of parallel convs on y.
        for i in range(repeat):
            y1 = relay.nn.conv2d(y, w)
            y2 = relay.nn.conv2d(y, w)
            y = relay.concatenate((y1, y2), axis=1)
        return relay.Function(args, y)
    def expected(x, w, channels, repeat):
        args = [x, w]
        y = x
        for i in range(repeat):
            # Each block becomes one doubled-channel conv plus two slices.
            w_concat = relay.concatenate((w, w), axis=0)
            y = relay.nn.conv2d(y, w_concat, channels=channels * 2)
            y1 = relay.strided_slice(
                y, begin=[0, 0], end=[-1, channels], strides=[1, 1], slice_mode="size"
            )
            y2 = relay.strided_slice(
                y, begin=[0, channels], end=[-1, channels], strides=[1, 1], slice_mode="size"
            )
            y = relay.concatenate((y1, y2), axis=1)
        return relay.Function(args, y)
    def check(x_shape, repeat):
        x = relay.var("x", shape=x_shape)
        in_c = x_shape[1]
        # out_c = in_c / 2 so concatenating two conv outputs restores in_c.
        out_c = in_c // 2
        w = relay.var("w", shape=(out_c, in_c, 1, 1))
        y_before = before(x, w, repeat)
        y = run_opt_pass(y_before, transform.CombineParallelConv2D(min_num_branches=2))
        y_expected = expected(x, w, out_c, repeat)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        assert tvm.ir.structural_equal(y, y_expected, map_free_vars=True)
    check((1, 4, 16, 16), 4)
if __name__ == "__main__":
test_combine_parallel_conv2d()
test_combine_parallel_conv2d_scale_relu()
test_combine_parallel_conv2d_scale()
test_combine_parallel_conv2d_multiple_blocks()
| 8,957 | 38.462555 | 93 | py |
tvm | tvm-main/tests/python/relay/test_annotated_regions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, invalid-name
import tvm
from tvm import relay
from tvm.relay.op.annotation import compiler_begin, compiler_end
def check_region(region_set, target, args, nodes, rets):
    """Assert the region owning args[0] has the given target, args, nodes, rets."""
    region = region_set.get_region(args[0])
    assert region
    assert region.target == target
    for expect, got in ((args, region.args), (nodes, region.nodes), (rets, region.rets)):
        assert set(expect) == set(got)
def test_region_set_creator_diamond():
    """Diamond graph: one producer region fans out into two paths (one on
    the default target) that rejoin in an add; expects 4 regions."""
    data = relay.var("data", shape=(10, 10))
    cb_1 = compiler_begin(data, "test_target")
    O_1 = relay.abs(cb_1)
    ce_1 = compiler_end(O_1, "test_target")
    ce_2 = compiler_end(O_1, "test_target")
    cb_2 = compiler_begin(ce_1, "test_target")
    O_2 = relay.nn.relu(cb_2)
    ce_3 = compiler_end(O_2, "test_target")
    cb_d = compiler_begin(ce_2, "default")
    X = relay.tanh(cb_d)
    ce_d = compiler_end(X, "default")
    cb_3 = compiler_begin(ce_3, "test_target")
    cb_4 = compiler_begin(ce_d, "test_target")
    O_3 = relay.add(cb_3, cb_4)
    ce_4 = compiler_end(O_3, "test_target")
    diamond = relay.Function([data], ce_4)
    region_set = relay.analysis.AnnotatedRegionSet(
        diamond, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
    )
    assert len(region_set) == 4
    # Region 1: abs producer with two ends (fan-out to both paths).
    check_region(
        region_set,
        "test_target",
        [cb_1],
        [cb_1, O_1, ce_1, ce_2],
        [ce_1, ce_2],
    )
    # Region 2: relu path on the test target.
    check_region(
        region_set,
        "test_target",
        [cb_2],
        [cb_2, O_2, ce_3],
        [ce_3],
    )
    # Region 3: tanh path on the default target.
    check_region(
        region_set,
        "default",
        [cb_d],
        [cb_d, X, ce_d],
        [ce_d],
    )
    # Region 4: the rejoining add with two begin args.
    check_region(
        region_set,
        "test_target",
        [cb_3, cb_4],
        [cb_3, cb_4, O_3, ce_4],
        [ce_4],
    )
def test_region_set_creator_merged():
    """abs and relu share one region (no end between them); with the default
    tanh region and the final add region, expects 3 regions."""
    data = relay.var("data", shape=(10, 10))
    cb_1 = compiler_begin(data, "test_target")
    O_1 = relay.abs(cb_1)
    ce_2 = compiler_end(O_1, "test_target")
    # relu consumes O_1 directly (no compiler_end in between), so abs and
    # relu end up in the same region.
    O_2 = relay.nn.relu(O_1)
    ce_3 = compiler_end(O_2, "test_target")
    cb_d = compiler_begin(ce_2, "default")
    X = relay.tanh(cb_d)
    ce_d = compiler_end(X, "default")
    cb_3 = compiler_begin(ce_3, "test_target")
    cb_4 = compiler_begin(ce_d, "test_target")
    O_3 = relay.add(cb_3, cb_4)
    O_4 = relay.add(cb_3, cb_4)
    O_5 = relay.Tuple([O_3, O_4])
    ce_4 = compiler_end(O_5, "test_target")
    merged = relay.Function([data], ce_4)
    region_set = relay.analysis.AnnotatedRegionSet(
        merged, relay.op.get("annotation.compiler_begin"), relay.op.get("annotation.compiler_end")
    )
    assert len(region_set) == 3
    # Region 1: merged abs + relu with two ends.
    check_region(
        region_set,
        "test_target",
        [cb_1],
        [cb_1, O_1, O_2, ce_2, ce_3],
        [ce_2, ce_3],
    )
    # Region 2: tanh path on the default target.
    check_region(
        region_set,
        "default",
        [cb_d],
        [cb_d, X, ce_d],
        [ce_d],
    )
    # Region 3: two adds plus the tuple that returns them together.
    check_region(
        region_set,
        "test_target",
        [cb_3, cb_4],
        [cb_3, cb_4, O_3, O_4, O_5, ce_4],
        [ce_4],
    )
if __name__ == "__main__":
test_region_set_creator_diamond()
test_region_set_creator_merged()
| 3,998 | 29.295455 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_fuse_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
import tvm.testing
import tvm.topi.testing
def test_fuse_simple():
    """An elementwise chain add -> exp -> squeeze fuses into one primitive call."""

    def make_input():
        inp = relay.var("x", shape=(10, 20))
        chain = relay.squeeze(relay.exp(relay.add(inp, relay.const(1, "float32"))))
        return relay.Function([inp], chain)

    def make_expected():
        # The same chain wrapped in a single function marked Primitive.
        param = relay.var("p", shape=(10, 20))
        body = relay.squeeze(relay.exp(relay.add(param, relay.const(1, "float32"))))
        fused = relay.Function([param], body)
        fused = fused.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        inp = relay.var("x", shape=(10, 20))
        return relay.Function([inp], relay.Call(fused, [inp]))

    fused_fn = run_opt_pass(make_input(), transform.FuseOps())
    reference = run_opt_pass(make_expected(), transform.InferType())
    assert tvm.ir.structural_equal(fused_fn, reference)
def test_conv2d_fuse():
    """Test fusion case of conv2d.

    A diamond of conv2d ops with elementwise consumers must split into four
    primitive functions (segments 0-3) at fuse_opt_level=2.
    """
    def before(dshape):
        x = relay.var("x", shape=dshape)
        x = relay.add(x, relay.const(1, "float32"))
        y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=16)
        # this is the next dominator.
        y1 = relay.add(relay.const(1, "float32"), y)
        y = relay.add(y, y1)
        # second path
        z2 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(1, 1), padding=(0, 0), channels=16)
        z3 = relay.nn.conv2d(y, relay.var("w3"), kernel_size=(3, 3), padding=(1, 1), channels=16)
        # add can only be fused to z1
        z = relay.add(z2, z3)
        return relay.Function(relay.analysis.free_vars(z), z)
    def expected(dshape):
        # segment 0
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f0 = relay.Function([x], y)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # segment 1
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        y = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=16)
        y1 = relay.add(relay.const(1, "float32"), y)
        y = relay.add(y, y1)
        f1 = relay.Function([x, w], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # segment 2
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        z2 = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=16)
        f2 = relay.Function([x, w], z2)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # segment 3
        x = relay.var("p0", shape=dshape)
        w = relay.var("p1")
        offset = relay.var("p2", shape=dshape)
        z3 = relay.nn.conv2d(x, w, kernel_size=(1, 1), padding=(0, 0), channels=16)
        z3 = relay.add(z3, offset)
        f3 = relay.Function([x, w, offset], z3)
        f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # compose
        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        y = relay.Call(f1, [y, relay.var("w1")])
        z2 = relay.Call(f2, [y, relay.var("w3")])
        z3 = relay.Call(f3, [y, relay.var("w2"), z2])
        z = z3
        return relay.Function(relay.analysis.free_vars(z), z)
    dshape = (1, 16, 64, 64)
    z = before(dshape)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_concatenate():
    """Test fusion case involving concat op and Tuple node.

    The max_pool2d stays in its own primitive function while upsampling,
    concatenate, and the trailing add fuse together.
    """
    def before(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        upsampled = relay.nn.upsampling(pooled, scale_h=2, scale_w=2, layout="NCHW")
        concat = relay.concatenate((upsampled, x), axis=1)
        out = relay.add(concat, relay.const(1, "float32"))
        return relay.Function(relay.analysis.free_vars(out), out)
    def expected(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        f0 = relay.Function([x], pooled)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Pooling halves the spatial dimensions of p0's input.
        p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2] // 2, dshape[3] // 2))
        p1 = relay.var("p1", shape=dshape)
        upsampled = relay.nn.upsampling(p0, scale_h=2, scale_w=2, layout="NCHW")
        concat = relay.concatenate((upsampled, p1), axis=1)
        out = relay.add(concat, relay.const(1, "float32"))
        f1 = relay.Function([p0, p1], out)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y, x])
        return relay.Function([x], z)
    dshape = (1, 16, 64, 64)
    z = before(dshape)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
    assert not relay.analysis.free_vars(zz)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_tuple_root():
    """Test fusion case where Tuple node is the root in its group.

    The Tuple output itself is not fused; pooling and upsampling each end
    up in their own primitive function.
    """
    def before(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        upsampled = relay.nn.upsampling(pooled, scale_h=2, scale_w=2, layout="NCHW")
        out = relay.Tuple((upsampled, x))
        return relay.Function(relay.analysis.free_vars(out), out)
    def expected(dshape):
        x = relay.var("x", shape=dshape)
        pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        f0 = relay.Function([x], pooled)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Pooling halves spatial dims, hence the // 2 shape here.
        p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2] // 2, dshape[3] // 2))
        upsampled = relay.nn.upsampling(p0, scale_h=2, scale_w=2, layout="NCHW")
        f1 = relay.Function([p0], upsampled)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y])
        tup = relay.Tuple((z, x))
        return relay.Function([x], tup)
    dshape = (1, 16, 64, 64)
    z = before(dshape)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
    assert not relay.analysis.free_vars(zz)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_stop_fusion():
    """The stop_fusion annotation must split add and exp into separate primitives."""
    def before(dshape):
        x = relay.var("x", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        # Barrier: prevents exp from fusing with the add above.
        y = relay.annotation.stop_fusion(y)
        z = relay.exp(y)
        return relay.Function([x], z)
    def expected(dshape):
        x = relay.var("p0", shape=dshape)
        y = relay.add(x, relay.const(1, "float32"))
        f1 = relay.Function([x], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("p01", shape=dshape)
        y = relay.exp(x)
        f2 = relay.Function([x], y)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        y = relay.Call(f1, [x])
        z = relay.Call(f2, [y])
        return relay.Function([x], z)
    dshape = (10, 20)
    z = before(dshape)
    zz = run_opt_pass(z, transform.FuseOps())
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_fuse_myia_regression():
    """Regression test: only the greater comparison inside an If condition
    is fused; the closures returned by the branches are left alone."""
    def before(dshape, dtype):
        x = relay.var("x", shape=dshape, dtype=dtype)
        y = relay.var("y", shape=dshape, dtype=dtype)
        sb = relay.ScopeBuilder()
        with sb.if_scope(relay.op.greater(x, y)):
            sb.ret(relay.Function([], x))
        with sb.else_scope():
            sb.ret(relay.Function([], y))
        return relay.Function([x, y], relay.Call(sb.get(), []))
    def expected(dshape, dtype):
        x = relay.var("x", shape=dshape, dtype=dtype)
        y = relay.var("y", shape=dshape, dtype=dtype)
        sb = relay.ScopeBuilder()
        # The comparison becomes its own primitive function.
        p1 = relay.var("p1", shape=dshape, dtype=dtype)
        p2 = relay.var("p2", shape=dshape, dtype=dtype)
        fused_gt = relay.Function([p1, p2], relay.op.greater(p1, p2))
        fused_gt = fused_gt.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        with sb.if_scope(fused_gt(x, y)):
            sb.ret(relay.Function([], x))
        with sb.else_scope():
            sb.ret(relay.Function([], y))
        return relay.Function([x, y], relay.Call(sb.get(), []))
    dshape = ()
    dtype = "int64"
    f = before(dshape, dtype)
    zz = run_opt_pass(f, transform.FuseOps())
    after = run_opt_pass(expected(dshape, dtype), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_fuse_tuple_get_elemwise():
    """split + TupleGetItem consumers fuse with the downstream elementwise ops,
    while the dense producer stays in its own primitive function."""
    def before(dim):
        X = relay.var("X", shape=(1, dim))
        W = relay.var("W", shape=(3 * dim, dim))
        matmul = relay.nn.dense(X, W)
        splitted = relay.split(matmul, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        return relay.Function([X, W], out)
    def expected(dim):
        p0 = relay.var("p0", shape=(1, dim))
        p1 = relay.var("p1", shape=(3 * dim, dim))
        matmul = relay.nn.dense(p0, p1)
        f0 = relay.Function([p0, p1], matmul)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Split plus all elementwise consumers form the second group.
        p01 = relay.var("p01", shape=(1, 3 * dim))
        splitted = relay.split(p01, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        f1 = relay.Function([p01], out)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        X = relay.var("X", shape=(1, dim))
        W = relay.var("W", shape=(3 * dim, dim))
        y = relay.Call(f0, [X, W])
        z = relay.Call(f1, [y])
        return relay.Function([X, W], z)
    dim = 10
    z = before(dim)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
    assert not relay.analysis.free_vars(zz)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
    after = run_opt_pass(expected(dim), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_tuple_get_root():
    """TupleGetItem as a group root: split+get fuse together, dense stays separate."""
    def before(dim):
        X = relay.var("X", shape=(1, 3 * dim))
        W = relay.var("W", shape=(dim, dim))
        splitted = relay.split(X, indices_or_sections=3, axis=1)
        out = relay.nn.dense(splitted[0], W)
        return relay.Function([X, W], out)
    def expected(dim):
        # Group 0: split and the TupleGetItem selecting its first field.
        p0 = relay.var("p0", shape=(1, 3 * dim))
        splitted = relay.split(p0, indices_or_sections=3, axis=1)
        out = splitted[0]
        f0 = relay.Function([p0], out)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Group 1: the dense op alone.
        p01 = relay.var("p01", shape=(1, dim))
        p1 = relay.var("p1", shape=(dim, dim))
        out = relay.nn.dense(p01, p1)
        f1 = relay.Function([p01, p1], out)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        X = relay.var("X", shape=(1, 3 * dim))
        W = relay.var("W", shape=(dim, dim))
        y = relay.Call(f0, [X])
        z = relay.Call(f1, [y, W])
        return relay.Function([X, W], z)
    dim = 10
    z = before(dim)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
    assert not relay.analysis.free_vars(zz)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
    after = run_opt_pass(expected(dim), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def fuse0(mod):
    """Infer types on *mod*, then run FuseOps at opt level 0."""
    typed = relay.transform.InferType()(mod)
    return relay.transform.FuseOps(fuse_opt_level=0)(typed)
def fuse2(mod):
    """Infer types on *mod*, then run FuseOps at opt level 2."""
    typed = relay.transform.InferType()(mod)
    return relay.transform.FuseOps(fuse_opt_level=2)(typed)
def test_tuple_intermediate():
    """A Tuple feeding concatenate in the middle of an injective graph fuses
    into a single primitive function."""
    def before(x):
        inj = relay.squeeze(x)
        y1 = relay.add(inj, relay.const(1, "float32"))
        tmp = relay.squeeze(inj)
        tmp = relay.add(tmp, relay.const(1, "float32"))
        y2 = relay.add(tmp, relay.const(1, "float32"))
        y3 = relay.add(inj, relay.const(1, "float32"))
        concat = relay.concatenate((y1, y2, y3), axis=1)
        out_inj = relay.squeeze(concat)
        out = relay.add(out_inj, relay.const(1, "float32"))
        return relay.Function(relay.analysis.free_vars(out), out)
    def expected(p0):
        # The whole graph becomes one primitive function.
        f0 = before(p0)
        f1 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        y = relay.Call(f1, [x])
        return relay.Function([x], y)
    dshape = (1, 16, 64, 64)
    x = relay.var("x", shape=dshape)
    orig = before(x)
    fuse0(tvm.IRModule.from_expr(orig))
    m = fuse2(tvm.IRModule.from_expr(orig))
    # Also check the fused module actually compiles.
    relay.build(m, "llvm")
    after = run_opt_pass(expected(x), transform.InferType())
    assert tvm.ir.structural_equal(m["main"], after)
def test_tuple_consecutive():
    """Consecutive Tuple/concatenate groups fuse correctly around a pooling op."""
    def gen_intermediate_tuple(x):
        y1 = relay.add(x, relay.const(1, "float32"))
        y2 = relay.add(x, relay.const(1, "float32"))
        y3 = relay.add(x, relay.const(1, "float32"))
        concat = relay.concatenate((y1, y2, y3), axis=1)
        out = relay.add(concat, relay.const(1, "float32"))
        return out
    def gen_consecutive_tuple(x):
        y1 = gen_intermediate_tuple(x)
        y2 = gen_intermediate_tuple(x)
        y3 = gen_intermediate_tuple(x)
        concat = relay.concatenate((y1, y2, y3), axis=1)
        return concat
    def before(x):
        concat = gen_consecutive_tuple(x)
        pooled = relay.nn.max_pool2d(concat, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        out = relay.add(pooled, relay.const(1, "float32"))
        out2 = relay.add(out, relay.const(1, "float32"))
        out_tup = relay.Tuple((out, out2))
        return relay.Function(relay.analysis.free_vars(out_tup), out_tup)
    def expected(dshape):
        # Group 0: all the injective concat work before pooling.
        p0 = relay.var("p0", shape=dshape)
        concat = gen_consecutive_tuple(p0)
        f0 = relay.Function([p0], concat)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Group 1: pooling plus one add; channels grew 9x from the nested concats.
        p01 = relay.var("p01", shape=(1, dshape[1] * 9, dshape[2], dshape[3]))
        pooled = relay.nn.max_pool2d(p01, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
        out = relay.add(pooled, relay.const(1, "float32"))
        f1 = relay.Function([p01], out)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # Group 2: the final add, on the pooled (halved) spatial shape.
        p02 = relay.var("p02", shape=(1, dshape[1] * 9, dshape[2] // 2, dshape[3] // 2))
        out = relay.add(p02, relay.const(1, "float32"))
        f2 = relay.Function([p02], out)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        y = relay.Call(f0, [x])
        z = relay.Call(f1, [y])
        z2 = relay.Call(f2, [z])
        return relay.Function([x], relay.Tuple((z, z2)))
    dshape = (1, 16, 64, 64)
    x = relay.var("x", shape=dshape)
    orig = before(x)
    fuse0(tvm.IRModule.from_expr(orig))
    m = fuse2(tvm.IRModule.from_expr(orig))
    relay.build(m, "llvm")
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(m["main"], after)
def test_inception_like():
    """Fusion over an inception-like graph: each conv+relu branch becomes its
    own primitive function and each concatenate becomes one as well."""
    def conv(data):
        y = relay.nn.conv2d(data, relay.var("w"), kernel_size=(3, 3), padding=(1, 1), channels=16)
        return relay.nn.relu(data=y)
    def inception_like(data):
        c0 = conv(data)
        c1 = conv(data)
        return relay.concatenate((c0, c1), axis=1)
    def before(dshape):
        x = relay.var("x", shape=dshape)
        in1 = inception_like(x)
        in2 = inception_like(in1)
        return relay.Function(relay.analysis.free_vars(in2), in2)
    def expected(dshape):
        p0 = relay.var("p0", shape=dshape)
        c = conv(p0)
        f0 = relay.Function(relay.analysis.free_vars(c), c)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        p01 = relay.var("p01", shape=dshape)
        c = conv(p01)
        f1 = relay.Function(relay.analysis.free_vars(c), c)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        p02 = relay.var("p02", shape=dshape)
        p12 = relay.var("p12", shape=dshape)
        concat1 = relay.concatenate((p02, p12), axis=1)
        f_concat1 = relay.Function([p02, p12], concat1)
        f_concat1 = f_concat1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        # The first concat doubles the channel dimension.
        dshape2 = (dshape[0], dshape[1] * 2, dshape[2], dshape[3])
        p03 = relay.var("p03", shape=dshape2)
        c = conv(p03)
        f2 = relay.Function(relay.analysis.free_vars(c), c)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        p04 = relay.var("p04", shape=dshape2)
        c = conv(p04)
        f3 = relay.Function(relay.analysis.free_vars(c), c)
        f3 = f3.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        p05 = relay.var("p05", shape=dshape)
        p15 = relay.var("p15", shape=dshape)
        concat2 = relay.concatenate((p05, p15), axis=1)
        f_concat2 = relay.Function([p05, p15], concat2)
        f_concat2 = f_concat2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=dshape)
        c1 = relay.Call(f0, [x, relay.var("w1")])
        c2 = relay.Call(f1, [x, relay.var("w2")])
        concat = relay.Call(f_concat1, [c1, c2])
        c3 = relay.Call(f2, [concat, relay.var("w3")])
        c4 = relay.Call(f3, [concat, relay.var("w4")])
        out = relay.Call(f_concat2, [c3, c4])
        return relay.Function(relay.analysis.free_vars(out), out)
    dshape = (1, 16, 64, 64)
    orig = before(dshape)
    fuse0(tvm.IRModule.from_expr(orig))
    m = fuse2(tvm.IRModule.from_expr(orig))
    relay.build(m, "llvm")
    after = run_opt_pass(expected(dshape), transform.InferType())
    assert tvm.ir.structural_equal(m["main"], after)
def test_fuse_parallel_injective():
    """Test fusing parallel injective ops to an elemwise op.

    squeeze and transpose both consume the same add, and both feed the
    left_shift; the whole graph fuses into a single primitive function.
    """
    def before():
        x = relay.var("x", shape=(10, 20))
        y = relay.add(x, relay.const(1, "float32"))
        z = relay.squeeze(y)
        u = relay.transpose(y, axes=[0, 1])
        w = relay.left_shift(z, u)
        return relay.Function([x], w)
    def expected():
        x = relay.var("p", shape=(10, 20))
        y = relay.add(x, relay.const(1, "float32"))
        z = relay.squeeze(y)
        u = relay.transpose(y, axes=[0, 1])
        w = relay.left_shift(z, u)
        f1 = relay.Function([x], w)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=(10, 20))
        y = relay.Call(f1, [x])
        return relay.Function([x], y)
    z = before()
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
    assert not relay.analysis.free_vars(zz)
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    assert not relay.analysis.free_vars(zz)
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_immutable():
    """Verify the fusion pass won't change original module."""
    def before():
        x = relay.var("x", shape=(10, 20))
        y = relay.add(x, relay.const(1, "float32"))
        z = relay.exp(y)
        w = relay.squeeze(z)
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x], w)
        return mod
    def expected():
        x = relay.var("p", shape=(10, 20))
        y = relay.add(x, relay.const(1, "float32"))
        z = relay.exp(y)
        w = relay.squeeze(z)
        f1 = relay.Function([x], w)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=(10, 20))
        y = relay.Call(f1, [x])
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x], y)
        return mod
    mod = transform.InferType()(before())
    new_mod = transform.FuseOps(fuse_opt_level=2)(mod)
    # The input module must be untouched, and the output must be the fused form.
    assert tvm.ir.structural_equal(mod, transform.InferType()(before()))
    assert tvm.ir.structural_equal(new_mod, transform.InferType()(expected()))
def test_split():
    """Test that the result is well formed.

    Mixes split/TupleGetItem with Ref ops; only checks that InferType and
    FuseOps run without error (no structural assertion).
    """
    x = relay.var("x", shape=(6, 9))
    y = relay.split(x, 3).astuple()
    a = relay.TupleGetItem(y, 0)
    b = relay.TupleGetItem(y, 1)
    c = relay.TupleGetItem(y, 2)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], a + relay.RefRead(relay.RefCreate(b)) + c)
    mod = transform.InferType()(mod)
    mod = transform.FuseOps()(mod)
def test_fuse_max():
    """Test the constraint of number of nodes in op fusion.

    FuseOps caps the number of ops per fused function via the
    "relay.FuseOps.max_depth" PassContext setting (default 256), so a long
    chain of exp ops must split into exactly two primitive functions.
    """

    def before(n):
        # A chain of n exp ops applied to x.
        x = relay.var("x", shape=(10, 20))
        y = x
        for _ in range(n):
            y = relay.exp(y)
        return relay.Function([x], y)

    def expected(n, max_fused_ops):
        # First fused function: the maximum-depth prefix of the chain.
        x = relay.var("p", shape=(10, 20))
        y = x
        for _ in range(max_fused_ops):
            y = relay.exp(y)
        f1 = relay.Function([x], y)
        f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=(10, 20))
        z = relay.Call(f1, [x])
        xx = relay.var("pp", shape=(10, 20))
        yy = xx
        # it is assumed that there are two fused functions
        for _ in range(n - max_fused_ops):
            yy = relay.exp(yy)
        f2 = relay.Function([xx], yy)
        f2 = f2.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        zz = relay.Call(f2, [z])
        return relay.Function([x], zz)

    # Default max depth (256) splits a 300-op chain into 256 + 44.
    # (A previous revision also ran FuseOps(fuse_opt_level=2) here and
    # immediately discarded the result; that dead call has been removed.)
    max_fused_ops = 256
    n = 300
    z = before(n)
    zz = run_opt_pass(z, transform.FuseOps())
    after = run_opt_pass(expected(n, max_fused_ops), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
    # Explicit, smaller max depth configured through the PassContext.
    max_fused_ops = 10
    n = 20
    z = before(n)
    after = run_opt_pass(expected(n, max_fused_ops), transform.InferType())
    with tvm.transform.PassContext(config={"relay.FuseOps.max_depth": max_fused_ops}):
        zz = run_opt_pass(z, transform.FuseOps())
    assert tvm.ir.structural_equal(zz, after)
# Parametrizes the tests below over the "relay.FuseOps.link_params" setting.
link_params = tvm.testing.parameter(False, True)
def test_fuse_take(link_params):
    """Test fusion case involving concat and take"""
    def before():
        shape = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
        x = relay.var("x", shape=shape)
        concat = relay.concatenate([x, x], axis=-1)
        out = relay.op.take(concat, indices=relay.const([0], dtype="int64"))
        return relay.Function(relay.analysis.free_vars(out), out)
    def expected(link_params):
        shape1 = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
        shape2 = (tvm.tir.const(1, "int64"),)
        x = relay.var("x", shape=shape1)
        p0 = relay.var("p0", shape=shape1)
        p1 = relay.var("p1", shape=shape2, dtype="int64")
        c = relay.const([0], dtype="int64")
        concat = relay.concatenate([p0, p0], axis=-1)
        # With link_params the constant indices are embedded in the fused
        # function; otherwise they are passed as the extra parameter p1.
        out = relay.op.take(concat, indices=c if link_params else p1)
        f0 = relay.Function([p0] if link_params else [p0, p1], out)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        y = relay.Call(f0, [x] if link_params else [x, c])
        return relay.Function([x], y)
    after = run_opt_pass(expected(link_params), transform.InferType())
    with tvm.transform.PassContext(opt_level=2, config={"relay.FuseOps.link_params": link_params}):
        m = run_opt_pass(before(), transform.InferType())
        m = run_opt_pass(m, transform.FuseOps())
        assert tvm.ir.structural_equal(m, after)
    relay.build(m, "llvm")
def test_fuse_gather_nd(link_params):
    """Test fusion case involving concat and gather_nd"""
    def before():
        shape = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
        x = relay.var("x", shape=shape)
        concat = relay.concatenate([x, x], axis=-1)
        out = relay.gather_nd(concat, indices=relay.expr.const([[0, 1], [1, 0]], dtype="int64"))
        return relay.Function(relay.analysis.free_vars(out), out)
    def expected(link_params):
        shape1 = (tvm.tir.const(10, "int64"), tvm.tir.const(1, "int64"))
        shape2 = (tvm.tir.const(2, "int64"), tvm.tir.const(2, "int64"))
        x = relay.var("x", shape=shape1)
        p0 = relay.var("p0", shape=shape1)
        p1 = relay.var("p1", shape=shape2, dtype="int64")
        c = relay.const([[0, 1], [1, 0]], dtype="int64")
        concat = relay.concatenate([p0, p0], axis=-1)
        # With link_params the constant indices live inside the fused
        # function; otherwise they are passed in through p1.
        out = relay.gather_nd(concat, indices=c if link_params else p1)
        f0 = relay.Function([p0] if link_params else [p0, p1], out)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        y = relay.Call(f0, [x] if link_params else [x, c])
        return relay.Function([x], y)
    after = run_opt_pass(expected(link_params), transform.InferType())
    with tvm.transform.PassContext(opt_level=2, config={"relay.FuseOps.link_params": link_params}):
        m = run_opt_pass(before(), transform.InferType())
        m = run_opt_pass(m, transform.FuseOps())
        assert tvm.ir.structural_equal(m, after)
    relay.build(m, "llvm")
@tvm.testing.uses_gpu
def test_fuse_bcast_reduce_scalar():
    """Test fusion case with broadcast and reduction involving scalar"""
    def before():
        x = relay.var("x", shape=(), dtype="int32")
        less = relay.less(x, relay.const(10, dtype="int32"))
        z = relay.min(less)
        return relay.Function([x], z)
    def expected():
        # The comparison and the reduction fuse into one primitive function.
        p0 = relay.var("p0", shape=(), dtype="int32")
        less = relay.less(p0, relay.const(10, dtype="int32"))
        z0 = relay.min(less)
        f0 = relay.Function([p0], z0)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        x = relay.var("x", shape=(), dtype="int32")
        f = relay.Call(f0, [x])
        return relay.Function([x], f)
    orig = before()
    m = fuse2(tvm.IRModule.from_expr(orig))
    for tgt, dev in tvm.testing.enabled_targets():
        relay.build(m, tgt)
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(m["main"], after)
def test_fuse_max_diamond():
    """Diamond-shaped graphs respect the max_depth limit: with max_depth set
    to exactly one diamond's op count, each diamond fuses separately."""
    def create_diamond(x, branch_len):
        x1 = x
        x2 = x
        for _ in range(branch_len):
            x1 = relay.exp(x1)
            x2 = relay.exp(x2)
        return relay.add(x1, x2)
    def before(branch_len, num_diamond):
        x = relay.var("x", shape=(10, 20))
        out = x
        for _ in range(num_diamond):
            out = create_diamond(out, branch_len)
        return relay.Function([x], out)
    def after(branch_len, num_diamond):
        def create_diamond_func(inp):
            inp_var = relay.var("p", shape=(10, 20))
            d = create_diamond(inp_var, branch_len)
            f = relay.Function([inp_var], d)
            f = f.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
            return relay.Call(f, [inp])
        inp = relay.var("x", shape=(10, 20))
        out = inp
        for _ in range(num_diamond):
            out = create_diamond_func(out)
        return relay.Function([inp], out)
    branch_len = 5
    max_fused_ops = branch_len * 2 + 1  # the number of ops in one diamond
    num_diamond = 3
    with tvm.transform.PassContext(config={"relay.FuseOps.max_depth": max_fused_ops}):
        fused = run_opt_pass(before(branch_len, num_diamond), transform.FuseOps())
    expected = run_opt_pass(after(branch_len, num_diamond), transform.InferType())
    assert tvm.ir.structural_equal(fused, expected)
def test_fuse_dynamic_squeeze_slice_take():
    """End-to-end VM run of squeeze/strided_slice/take over dynamic (Any) shapes,
    compared against the equivalent NumPy computation."""
    input_data = [
        np.random.random([1, 2, 4]).astype("float32"),
        np.array([0]).astype("int64"),
    ]
    x = relay.var("p0107", shape=(relay.Any(), relay.Any(), 4), dtype="float32")
    take_val = relay.var("p166", shape=(relay.Any(),), dtype="int64")
    squeeze = relay.op.squeeze(x, axis=[0])
    strided_slice = relay.op.strided_slice(
        squeeze, begin=[0, 0], end=[15130, 2147483647], strides=[1, 1]
    )
    take = relay.op.take(strided_slice, take_val, axis=0)
    mod = tvm.IRModule.from_expr(take)
    result = relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
        *input_data
    )
    np_result = np.squeeze(input_data[0][:, input_data[1][0], :], axis=0)
    assert np.allclose(result.numpy(), np_result)
@tvm.testing.uses_gpu
def test_fuse_softmax():
    """Test if softmax can be fused with following ops."""
    channel_size = 16
    def before():
        x = relay.var("x", shape=(16, channel_size))
        softmax = relay.nn.softmax(x)
        out = relay.cast(softmax, "float16")
        return relay.Function([x], out)
    def expected():
        # softmax and the cast fuse into one primitive function.
        p0 = relay.var("p0", shape=(16, channel_size))
        softmax = relay.nn.softmax(p0)
        out = relay.cast(softmax, "float16")
        x = relay.var("x", shape=(16, channel_size))
        f0 = relay.Function([p0], out)
        f0 = f0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        y = relay.Call(f0, [x])
        return relay.Function([x], y)
    orig = before()
    m = fuse2(tvm.IRModule.from_expr(orig))
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(m["main"], after)
    # Also check numerical correctness against the topi reference softmax.
    inp = np.random.randn(16, channel_size).astype("float32")
    ref = tvm.topi.testing.softmax_python(inp).astype("float16")
    for tgt, dev in tvm.testing.enabled_targets():
        ex = relay.create_executor("graph", mod=m, device=dev, target=tgt)
        result = ex.evaluate()(inp).numpy()
        tvm.testing.assert_allclose(result, ref, rtol=1e-4, atol=1e-4)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 30,900 | 36.096038 | 99 | py |
tvm | tvm-main/tests/python/relay/test_vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import time
from unittest.mock import patch
import tvm
from tvm import runtime
from tvm import relay, IRModule
from tvm.relay.backend import vm
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay.prelude import Prelude
from tvm.relay.loops import while_loop
from tvm.relay import testing
from tvm.contrib import utils
from tvm import rpc
import tvm.testing
from tvm.relay.transform import InferType
from tvm.relay.testing import mlp
from tvm.relay.dataflow_pattern import wildcard, is_op
from tvm.relay.backend.vm import VMCompiler
def check_result(target, dev, args, expected_result, mod):
    """Evaluate *mod*'s "main" on the Relay VM and compare the result.

    Parameters
    ----------
    target : str
        Compilation target.
    dev : Device
        Device the evaluation runs on.
    args : list
        Arguments supplied to the "main" function.
    expected_result :
        The expected result of running the module.
    mod : tvm.IRModule
        The module under test.
    """
    executor = relay.create_executor("vm", device=dev, target=target, mod=mod)
    actual = executor.evaluate()(*args)
    tvm.testing.assert_allclose(expected_result, actual.numpy())
def veval(f, *args, device=tvm.cpu(), target="llvm"):
    """Compile *f* (a Relay expression or an IRModule) and invoke "main" on the VM."""
    if isinstance(f, tvm.IRModule):
        mod = f
    else:
        assert isinstance(f, relay.Expr), "expected expression or module"
        mod = tvm.IRModule()
        mod["main"] = f
    executable = relay.vm.compile(mod, target)
    machine = runtime.vm.VirtualMachine(executable, device)
    return machine.invoke("main", *args)
def vmobj_to_list(o):
    """Flatten a VM result (NDArray or nested ADT) into a plain Python list."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy().tolist()]
    if isinstance(o, tvm.runtime.container.ADT):
        # Recursively flatten every field of the ADT.
        return [item for field in o for item in vmobj_to_list(field)]
    raise RuntimeError("Unknown object type: %s" % type(o))
def test_split(target, dev):
    """split into three parts on the VM matches numpy.split."""
    x = relay.var("x", shape=(12,))
    y = relay.split(x, 3, axis=0).astuple()
    f = relay.Function([x], y)
    x_data = np.random.rand(
        12,
    ).astype("float32")
    ref_res = np.split(x_data, 3, axis=0)
    res = veval(f, x_data, device=dev, target=target)
    for i in range(3):
        tvm.testing.assert_allclose(res[i].numpy(), ref_res[i])
def test_split_no_fuse(target, dev):
    """split followed by concat with stop_fusion: first chunk is returned unfused."""
    x = relay.var("x", shape=(12,))
    y = relay.split(x, 3, axis=0).astuple()
    z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0)
    # Prevent the concat from fusing with downstream ops.
    z = relay.annotation.stop_fusion(z)
    f = relay.Function([x], z)
    x_data = np.random.rand(
        12,
    ).astype("float32")
    res = veval(f, x_data, device=dev, target=target)
    tvm.testing.assert_allclose(res.numpy(), np.split(x_data, 3, axis=0)[0])
def test_id(target, dev):
    """The identity function returns its input unchanged on the VM."""
    param = relay.var("x", shape=(10, 10), dtype="float64")
    identity = relay.Function([param], param)
    data = np.random.rand(10, 10).astype("float64")
    mod = tvm.IRModule()
    mod["main"] = identity
    check_result(target, dev, [data], data, mod)
def test_op(target, dev):
    """x + x computes elementwise doubling on the VM."""
    param = relay.var("x", shape=(10, 10))
    doubler = relay.Function([param], relay.add(param, param))
    data = np.random.rand(10, 10).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = doubler
    check_result(target, dev, [data], 2 * data, mod)
def any(x):
    # NOTE(review): this helper shadows the builtin ``any`` in this module.
    # It flattens x to 2-D and reduces with min over all elements, so for a
    # boolean tensor the scalar result is truthy only when every element is
    # truthy (i.e. it actually behaves like "all", despite the name).
    x = relay.op.nn.batch_flatten(x)
    return relay.op.min(x, axis=[0, 1])
@tvm.testing.known_failing_targets("vulkan")
def test_cond(target, dev):
    """Scalar-reduced equality comparison of two tensors on the VM."""
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(10, 10))
    # f = relay.Function([x, y], relay.op.equal(x, y))
    f = relay.Function([x, y], any(relay.op.equal(x, y)))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(10, 10).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = f
    # same
    check_result(target, dev, [x_data, x_data], True, mod)
    # diff
    check_result(target, dev, [x_data, y_data], False, mod)
@tvm.testing.known_failing_targets("vulkan")
def test_simple_if(target, dev):
    """If-expression selecting between tensors based on elementwise equality."""
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(10, 10))
    f = relay.Function([x, y], relay.If(any(relay.op.equal(x, y)), x, y))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(10, 10).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = f
    # same
    check_result(target, dev, [x_data, x_data], x_data, mod)
    # diff
    check_result(target, dev, [x_data, y_data], y_data, mod)
@tvm.testing.parametrize_targets("llvm")
def test_multiple_ifs(target, dev):
    """Two If expressions bound through nested Lets evaluate correctly.

    With b=False: v3 = If(b, v1, v0) -> (0,) and v2 = If(b, v0, v1) -> (1,),
    so the flattened result of the (v2, v3) tuple is [1, 0].
    """
    mod = tvm.IRModule({})
    b = relay.var("b")
    v0 = relay.var("v0")
    v1 = relay.var("v1")
    v2 = relay.var("v2")
    v3 = relay.var("v3")
    out = relay.Tuple([v2, v3])
    out = relay.Let(v3, relay.If(b, v1, v0), out)
    out = relay.Let(v2, relay.If(b, v0, v1), out)
    out = relay.Let(v1, relay.Tuple([relay.const(1)]), out)
    out = relay.Let(v0, relay.Tuple([relay.const(0)]), out)
    fn = relay.Function([b], out)
    mod["main"] = fn
    func = relay.create_executor(device=dev, mod=mod, kind="vm").evaluate()
    res = vmobj_to_list(func(False))
    assert res == [1, 0]
def test_unused_function(target, dev):
    """A global function reachable only from a dead If branch must not break
    compilation; the taken branch (times_2) determines the result."""
    cond = relay.const(True)
    mod = tvm.IRModule()
    then_name = relay.GlobalVar("times_2")
    # define unused function
    else_name = relay.GlobalVar("times_3")
    t1 = relay.TensorType((2, 2), dtype="float32")
    x1 = relay.var("x1", t1, dtype="float32")
    x2 = relay.var("x2", t1, dtype="float32")
    f2 = relay.multiply(x1, relay.const(2.0))
    f3 = relay.multiply(x2, relay.const(3.0))
    mod[then_name] = relay.Function([x1], f2)
    mod[else_name] = relay.Function([x2], f3)
    mod = InferType()(mod)
    x3 = relay.var("x3", t1, dtype="float32")
    # put unused function in else branch
    f = relay.If(cond, then_name(x3), else_name(x3))
    mod["main"] = relay.Function([x3], f)
    x_data = np.random.rand(2, 2).astype("float32")
    # cond is constant True, so only times_2 ever runs.
    y_data = x_data * 2
    check_result(target, dev, [x_data], y_data, mod)
def test_simple_call(target, dev):
    """Calling a global function (identity on a scalar) through the VM."""
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = ScopeBuilder()
    sb.ret(i)
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    mod[sum_up] = func
    i_data = np.array(0, dtype="int32")
    iarg = relay.var("iarg", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg], sum_up(iarg))
    check_result(target, dev, [i_data], i_data, mod)
def test_count_loop(target, dev):
    """Recursive sum_up(i) = i == 0 ? i : sum_up(i - 1) + i, driven at i = 0."""
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype="int32"))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    mod[sum_up] = func
    # i_data = 0 exercises only the recursion base case.
    i_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg], sum_up(iarg))
    # Check both through direct VM evaluation and the shared helper.
    result = veval(mod, i_data, device=dev, target=target)
    tvm.testing.assert_allclose(result.numpy(), i_data)
    check_result(target, dev, [i_data], i_data, mod)
def test_sum_loop(target, dev):
    """Tail-recursive accumulator sum: sum_up(i, acc) sums i down to 0."""
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    accum = relay.var("accum", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, "int32"))
        new_accum = relay.add(accum, i)
        # Tail call: candidate for tail-call optimization in the VM.
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    mod = relay.transform.InferType()(mod)
    # loop_bound = 0 exercises only the base case; expected sum is 0.
    loop_bound = 0
    i_data = np.array(loop_bound, dtype="int32")
    accum_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    aarg = relay.var("accum", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    check_result(target, dev, [i_data, accum_data], sum(range(1, loop_bound + 1)), mod)
def test_tuple_fst(target, dev):
    """TupleGetItem at index 0 projects the first field of a tuple argument."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var("tup", type_annotation=ttype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([tup], relay.TupleGetItem(tup, 0))
    # NOTE(review): first field is fed 41 elements despite the (1,) annotation
    # in ttype — presumably tolerated at runtime; kept as in the original.
    first = np.random.rand(41).astype("float32")
    second = np.random.rand(10).astype("float32")
    check_result(target, dev, [(first, second)], first, mod)
def test_tuple_second(target, dev):
    """TupleGetItem at index 1 projects the second field of a tuple argument."""
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var("tup", type_annotation=ttype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([tup], relay.TupleGetItem(tup, 1))
    first = np.random.rand(41).astype("float32")
    second = np.random.rand(10).astype("float32")
    check_result(target, dev, [(first, second)], second, mod)
def test_list_constructor(target, dev):
    """Build the Prelude list [3, 2, 1] with cons/nil and return it from main."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    l, cons, nil = mod.get_type("List")
    one2 = cons(relay.const(1), nil())
    one3 = cons(relay.const(2), one2)
    one4 = cons(relay.const(3), one3)
    f = relay.Function([], one4)
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    # A cons cell is an ADT object with two fields: head and tail.
    assert len(result) == 2
    assert len(result[1]) == 2
    obj = vmobj_to_list(result)
    tvm.testing.assert_allclose(obj, np.array([3, 2, 1]))
def test_let_tensor(target, dev):
    """Let-bind a tensor argument, then add a constant to the bound value."""
    sb = relay.ScopeBuilder()
    shape = (1,)
    x = relay.var("x", shape=shape, dtype="float32")
    x1 = relay.var("x1", shape=shape, dtype="float32")
    # Rebinds the Python name x1 to the let-bound expression returned by sb.let.
    x1 = sb.let(x1, x)
    xplusone = x1 + relay.const(42.0, "float32")
    sb.ret(xplusone)
    body = sb.get()
    f = relay.Function([x], body)
    x_data = np.random.rand(*shape).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = f
    check_result(target, dev, [x_data], x_data + 42.0, mod)
def test_let_scalar(target, dev):
    """Let-bind a scalar argument, then add a constant to the bound value."""
    sb = relay.ScopeBuilder()
    x = relay.var("x", "float32")
    bound = sb.let("x1", x)
    sb.ret(bound + relay.const(42.0, "float32"))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], sb.get())
    x_data = np.array(np.random.rand()).astype("float32")
    check_result(target, dev, [x_data], x_data + 42.0, mod)
def test_compose(target, dev):
    """Prelude compose: add_two = compose(add_one, add_one); checks x + 2."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    compose = p.compose
    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var("x", "float32")
    x1 = sb.let("x1", x)
    xplusone = x1 + relay.const(1.0, "float32")
    sb.ret(xplusone)
    body = sb.get()
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], body)
    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var("y", "float32")
    add_two_func = sb.let("add_two", compose(add_one_func, add_one_func))
    add_two_res = add_two_func(y)
    sb.ret(add_two_res)
    add_two_body = sb.get()
    mod[add_one] = add_one_func
    f = relay.Function([y], add_two_body)
    mod["main"] = f
    x_data = np.array(np.random.rand()).astype("float32")
    result = veval(mod, [x_data], device=dev, target=target)
    tvm.testing.assert_allclose(result.numpy(), x_data + 2.0)
def test_list_hd(target, dev):
    """Prelude `hd` returns the most recently consed element: hd([3,2,1]) == 3."""
    mod = tvm.IRModule()
    Prelude(mod)
    _, cons, nil = mod.get_type("List")
    hd = mod.get_global_var("hd")
    lst = nil()
    # Cons 1, then 2, then 3 onto the front: the final list is [3, 2, 1].
    for v in (1, 2, 3):
        lst = cons(relay.const(v), lst)
    mod["main"] = relay.Function([], hd(lst))
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(result.numpy(), 3)
def test_list_tl_empty_list(target, dev):
    """Taking the tail of an empty Prelude list must raise at run time."""
    mod = tvm.IRModule()
    Prelude(mod)
    _, _, nil = mod.get_type("List")
    tl = mod.get_global_var("tl")
    mod["main"] = relay.Function([], tl(nil()))
    with pytest.raises(tvm.error.TVMError):
        veval(mod, device=dev, target=target)
def test_list_tl(target, dev):
    """Prelude `tl` drops the head: tl([3, 2, 1]) == [2, 1]."""
    mod = tvm.IRModule()
    Prelude(mod)
    _, cons, nil = mod.get_type("List")
    tl = mod.get_global_var("tl")
    lst = nil()
    for v in (1, 2, 3):  # builds [3, 2, 1]
        lst = cons(relay.const(v), lst)
    mod["main"] = relay.Function([], tl(lst))
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1]))
def test_list_nth(target, dev):
    """Prelude `nth` returns the element at each valid index of [0..9].

    Bug fix: the list-construction loop previously reused the outer loop
    variable ``i``, leaving ``i == 0`` after building the list, so only
    index 0 was ever exercised.  The inner loop now uses its own variable
    so every index 0..9 is actually tested.
    """
    expected = list(range(10))
    for i in range(len(expected)):
        mod = tvm.IRModule()
        p = Prelude(mod)
        _, cons, nil = mod.get_type("List")
        nth = mod.get_global_var("nth")
        l = nil()
        # Build the list back-to-front with a separate variable so the
        # index under test, i, is not clobbered.
        for v in reversed(expected):
            l = cons(relay.const(v), l)
        f = relay.Function([], nth(l, relay.const(i)))
        mod["main"] = f
        result = veval(mod, device=dev, target=target)
        tvm.testing.assert_allclose(result.numpy(), expected[i])
def test_list_update(target, dev):
    """Prelude `update` overwrites each slot of a zero-filled list with 0..9."""
    expected = list(range(10))
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")
    update = mod.get_global_var("update")
    l = nil()
    # create zero initialized list
    for i in range(len(expected)):
        l = cons(relay.const(0), l)
    # set value
    for i, v in enumerate(expected):
        l = update(l, relay.const(i), relay.const(v))
    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected))
def test_list_length(target, dev):
    """Prelude `length` on a 10-element list returns 10."""
    expected = list(range(10))
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")
    length = mod.get_global_var("length")
    l = nil()
    # create zero initialized list
    for _ in range(len(expected)):
        l = cons(relay.const(0), l)
    l = length(l)
    f = relay.Function([], l)
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(result.numpy(), 10)
def test_list_map(target, dev):
    """Prelude `map` with (+1) over [2, 1] yields [3, 2]."""
    mod = tvm.IRModule()
    Prelude(mod)
    x = relay.var("x", "int32")
    add_one_func = relay.Function([x], relay.const(1) + x)
    _, cons, nil = mod.get_type("List")
    map_var = mod.get_global_var("map")  # local name avoids shadowing builtins.map
    lst = cons(relay.const(2), cons(relay.const(1), nil()))
    mod["main"] = relay.Function([], map_var(add_one_func, lst))
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2]))
def test_list_foldl(target, dev):
    """Prelude `foldl` with a reverse-and-duplicate step function."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")
    foldl = mod.get_global_var("foldl")
    x = relay.var("x")
    y = relay.var("y")
    # Each step prepends the element twice, so the fold reverses [1,2,3]
    # into [3,3,2,2,1,1].
    rev_dup_func = relay.Function([y, x], cons(x, cons(x, y)))
    l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
    f = relay.Function([], foldl(rev_dup_func, nil(), l))
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1]))
def test_list_foldr(target, dev):
    """Prelude `foldr` with cons as the step rebuilds the list unchanged."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")
    foldr = mod.get_global_var("foldr")
    x = relay.var("x")
    y = relay.var("y")
    # foldr(cons, nil, l) is the identity on lists.
    identity_func = relay.Function([x, y], cons(x, y))
    l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
    f = relay.Function([], foldr(identity_func, nil(), l))
    mod["main"] = f
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3]))
def test_list_sum(target, dev):
    """Prelude `sum` over [1, 2, 3] yields 6."""
    mod = tvm.IRModule()
    Prelude(mod)
    _, cons, nil = mod.get_type("List")
    sum_var = mod.get_global_var("sum")  # local name avoids shadowing builtins.sum
    lst = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))
    mod["main"] = relay.Function([], sum_var(lst))
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(result.numpy(), 6)
def test_list_filter(target, dev):
    """Prelude `filter` with x > 1 over [1, 3, 1, 5, 1] keeps [3, 5]."""
    mod = tvm.IRModule()
    Prelude(mod)
    _, cons, nil = mod.get_type("List")
    # Local name avoids shadowing builtins.filter.
    filter_var = mod.get_global_var("filter")
    x = relay.var("x", "int32")
    greater_than_one = relay.Function([x], x > relay.const(1))
    lst = nil()
    for v in reversed([1, 3, 1, 5, 1]):  # cons back-to-front to preserve order
        lst = cons(relay.const(v), lst)
    mod["main"] = relay.Function([], filter_var(greater_than_one, lst))
    result = veval(mod, device=dev, target=target)
    tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5]))
def test_closure(target, dev):
    """A closure captures y: ((fun y -> fun x -> x + y) 1) 2 == 3."""
    x = relay.var("x", shape=())
    y = relay.var("y", shape=())
    inner = relay.Function([x], x + y)
    outer = relay.Function([y], inner)
    main = outer(relay.const(1.0))(relay.const(2.0))
    res = veval(main, device=dev, target=target)
    tvm.testing.assert_allclose(res.numpy(), 3.0)
def test_add_op_scalar(target, dev):
    """
    test_add_op_scalar:
        fn (x, y) {
            return x + y;
        }

    Scalar float32 add, fed with numpy 0-d arrays, numpy scalars, and plain
    Python floats to check all accepted input kinds.
    """
    mod = tvm.IRModule()
    x = relay.var("x", shape=())  # Default to float32
    y = relay.var("y", shape=())  # Default to float32
    func = relay.Function([x, y], relay.op.add(x, y))
    x_y_data = [
        (np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")),
        (np.float32(10.0), np.float32(1.0)),
        (10.0, 1.0),
    ]
    for (x_data, y_data) in x_y_data:
        mod["main"] = func
        check_result(target, dev, [x_data, y_data], x_data + y_data, mod)
def test_add_op_scalar_float16(target, dev):
    """
    test_add_op_scalar_float16:
        fn (x, y) {
            return x + y;
        }

    Same as test_add_op_scalar but with explicit float16 inputs.
    """
    mod = tvm.IRModule()
    x = relay.var("x", shape=(), dtype="float16")  # Default to float16
    y = relay.var("y", shape=(), dtype="float16")  # Default to float16
    func = relay.Function([x, y], relay.op.add(x, y))
    x_y_data = [
        (np.array(10.0, dtype="float16"), np.array(1.0, dtype="float16")),
        (np.float16(10.0), np.float16(1.0)),
    ]
    for (x_data, y_data) in x_y_data:
        mod["main"] = func
        check_result(target, dev, [x_data, y_data], x_data + y_data, mod)
def test_add_op_scalar_int(target, dev):
    """
    test_add_op_scalar_int:
        fn (x, y) {
            return x + y;
        }

    Same as test_add_op_scalar but with int32 inputs, including plain
    Python ints.
    """
    mod = tvm.IRModule()
    x = relay.var("x", shape=(), dtype="int32")
    y = relay.var("y", shape=(), dtype="int32")
    func = relay.Function([x, y], relay.op.add(x, y))
    x_y_data = [
        (np.array(10.0, dtype="int32"), np.array(1.0, dtype="int32")),
        (np.int32(10), np.int32(1)),
        (10, 1),
    ]
    for (x_data, y_data) in x_y_data:
        mod["main"] = func
        check_result(target, dev, [x_data, y_data], x_data + y_data, mod)
def test_add_op_tensor(target, dev):
    """
    test_add_op_tensor:
        fn (x, y) {
            return x + y;
        }

    Elementwise add of two (10, 5) tensors.
    """
    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(10, 5))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    x_data = np.random.rand(10, 5).astype("float32")
    y_data = np.random.rand(10, 5).astype("float32")
    check_result(target, dev, [x_data, y_data], x_data + y_data, mod)
def test_add_op_broadcast(target, dev):
    """
    test_add_op_broadcast:
        fn (x, y) {
            return x + y;
        }

    Add with broadcasting: (10, 5) + (1, 5).
    """
    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(1, 5))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.op.add(x, y))
    x_data = np.random.rand(10, 5).astype("float32")
    y_data = np.random.rand(1, 5).astype("float32")
    check_result(target, dev, [x_data, y_data], x_data + y_data, mod)
def test_vm_optimize_dynamic():
    """Optimizing a fully dynamic add must generate a shape function."""
    dtype = "float32"
    dyn_shape = (relay.Any(), relay.Any())
    x = relay.var("x", shape=dyn_shape, dtype=dtype)
    y = relay.var("y", shape=dyn_shape, dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    opt_mod, _ = relay.vm.VMCompiler().optimize(mod, target="llvm")
    assert "shape_func" in opt_mod.astext(False)
def test_vm_optimize():
    """After binding params, the only free var left in main is the data input."""
    mod, params = testing.synthetic.get_workload()
    opt_mod, _ = relay.vm.VMCompiler().optimize(mod, target="llvm", params=params)
    # Parameters should all be bound, so the only free var is data.
    free_vars = relay.analysis.free_vars(opt_mod["main"].body)
    assert len(free_vars) == 1
def test_loop_free_var(target, dev):
    """A while_loop body with and without a captured free variable."""
    x = relay.var("x", shape=(), dtype="int32")
    i = relay.var("i", shape=(), dtype="int32")
    s = relay.var("s", shape=(), dtype="int32")

    # Loop while i < 10.
    def cond(i, _):
        return i < relay.const(10, dtype="int32")

    # Accumulate i: sum(0..9) == 45.
    def body_no_free_var(i, acc):
        incr = relay.const(1, "int32")
        return i + incr, acc + i

    # Accumulate the free var x each iteration: 10 * x == 10 when x == 1.
    def body_with_free_var(i, acc):
        incr = relay.const(1, "int32")
        return i + incr, acc + x

    # args == [1] supplies x for the free-var variant; the free-var-less loop
    # takes no arguments.
    for args, body, expected in zip([[], [1]], [body_no_free_var, body_with_free_var], [45, 10]):
        loop = while_loop(cond, [i, s], body)
        tup = loop(relay.const(0, dtype="int32"), relay.zeros(shape=(), dtype="int32"))
        ret = relay.TupleGetItem(tup, 1)
        mod = tvm.IRModule()
        mod["main"] = relay.Function(relay.analysis.free_vars(ret), ret)
        check_result(target, dev, args, expected, mod)
def test_vm_reshape_tensor(target, dev):
    """Reshape chains compile to dedicated reshape_tensor VM instructions.

    Covers: a single static reshape, fused reshape/reverse_reshape, reshape
    with symbolic or Any dims, and a data-dependent dyn.reshape.
    """
    x_np = np.random.uniform(size=(8, 16)).astype("float32")
    x = relay.var("x", shape=(8, 16), dtype="float32")
    y = relay.reshape(x, [-1, 4, 8])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    with tvm.transform.PassContext(opt_level=3):
        exec = relay.vm.compile(mod, "llvm")
    assert "reshape_tensor" in exec.bytecode
    check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod)

    x = relay.var("x", shape=(8, 16), dtype="float32")
    y = relay.reshape(x, [16, -1])
    y = relay.reverse_reshape(y, [-1, 4, 0])
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x], y)
    with tvm.transform.PassContext(opt_level=3):
        exec = relay.vm.compile(mod, "llvm")
    # The two consecutive reshapes collapse into a single instruction.
    assert exec.bytecode.count("reshape_tensor") == 1
    check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod)

    # reshape with symbolic/any shape
    for n in [tvm.tir.Any(), tvm.te.size_var("n")]:
        x = relay.var("x", shape=(n, 16), dtype="float32")
        y = relay.reshape(x, [-1, 4])
        y = relay.reshape(y, [0, 2, -1])
        mod = tvm.IRModule()
        mod["main"] = relay.Function([x], y)
        with tvm.transform.PassContext(opt_level=3):
            exec = relay.vm.compile(mod, "llvm")
        assert exec.bytecode.count("reshape_tensor") == 1
        check_result(target, dev, [x_np], x_np.reshape([32, 2, 2]), mod)

    # dyn.reshape
    x = relay.var("x", shape=(8, 16), dtype="float32")
    y = relay.var("y", shape=(3,), dtype="int32")
    z = relay.reshape(x, [-1, 4, 8])
    z = relay.reshape(z, y)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    with tvm.transform.PassContext(opt_level=3):
        exec = relay.vm.compile(mod, "llvm")
    # The runtime-shape reshape cannot be fused with the static one.
    assert exec.bytecode.count("reshape_tensor") == 2
    assert "reshape_tensor" in exec.bytecode
    y_np = np.array([8, 2, 8]).astype("int32")
    check_result(target, dev, [x_np, y_np], x_np.reshape([8, 2, 8]), mod)
def test_vm_reshape_and_copy(target, dev):
    """Make sure the compiler notices the reshape result shape is a literal and can use
    the immediate-mode alloc_tensor instruction instead of alloc_tensor_reg."""
    x_np = np.random.uniform(size=(1, 1)).astype("float32")
    x = relay.var("x", shape=(1, 1), dtype="float32")
    body = relay.copy(relay.reshape(x, [0, 1]))
    mod = tvm.IRModule.from_expr(relay.Function([x], body))
    with tvm.transform.PassContext(opt_level=3):
        executable = relay.vm.compile(mod, "llvm")
    assert "alloc_tensor" in executable.bytecode
    assert "alloc_tensor_reg" not in executable.bytecode
    check_result(target, dev, [x_np], x_np.reshape([1, 1]), mod)
def test_vm_reshape_tuple(target, dev, x_shape=(1, 4, 2), y_shape=(1, 2, 10)):
    """Reshape applied to the first field of a tuple-typed argument."""
    tup_type = relay.TupleType([relay.TensorType(x_shape), relay.TensorType(y_shape)])
    tup = relay.var("tup", type_annotation=tup_type)
    body = relay.reshape(relay.TupleGetItem(tup, 0), (1, -1))
    f = relay.Function([tup], body)
    x_data = np.random.uniform(size=x_shape).astype("float32")
    y_data = np.random.uniform(size=y_shape).astype("float32")
    res = veval(f, (x_data, y_data), device=dev, target=target)
    tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1)))
def test_constant_shape_with_external_codegen():
    """A dynamic-shaped external (Compiler-annotated) function still gets a shape func."""

    # Stub external codegen: registration is the point, the body is unused here.
    @tvm.register_func("relay.ext.test1")
    def relay_ext_test(func):
        return None

    mod = tvm.IRModule()
    shape = (relay.Any(), 25)
    dtype = "float32"
    # external function
    x = relay.var("x", shape=shape, dtype=dtype)
    weight = relay.const(np.random.rand(5, 25).astype("float32"), dtype="float32")
    out = relay.nn.dense(x, weight)
    f1 = relay.Function([x], out)
    f1 = f1.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
    f1 = f1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    f1 = f1.with_attr("Compiler", "test1")
    f1 = f1.with_attr("global_symbol", "f1")
    glb_f1 = relay.GlobalVar("f1")
    mod[glb_f1] = f1
    mod = relay.transform.InferType()(mod)

    # Main function
    x = relay.var("x", shape=shape, dtype=dtype)
    mod["main"] = relay.Function([x], glb_f1(x))
    comp = relay.vm.VMCompiler()
    opt_mod, _ = comp.optimize(mod, target="llvm")
    assert "shape_func" in opt_mod.astext(False)
def prepare_vm_model(path, tensor_shape):
    """
    Compile a VM for the simple x + x topology and export the resulting
    executable as a shared library to *path*.
    """
    target = tvm.target.Target("llvm --host=llvm")
    # Build the IRModule.
    x = relay.var("x", shape=tensor_shape)
    mod = IRModule.from_expr(relay.Function([x], x + x))
    # Compile to a VMExecutable and write it to disk.
    vm_exec = vm.compile(mod, target=target)
    vm_exec.mod.export_library(path)
def test_vm_rpc():
    """
    This test checks to make sure you can export a VMExecutable,
    upload it to a remote machine using RPC and then execute it
    on the other machine.
    """
    # Shape for input and output tensors
    shape = (10, 1)

    # Export to Disk
    temp = utils.tempdir()
    path = temp.relpath("vm_library.so")
    prepare_vm_model(path, shape)

    # Use local rpc server for testing.
    # Server must use popen so it doesn't inherit the current process state. It
    # will crash otherwise.
    def check_remote(server):
        remote = rpc.connect(server.host, server.port, session_timeout=10)

        # Upload the serialized Executable.
        remote.upload(path)
        # Get a handle to remote Executable.
        rexec = remote.load_module("vm_library.so")

        device = remote.cpu()
        # Build a VM out of the executable and context.
        vm_factory = runtime.vm.VirtualMachine(rexec, device)
        np_input = np.random.uniform(size=shape).astype("float32")
        input_tensor = tvm.nd.array(np_input, device)
        # Invoke its "main" function.
        out = vm_factory.invoke("main", input_tensor)
        # Check the result: main computes x + x.
        np.testing.assert_allclose(out.numpy(), np_input + np_input)

    check_remote(rpc.Server("127.0.0.1"))
def test_vm_invoke_with_outputs_rpc():
    """
    This test checks to make sure you can export a VMExecutable,
    upload it to a remote machine using RPC and then execute it
    on the other machine with preallocated outputs.
    """
    # Shape for input and output tensors
    shape = (3, 2)

    # Export to Disk
    temp = utils.tempdir()
    path = temp.relpath("vm_library.so")
    prepare_vm_model(path, shape)

    # Use local rpc server for testing.
    # Server must use popen so it doesn't inherit the current process state. It
    # will crash otherwise.
    def check_remote_invoke_with_outputs(server):
        remote = rpc.connect(server.host, server.port, session_timeout=10)

        # Upload the serialized Executable.
        remote.upload(path)
        # Get a handle to remote Executable.
        rexec = remote.load_module("vm_library.so")

        device = remote.cpu()
        # Build a VM out of the executable and context.
        vm_factory = runtime.vm.VirtualMachine(rexec, device)
        np_input = np.random.uniform(size=shape).astype("float32")
        input_tensor = tvm.nd.array(np_input, device)
        np_output = np.empty(shape, dtype="float32")
        output_tensor = tvm.nd.array(np_output, device)
        # Invoke its "main" function; the preallocated output tensor is
        # filled in place rather than a fresh tensor being returned.
        vm_factory.invoke_with_outputs(
            "main", input_args={"x": input_tensor}, output_args=[output_tensor]
        )
        # Check the result.
        np.testing.assert_allclose(output_tensor.numpy(), np_input + np_input)

    check_remote_invoke_with_outputs(rpc.Server("127.0.0.1"))
def test_vm_invoke_with_outputs():
    """invoke_with_outputs fills a caller-allocated output tensor in place."""
    target = tvm.target.Target("llvm")
    shape = (3, 2)
    # Build a IRModule.
    x = relay.var("x", shape=shape)
    f = relay.Function([x], x + x)
    mod = IRModule.from_expr(f)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu())
    np_input = np.random.uniform(size=shape).astype("float32")
    input_tensor = tvm.nd.array(np_input)
    np_output = np.empty(shape, dtype="float32")
    output_tensor = tvm.nd.array(np_output)
    # Invoke
    vm_factory.invoke_with_outputs(
        "main", input_args={"x": input_tensor}, output_args=[output_tensor]
    )
    # Check the result.
    np.testing.assert_allclose(output_tensor.numpy(), np_input + np_input)
def test_get_output_single():
    """invoke_stateful + get_outputs for a single (non-tuple) result."""
    target = tvm.target.Target("llvm")
    # Build a IRModule.
    x = relay.var("x", shape=(10,))
    f = relay.Function([x], x + x)
    mod = IRModule.from_expr(f)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu())
    inp = np.ones(10, dtype="float32")
    vm_factory.invoke_stateful("main", inp)
    outputs = vm_factory.get_outputs()
    assert len(outputs) == 1
    np.testing.assert_allclose(outputs[0].numpy(), inp + inp)
@tvm.testing.parametrize_targets("llvm")
def test_get_output_multiple(target, dev):
    """invoke_stateful + get_outputs returns every field of a tuple result."""
    x = relay.var("x", shape=(10,))
    mod = IRModule.from_expr(relay.Function([x], relay.Tuple([x + x, x])))
    vm_exec = vm.compile(mod, target=target)
    vm_factory = runtime.vm.VirtualMachine(vm_exec, dev)
    inp = np.ones(10, dtype="float32")
    vm_factory.invoke_stateful("main", inp)
    outputs = vm_factory.get_outputs()
    assert len(outputs) == 2
    np.testing.assert_allclose(outputs[0].numpy(), inp + inp)
    np.testing.assert_allclose(outputs[1].numpy(), inp)
@tvm.testing.parametrize_targets("llvm")
def test_get_input_index(target, dev):
    """get_input_index maps parameter names to positions, and -1 when absent."""
    data_0, data_1 = ["d1", "d2"]
    params = [relay.var(name, shape=(10,)) for name in [data_0, data_1]]
    mod = IRModule.from_expr(relay.Function(params, params[0] + params[1]))
    vm_exec = vm.compile(mod, target=target)
    vm_factory = runtime.vm.VirtualMachine(vm_exec, dev)
    assert vm_factory.get_input_index(data_1) == 1
    assert vm_factory.get_input_index(data_0) == 0
    assert vm_factory.get_input_index("invalid") == -1
def get_one_input_relay_mod(tensor_type, shape, data_name):
    """Build an IRModule whose main computes exp of one named input."""
    data = relay.var(data_name, shape=shape, dtype=tensor_type)
    return IRModule.from_expr(relay.Function([data], relay.exp(data)))
@tvm.testing.parametrize_targets("llvm")
def test_one_set_input(target, dev):
    """set_input on a single-parameter model, positionally and by keyword."""
    dtype = "float32"
    in_shape = [1, 2, 3, 3]
    in_data_name_0 = "d0"
    mod = get_one_input_relay_mod(dtype, in_shape, in_data_name_0)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    exe = runtime.vm.VirtualMachine(vm_exec, dev)
    data0_core = np.random.uniform(size=in_shape).astype(dtype)
    data0 = tvm.nd.array(data0_core)
    ref_res_core = np.exp(data0_core)
    ref_res = tvm.nd.array(ref_res_core)
    # Positional form.
    exe.set_input("main", data0)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # Keyword form, keyed by parameter name.
    data_dict = {in_data_name_0: data0}
    exe.set_input("main", **data_dict)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
def get_multiple_input_relay_mod(tensor_type, shape, data_name0, data_name1):
    """Build an IRModule whose main adds two identically-shaped named inputs."""
    lhs = relay.var(data_name0, shape=shape, dtype=tensor_type)
    rhs = relay.var(data_name1, shape=shape, dtype=tensor_type)
    return IRModule.from_expr(relay.Function([lhs, rhs], lhs + rhs))
@tvm.testing.parametrize_targets("llvm")
def test_multiple_set_input(target, dev):
    """set_input on a two-parameter model, positionally and by keyword."""
    dtype = "float32"
    in_shape = [1, 2, 3, 3]
    in_data_name_0 = "d0"
    in_data_name_1 = "d1"
    mod = get_multiple_input_relay_mod(dtype, in_shape, in_data_name_0, in_data_name_1)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    exe = runtime.vm.VirtualMachine(vm_exec, dev)
    data0_core = np.random.uniform(size=in_shape).astype(dtype)
    data0 = tvm.nd.array(data0_core)
    data1_core = np.random.uniform(size=in_shape).astype(dtype)
    data1 = tvm.nd.array(data1_core)
    ref_res_core = data0_core + data1_core
    ref_res = tvm.nd.array(ref_res_core)
    # Positional form.
    exe.set_input("main", data0, data1)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # Keyword form; dict order deliberately differs from parameter order.
    data_dict = {in_data_name_1: data1, in_data_name_0: data0}
    exe.set_input("main", **data_dict)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
@tvm.testing.parametrize_targets("llvm")
def test_one_set_one_input(target, dev):
    """set_one_input by index, by name, and via kwargs on a one-input model."""
    dtype = "float32"
    in_shape = [1, 2, 3, 3]
    in_data_name_0 = "d0"
    mod = get_one_input_relay_mod(dtype, in_shape, in_data_name_0)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    exe = runtime.vm.VirtualMachine(vm_exec, dev)
    data0_core = np.random.uniform(size=in_shape).astype(dtype)
    data0 = tvm.nd.array(data0_core)
    ref_res_core = np.exp(data0_core)
    ref_res = tvm.nd.array(ref_res_core)
    # By positional index.
    exe.set_one_input("main", 0, data0)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # By parameter name.
    exe.set_one_input("main", in_data_name_0, data0)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # Via keyword arguments.
    data_dict = {in_data_name_0: data0}
    exe.set_one_input("main", **data_dict)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
@tvm.testing.parametrize_targets("llvm")
def test_multiple_set_one_input(target, dev):
    """set_one_input called once per parameter of a two-input model."""
    dtype = "float32"
    in_shape = [1, 2, 3, 3]
    in_data_name_0 = "d0"
    in_data_name_1 = "d1"
    mod = get_multiple_input_relay_mod(dtype, in_shape, in_data_name_0, in_data_name_1)
    # Compile to VMExecutable.
    vm_exec = vm.compile(mod, target=target)
    exe = runtime.vm.VirtualMachine(vm_exec, dev)
    data0_core = np.random.uniform(size=in_shape).astype(dtype)
    data0 = tvm.nd.array(data0_core)
    data1_core = np.random.uniform(size=in_shape).astype(dtype)
    data1 = tvm.nd.array(data1_core)
    ref_res_core = data0_core + data1_core
    ref_res = tvm.nd.array(ref_res_core)
    # By positional index, set out of order on purpose.
    exe.set_one_input("main", 1, data1)
    exe.set_one_input("main", 0, data0)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # By parameter name.
    exe.set_one_input("main", in_data_name_1, data1)
    exe.set_one_input("main", in_data_name_0, data0)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
    # Via keyword arguments, one call per input.
    data_dict = {in_data_name_1: data1}
    exe.set_one_input("main", **data_dict)
    data_dict = {in_data_name_0: data0}
    exe.set_one_input("main", **data_dict)
    output = exe.invoke("main")
    assert output.dtype == ref_res.dtype
    tvm.testing.assert_allclose(ref_res_core, output.numpy())
@tvm.testing.parametrize_targets("llvm")
def test_benchmark(target, dev):
    """VirtualMachine.benchmark: real run sanity plus a mocked-timer stats check."""
    mod, params = mlp.get_workload(1)
    lib = vm.compile(mod, target=target, params=params)
    exe = runtime.vm.VirtualMachine(lib, tvm.cpu())
    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
    result = exe.benchmark(tvm.cpu(), data, func_name="main", repeat=2, number=1)
    assert result.mean == result.median
    assert result.mean > 0
    assert len(result.results) == 2

    # Patch the timer so the statistics are computed from the fixed
    # samples [1, 2, 2, 5].
    with patch.object(
        tvm.runtime.module.Module,
        "time_evaluator",
        return_value=lambda x: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),
    ) as method:
        result = exe.benchmark(dev, data, func_name="main", repeat=2, number=1)
        assert result.mean == 2.5
        assert result.median == 2.0
        assert result.max == 5
        assert result.min == 1
        assert result.std == 1.5
def test_benchmark_end_to_end(target, dev):
    """benchmark(end_to_end=True) includes data transfer and still reports > 0."""
    mod, params = mlp.get_workload(1)
    lib = vm.compile(mod, target=target, params=params)
    exe = runtime.vm.VirtualMachine(lib, dev)
    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
    result = exe.benchmark(dev, data, func_name="main", repeat=2, number=1, end_to_end=True)
    assert result.mean > 0
@tvm.testing.requires_cuda
def test_benchmark_end_to_end_rpc():
    """End-to-end benchmark of a CUDA-compiled VM over a local RPC session."""
    server = rpc.Server("127.0.0.1")
    remote = rpc.connect(server.host, server.port)

    mod, params = mlp.get_workload(1)
    lib = vm.compile(mod, target="cuda", params=params)

    # Export, upload, and reload the executable through the RPC session.
    temp = utils.tempdir()
    path = temp.relpath("vm_library.so")
    lib.mod.export_library(path)
    remote.upload(path)
    rlib = remote.load_module("vm_library.so")

    exe = runtime.vm.VirtualMachine(rlib, remote.device("cuda"))
    data = tvm.nd.array(
        np.random.rand(1, 1, 28, 28).astype("float32"), device=remote.device("cuda")
    )
    result = exe.benchmark(
        remote.device("cuda"), data=data, func_name="main", repeat=2, number=1, end_to_end=True
    )
    assert result.mean > 0
def test_shape_func_nested_function():
    """Lowering a partitioned, dynamic-shaped graph must not crash on shape funcs."""

    # Stub external codegen: only the registration matters for this test.
    @tvm.register_func("relay.ext.test2")
    def relay_ext_test(func):
        return None

    data_shape = (relay.Any(), 16)
    weight_shape = (relay.Any(), 16)

    dense = relay.nn.dense(
        relay.var("data", shape=data_shape), relay.var("weight", shape=weight_shape)
    )
    mod = tvm.IRModule.from_expr(dense)

    # Offload the dense pattern to the "test2" external compiler.
    patterns = [("test.dense", is_op("nn.dense")(wildcard(), wildcard()))]
    passes = tvm.transform.Sequential(
        [
            relay.transform.MergeComposite(patterns),
            relay.transform.AnnotateTarget(["test2"]),
            relay.transform.PartitionGraph(),
        ]
    )

    mod = passes(mod)

    compiler = VMCompiler()
    compiler.lower(mod, "llvm")
@tvm.testing.requires_cuda
def test_storage_size_and_offset_on_cpu():
    """Tests allocations place sizes and offsets on the CPU host even if the rest
    of the computation is on a different device type."""

    # TODO(mbs): Better would be to test ManifestAlloc independently.
    # And/or move this to C++ and test the VM executable in it's C++ instead of
    # pretty-printed form.

    # CPU = device type 1
    # GPU = device type 2
    def input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32],
                      param_device_types=[2], result_device_type=2) {
              add(%a, %a)
            }
        """
        )

    exe = relay.vm.compile(
        input(),
        tvm.target.Target("cuda"),
    )

    # This program needs two constants:
    # - The size of the tensor's storage (first arg) to alloc_storage
    # - The offset of the tensor within the storage (second arg) to alloc_tensor
    # Both should be on the CPU
    assert "VirtualDevice[0]: device type 1" in exe.virtual_devices
    assert "VM Const[0]: NDArray[(),int64,(1,0)]=[140] on device index 0" in exe.constants
    assert "VM Const[1]: NDArray[(),int64,(1,0)]=[0] on device index 0" in exe.constants
@tvm.testing.requires_cuda
def test_reshape_shape_on_cpu():
    """Tests the argument to a reshape places the shape on the CPU host even if the rest
    of the computation is on a different device type."""

    # TODO(mbs): Better would be to test ManifestAlloc independently.
    # And/or move this to C++ and test the VM executable in it's C++ instead of
    # pretty-printed form.

    # CPU = device type 1
    # GPU = device type 2
    def input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(2, 8), float32],
                      param_device_types=[2], result_device_type=2) {
              reshape(%x, newshape=[2, 4, 2])
            }
        """
        )

    exe = relay.vm.compile(
        input(),
        tvm.target.Target("cuda"),
    )

    # The newshape annotation should have been turned into a constant on the CPU.
    assert "VirtualDevice[0]: device type 1" in exe.virtual_devices
    assert "VM Const[0]: NDArray[(3),int64,(1,0)]=[2,4,2] on device index 0" in exe.constants
@tvm.testing.requires_cuda
def test_multi_targets():
    """Heterogeneous execution: y + z pinned to CPU via on_device, rest on CUDA."""
    # Build an IRModule.
    n = 10
    x = relay.var("x", shape=(n,))
    y = relay.var("y", shape=(n,))
    z = relay.var("z", shape=(n,))
    f = relay.Function([x, y, z], x + relay.op.annotation.on_device(y + z, tvm.cpu()))
    mod = IRModule.from_expr(f)

    # Compile to VMExecutable; un-annotated ops fall back to the CUDA device.
    with tvm.transform.PassContext(
        opt_level=3, config={"relay.fallback_device_type": tvm.cuda().device_type}
    ):
        exe = relay.vm.compile(
            mod, target={"cpu": tvm.target.Target("llvm"), "cuda": tvm.target.Target("cuda")}
        )

    # Run
    vm = runtime.vm.VirtualMachine(exe, [tvm.cuda(), tvm.cpu()])
    x_data = np.random.rand(
        n,
    ).astype("float32")
    y_data = np.random.rand(
        n,
    ).astype("float32")
    z_data = np.random.rand(
        n,
    ).astype("float32")
    actual_result = vm.invoke("main", x_data, y_data, z_data)

    # Test
    expected_result = x_data + y_data + z_data
    tvm.testing.assert_allclose(actual_result.numpy(), expected_result)
def test_let_bound_constants():
    """This tests for an ICHECK failure for ill-formed IR with let-bound constants"""
    inp = relay.var("x", shape=(3,), dtype="int32")
    picked = relay.take(inp, relay.const(0))
    const_one = relay.const(1)
    func = relay.Function([inp], relay.stack((const_one, picked), axis=0))
    # Optimizing (not fully compiling) is enough to exercise the failing path.
    compiler = VMCompiler()
    compiler.optimize(IRModule.from_expr(func), target="llvm")
def test_large_constants():
    """Large constants can be serialized outside of executable"""
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()

    # fn(x) { add(x, <large constant>) }
    x = relay.var("x", shape=(1000, 1000))
    const_data = np.random.rand(1000, 1000).astype("float32")
    const = relay.const(const_data, dtype="float32")
    func = relay.Function([x], relay.op.add(x, const))
    mod = tvm.IRModule.from_expr(func)

    # Compile to executable.
    vm_exec = vm.compile(mod, target=target)

    # Save to constants and library files.
    temp = utils.tempdir()
    path_consts = temp.relpath("consts")
    # Every constant over 256 bytes is stripped from the executable and
    # serialized to its own file instead ("late-bound").
    vm_exec.move_late_bound_consts(path_consts, byte_limit=256)
    path_dso = temp.relpath("lib.so")
    # The export must happen AFTER the consts are moved so the library is
    # written without them.
    vm_exec.mod.export_library(path_dso)

    # Load library files and constants via the module's packed function.
    mod = runtime.load_module(path_dso)
    mod["load_late_bound_consts"](path_consts)

    # Test main
    x_data = np.random.rand(1000, 1000).astype("float32")
    the_vm = runtime.vm.VirtualMachine(mod, dev)
    actual = the_vm.invoke("main", x_data)
    expected = x_data + const_data
    tvm.testing.assert_allclose(expected, actual.numpy())

    # We load the mod again so it's missing the consts.
    mod = runtime.load_module(path_dso)
    exe = runtime.vm.Executable(mod)

    # Also test loading consts via the VM's wrapper API.
    exe.load_late_bound_consts(path_consts)

    # Test main again with consts now loaded via the above API.
    x_data = np.random.rand(1000, 1000).astype("float32")
    the_vm = runtime.vm.VirtualMachine(exe, dev)
    actual = the_vm.invoke("main", x_data)
    expected = x_data + const_data
    tvm.testing.assert_allclose(expected, actual.numpy())
def test_load_late_bound_consts_with_no_late_bound_consts():
    """Check that load_late_bound_consts handles a model with no late bound consts."""
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()

    const_data = np.random.rand(1).astype("float64")
    inp = relay.var("x", shape=(1,), dtype="float64")
    func = relay.Function([inp], relay.op.add(inp, relay.const(const_data, dtype="float64")))
    vm_exec = vm.compile(tvm.IRModule.from_expr(func), target=target)

    temp = utils.tempdir()
    path_consts = temp.relpath("consts")
    path_dso = temp.relpath("lib.so")

    # Ensure const_data is below the byte threshold for a late-bound const.
    byte_limit = len(const_data.tobytes()) + 1
    vm_exec.move_late_bound_consts(path_consts, byte_limit=byte_limit)
    vm_exec.mod.export_library(path_dso)

    loaded_mod = runtime.load_module(path_dso)
    loaded_mod["load_late_bound_consts"](path_consts)

    x_data = np.random.rand(1).astype("float64")
    loaded_vm = runtime.vm.VirtualMachine(loaded_mod, dev)
    actual = loaded_vm.invoke("main", x_data)
    tvm.testing.assert_allclose(x_data + const_data, actual.numpy())
def test_vm_save_and_load_without_designating_late_bound_consts():
    """Check that a VM can be saved and loaded without late-bound consts in play.

    Specifically, this test ensures that the machinery behind late-bound const
    loading does not assume the need to load late-bound consts (and cause an error)
    when the user did not choose to designate any consts as such.
    """
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()

    const_data = np.random.rand(1).astype("float64")
    inp = relay.var("x", shape=(1,), dtype="float64")
    func = relay.Function([inp], relay.op.add(inp, relay.const(const_data, dtype="float64")))
    vm_exec = vm.compile(tvm.IRModule.from_expr(func), target=target)

    # Round-trip the executable through the in-memory save/load path.
    code, lib = vm_exec.save()
    exe = runtime.vm.Executable.load_exec(code, lib)

    x_data = np.random.rand(1).astype("float64")
    loaded_vm = runtime.vm.VirtualMachine(exe, dev)
    actual = loaded_vm.invoke("main", x_data)
    tvm.testing.assert_allclose(x_data + const_data, actual.numpy())
def test_load_and_save_constants_via_map():
    """Large constants can be serialized outside of executable"""
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()

    # fn(x) { add(x, <large constant>) }
    x = relay.var("x", shape=(1000, 1000))
    const_data = np.random.rand(1000, 1000).astype("float32")
    const = relay.const(const_data, dtype="float32")
    func = relay.Function([x], relay.op.add(x, const))
    mod = tvm.IRModule.from_expr(func)

    # Compile to executable.
    vm_exec = vm.compile(mod, target=target)
    # Unlike move_late_bound_consts, this returns the stripped constants as an
    # in-memory map instead of writing them to disk.
    consts_map = vm_exec.get_late_bound_consts(byte_limit=256)

    # Save to constants and library files
    temp = utils.tempdir()
    path_dso = temp.relpath("lib.so")
    # Exported AFTER stripping, so the library carries no large constants.
    vm_exec.mod.export_library(path_dso)

    # Load library files and constants
    mod = runtime.load_module(path_dso)
    mod["load_late_bound_consts_from_map"](consts_map)

    # Test main
    x_data = np.random.rand(1000, 1000).astype("float32")
    the_vm = runtime.vm.VirtualMachine(mod, dev)
    actual = the_vm.invoke("main", x_data)
    expected = x_data + const_data
    tvm.testing.assert_allclose(expected, actual.numpy())

    # We load the mod again so it's missing the consts.
    mod = runtime.load_module(path_dso)
    exe = runtime.vm.Executable(mod)

    # Also test loading consts via the VM's wrapper API.
    exe.load_late_bound_consts_from_map(consts_map)

    # Test main again with consts now loaded via the above API.
    x_data = np.random.rand(1000, 1000).astype("float32")
    the_vm = runtime.vm.VirtualMachine(exe, dev)
    actual = the_vm.invoke("main", x_data)
    expected = x_data + const_data
    tvm.testing.assert_allclose(expected, actual.numpy())
def test_load_late_bound_consts_via_map_with_no_late_bound_consts():
    """Check that load_late_bound_consts handles a model with no late bound consts."""
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()

    const_data = np.random.rand(1).astype("float64")
    inp = relay.var("x", shape=(1,), dtype="float64")
    func = relay.Function([inp], relay.op.add(inp, relay.const(const_data, dtype="float64")))
    vm_exec = vm.compile(tvm.IRModule.from_expr(func), target=target)

    temp = utils.tempdir()
    path_dso = temp.relpath("lib.so")

    # Ensure const_data is below the byte threshold for a late-bound const.
    byte_limit = len(const_data.tobytes()) + 1
    consts_map = vm_exec.get_late_bound_consts(byte_limit=byte_limit)
    vm_exec.mod.export_library(path_dso)

    loaded_mod = runtime.load_module(path_dso)
    loaded_mod["load_late_bound_consts_from_map"](consts_map)

    x_data = np.random.rand(1).astype("float64")
    loaded_vm = runtime.vm.VirtualMachine(loaded_mod, dev)
    actual = loaded_vm.invoke("main", x_data)
    tvm.testing.assert_allclose(x_data + const_data, actual.numpy())
if __name__ == "__main__":
    # Discover and run every test in this file when executed as a script.
    tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.transform import fake_quantization_to_integer
def compare_fq_to_int(expr, args, allow_rounding_error=False):
    """Run `expr` before and after FakeQuantizationToInteger and compare outputs.

    Asserts the pass changed the module, then checks numerical equivalence:
    exact equality by default, or off-by-one when allow_rounding_error is set.
    """
    mod = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
    mod_int = tvm.relay.transform.FakeQuantizationToInteger()(mod)
    assert not tvm.ir.structural_equal(mod, mod_int)

    def run(module):
        executor = relay.create_executor("vm", mod=module, device=tvm.cpu(), target="llvm")
        return executor.evaluate()(*args).numpy()

    result = run(mod)
    result_int = run(mod_int)

    if allow_rounding_error:
        assert np.all(np.abs(result.astype("int32") - result_int.astype("int32")) <= 1)
    else:
        assert np.array_equal(result, result_int)
def test_fake_quantize_conv():
    """Fake-quantized conv2d lowers to an integer-only equivalent for int8/uint8."""
    for quant_dtype in ["int8", "uint8"]:
        data = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
        kernel = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
        zp = relay.const(0)
        conv = relay.op.nn.conv2d(
            relay.qnn.op.dequantize(data, relay.const(2.0), zp),
            relay.qnn.op.dequantize(kernel, relay.const(0.5), zp),
            kernel_size=[5, 5],
        )
        expr = relay.qnn.op.quantize(conv, relay.const(1.0), zp, out_dtype=quant_dtype)

        data_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
        kernel_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
        compare_fq_to_int(expr, [data_np, kernel_np])
def test_fake_quantize_conv_per_channel():
    """Conv2d with per-output-channel weight quantization lowers to integer ops.

    Per-channel scales make bit-exact equality impossible, so the comparison
    allows off-by-one rounding differences.
    """
    for out_dtype in ["int8", "uint8"]:
        x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
        w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
        # One zero point per output channel (axis 0 of the kernel).
        # NOTE: the unused `one` local from the original was removed.
        zero_point = relay.const([np.random.randint(0, 255)] * 16)
        op = relay.op.nn.conv2d(
            relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0)),
            relay.qnn.op.dequantize(
                w, relay.const(np.random.random([16]).astype("float32")), zero_point, axis=0
            ),
            kernel_size=[5, 5],
            channels=16,
        )
        op = relay.qnn.op.quantize(op, relay.const(1.0), relay.const(0), out_dtype=out_dtype)

        x_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
        w_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
        compare_fq_to_int(op, [x_np, w_np], allow_rounding_error=True)
def test_fake_quantize_transposeconv():
    """Fake-quantized conv2d_transpose lowers to an integer-only equivalent."""
    for quant_dtype in ["int8", "uint8"]:
        data = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
        kernel = relay.var("w", shape=[3, 16, 5, 5], dtype="int8")
        zp = relay.const(0)
        deconv = relay.op.nn.conv2d_transpose(
            relay.qnn.op.dequantize(data, relay.const(2.0), zp),
            relay.qnn.op.dequantize(kernel, relay.const(0.5), zp),
            kernel_size=[5, 5],
            data_layout="NCHW",
            kernel_layout="IOHW",
        )
        expr = relay.qnn.op.quantize(deconv, relay.const(1.0), zp, out_dtype=quant_dtype)

        data_np = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
        kernel_np = np.random.randint(-128, 127, size=[3, 16, 5, 5], dtype="int8")
        compare_fq_to_int(expr, [data_np, kernel_np])
def test_fake_quantize_dense():
    """Fake-quantized dense lowers to an integer-only equivalent."""
    for quant_dtype in ["int8", "uint8"]:
        data = relay.var("x", shape=[128, 64], dtype="int8")
        weight = relay.var("w", shape=[256, 64], dtype="int8")
        zp = relay.const(0)
        dense = relay.op.nn.dense(
            relay.qnn.op.dequantize(data, relay.const(2.0), zp),
            relay.qnn.op.dequantize(weight, relay.const(0.5), zp),
        )
        expr = relay.qnn.op.quantize(dense, relay.const(1.0), zp, out_dtype=quant_dtype)

        data_np = np.random.randint(-128, 127, size=[128, 64], dtype="int8")
        weight_np = np.random.randint(-128, 127, size=[256, 64], dtype="int8")
        compare_fq_to_int(expr, [data_np, weight_np])
def test_fake_quantize_dense_per_channel():
    """Dense with per-output-channel weight scales lowers within rounding error."""
    for quant_dtype in ["int8", "uint8"]:
        data = relay.var("x", shape=[128, 64], dtype="int8")
        weight = relay.var("w", shape=[256, 64], dtype="int8")
        one = relay.const(1.0)
        zp = relay.const(0)
        dense = relay.op.nn.dense(
            relay.qnn.op.dequantize(data, relay.const(2.0), zp),
            relay.qnn.op.dequantize(
                weight,
                relay.const(np.random.random([256]).astype("float32")),
                relay.const([0] * 256),
                axis=0,
            ),
            units=256,
        )
        expr = relay.qnn.op.quantize(dense, one, zp, out_dtype=quant_dtype)

        data_np = np.random.randint(-128, 127, size=[128, 64], dtype="int8")
        weight_np = np.random.randint(-128, 127, size=[256, 64], dtype="int8")
        compare_fq_to_int(expr, [data_np, weight_np], allow_rounding_error=True)
def test_fake_quantize_dense_bias():
    """Dense followed by a dequantized bias add lowers to integer ops."""
    data = relay.var("x", shape=[128, 64], dtype="int8")
    weight = relay.var("w", shape=[256, 64], dtype="int8")
    bias = relay.var("bias", shape=[256], dtype="int32")
    zp = relay.const(0)
    weight_scale = np.random.random([256]).astype("float32")

    dense = relay.op.nn.dense(
        relay.qnn.op.dequantize(data, relay.const(2.0), zp),
        relay.qnn.op.dequantize(weight, relay.const(weight_scale), zp, axis=0),
        units=256,
    )
    # Bias scale is the product of the input scale (2.0) and the weight scales.
    biased = dense + relay.qnn.op.dequantize(bias, relay.const(2.0 * weight_scale), zp)
    expr = relay.qnn.op.quantize(biased, relay.const(1.0), zp, out_dtype="int8")

    data_np = np.random.randint(-128, 127, size=[128, 64], dtype="int8")
    weight_np = np.random.randint(-128, 127, size=[256, 64], dtype="int8")
    bias_np = np.random.randint(-128, 127, size=[256], dtype="int32")
    compare_fq_to_int(expr, [data_np, weight_np, bias_np], allow_rounding_error=True)
def test_fake_quantize_batch_matmul():
    """Fake-quantized batch_matmul lowers to an integer-only equivalent."""
    for quant_dtype in ["int8", "uint8"]:
        lhs = relay.var("x", shape=[1, 128, 64], dtype="int8")
        rhs = relay.var("w", shape=[1, 256, 64], dtype="int8")
        zp = relay.const(0)
        matmul = relay.op.nn.batch_matmul(
            relay.qnn.op.dequantize(lhs, relay.const(2.0), zp),
            relay.qnn.op.dequantize(rhs, relay.const(0.5), zp),
        )
        expr = relay.qnn.op.quantize(matmul, relay.const(1.0), zp, out_dtype=quant_dtype)

        lhs_np = np.random.randint(-128, 127, size=[1, 128, 64], dtype="int8")
        rhs_np = np.random.randint(-128, 127, size=[1, 256, 64], dtype="int8")
        compare_fq_to_int(expr, [lhs_np, rhs_np])
def test_fake_transpose_quantize_conv():
    """A transpose between dequantize and conv2d is absorbed by the rewrite."""
    data = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    kernel = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    zp = relay.const(0)

    # NHWC input transposed to NCHW before the convolution.
    nchw = relay.transpose(relay.qnn.op.dequantize(data, relay.const(2.0), zp), [0, 3, 1, 2])
    conv = relay.op.nn.conv2d(
        nchw, relay.qnn.op.dequantize(kernel, relay.const(0.5), zp), kernel_size=[5, 5]
    )
    expr = relay.qnn.op.quantize(conv, relay.const(1.0), zp)

    data_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    kernel_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    compare_fq_to_int(expr, [data_np, kernel_np])
@pytest.mark.parametrize("const_bias", [False, True])
def test_fake_transpose_quantize_conv_bias_add(const_bias):
    """transpose + conv2d + bias_add lowers whether the bias is const or quantized."""
    data = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    kernel = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    one = relay.const(1.0)
    zp = relay.const(0)
    if const_bias:
        bias = relay.const(np.random.random(16).astype("float32"))
    else:
        bias = relay.qnn.op.dequantize(relay.var("bias", shape=[16], dtype="int32"), one, zp)

    nchw = relay.transpose(relay.qnn.op.dequantize(data, relay.const(2.0), zp), [0, 3, 1, 2])
    conv = relay.op.nn.conv2d(
        nchw, relay.qnn.op.dequantize(kernel, relay.const(0.5), zp), kernel_size=[5, 5]
    )
    expr = relay.qnn.op.quantize(relay.op.nn.bias_add(conv, bias), one, zp)

    args = [
        np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8"),
        np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8"),
    ]
    if not const_bias:
        # Only the non-const variant takes the bias as a runtime argument.
        args.append(np.random.randint(-32768, 32767, size=[16], dtype="int32"))
    compare_fq_to_int(expr, args)
def test_fake_transpose_quantize_conv_bias_add_per_channel():
    """Per-channel conv2d + bias_add lowers within off-by-one rounding error."""
    data = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    kernel = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    bias = relay.var("bias", shape=[16], dtype="int32")
    one = relay.const(1.0)
    zp = relay.const(0)
    w_scale = (np.random.random([16]).astype("float32") - 0.5) / 10 + 0.5
    # Tiny perturbation so the bias scale is not exactly input_scale * w_scale.
    noise = (np.random.random([16]).astype("float32") - 0.5) * 1e-15
    w_zp = relay.const([0] * 16)

    nchw = relay.transpose(relay.qnn.op.dequantize(data, relay.const(2.0), zp), [0, 3, 1, 2])
    conv = relay.op.nn.conv2d(
        nchw,
        relay.qnn.op.dequantize(kernel, relay.const(w_scale), w_zp, axis=0),
        kernel_size=[5, 5],
    )
    biased = relay.op.nn.bias_add(
        conv, relay.qnn.op.dequantize(bias, relay.const(2.0 * w_scale + noise), w_zp, axis=0)
    )
    expr = relay.qnn.op.quantize(biased, one, zp)

    data_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    kernel_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")
    compare_fq_to_int(expr, [data_np, kernel_np, bias_np], allow_rounding_error=True)
def test_fake_transpose_quantize_conv_bias_add_mismatch():
    """A bias dequantized with a mismatched scale still lowers correctly."""
    data = relay.var("x", shape=[1, 224, 224, 3], dtype="int8")
    kernel = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    bias = relay.var("bias", shape=[16], dtype="int32")
    zp = relay.const(0)

    nchw = relay.transpose(relay.qnn.op.dequantize(data, relay.const(2.0), zp), [0, 3, 1, 2])
    conv = relay.op.nn.conv2d(
        nchw, relay.qnn.op.dequantize(kernel, relay.const(0.5), zp), kernel_size=[5, 5]
    )
    # The bias scale (2.0) deliberately differs from data_scale * kernel_scale (1.0).
    biased = relay.op.nn.bias_add(conv, relay.qnn.op.dequantize(bias, relay.const(2.0), zp))
    expr = relay.qnn.op.quantize(biased, relay.const(1.0), zp)

    data_np = np.random.randint(-128, 127, size=[1, 224, 224, 3], dtype="int8")
    kernel_np = np.random.randint(-128, 127, size=[16, 3, 5, 5], dtype="int8")
    bias_np = np.random.randint(-32768, 32767, size=[16], dtype="int32")
    compare_fq_to_int(expr, [data_np, kernel_np, bias_np])
def test_fake_quantize_maxpool():
    """max_pool2d between dequantize/quantize lowers to integer pooling."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.nn.max_pool2d(dq, [3, 3]), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
@pytest.mark.parametrize("output_size", [None, 1])
def test_fake_quantize_adaptive_avgpool1d(output_size):
    """adaptive_avg_pool1d lowers to integer ops within rounding error."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 128, 768], dtype="int8"), relay.const(2.0), relay.const(-12)
    )
    pooled = relay.op.nn.adaptive_avg_pool1d(dq, output_size)
    expr = relay.qnn.op.quantize(pooled, relay.const(0.5), relay.const(10))
    data = np.random.randint(-128, 127, size=[1, 128, 768], dtype="int8")
    compare_fq_to_int(expr, [data], True)
def test_fake_quantize_avgpool():
    """avg_pool2d lowers to integer ops within rounding error."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), relay.const(-12)
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.avg_pool2d(dq, [3, 3]), relay.const(0.5), relay.const(10)
    )
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data], True)
def test_fake_quantize_global_avg_pool():
    """global_avg_pool2d lowers to integer ops within rounding error."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), relay.const(-12)
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.global_avg_pool2d(dq), relay.const(0.5), relay.const(10)
    )
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data], True)
class TestUnaryQNNOp:
    """Unary elementwise ops wrapped in dequantize/quantize for int8 and uint8."""

    def helper_test_fake_quantize_unary_op(self, fp32_op, pos_values=False):
        """Build dequantize -> fp32_op -> quantize and compare to its integer lowering."""
        for dtype in ["int8", "uint8"]:
            dtype_min = -128 if dtype == "int8" else 0
            inp = relay.var("x", shape=[1, 3, 3, 3], dtype=dtype)
            if pos_values:
                # Ops defined only on positive values pin both zero points to the
                # dtype minimum so the dequantized range stays non-negative.
                in_zp = relay.const(dtype_min)
                out_zp = relay.const(dtype_min)
            else:
                in_zp = relay.const(np.random.randint(0, 255) + dtype_min)
                out_zp = relay.const(np.random.randint(0, 255) + dtype_min)
            dq = relay.qnn.op.dequantize(inp, relay.const(np.random.rand()), in_zp)
            expr = relay.qnn.op.quantize(
                fp32_op(dq), relay.const(np.random.rand()), out_zp, out_dtype=dtype
            )
            data = np.random.randint(0 + dtype_min, 255 + dtype_min, size=[1, 3, 3, 3], dtype=dtype)
            compare_fq_to_int(expr, [data], True)

    def test_sqrt(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.sqrt, pos_values=True)

    def test_rsqrt(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.rsqrt, pos_values=True)

    def test_exp(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.exp)

    def test_erf(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.erf)

    def test_sigmoid(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.sigmoid)

    def test_tanh(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.tanh)

    def test_log(self):
        self.helper_test_fake_quantize_unary_op(fp32_op=relay.log, pos_values=True)
def test_fake_quantize_reshape():
    """reshape between dequantize/quantize lowers to an integer reshape."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.reshape(dq, [1, 3, -1]), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_image_resize_bilinear():
    """Bilinear resize2d lowers to integer ops within rounding error."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    resized = relay.image.resize2d(dq, size=[4, 4], method="linear")
    expr = relay.qnn.op.quantize(resized, relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data], allow_rounding_error=True)
def test_fake_quantize_abs():
    """abs between dequantize/quantize lowers to integer ops."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.abs(dq), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_expand_dims():
    """expand_dims between dequantize/quantize lowers to an integer op."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.expand_dims(dq, axis=1), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_squeeze():
    """squeeze between dequantize/quantize lowers to an integer op."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.squeeze(dq, axis=[0]), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_strided_slice():
    """strided_slice between dequantize/quantize lowers to an integer op."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    sliced = relay.op.strided_slice(dq, begin=[0, 0, 0, 0], end=[1, 1, 112, 112])
    expr = relay.qnn.op.quantize(sliced, relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_split():
    """split lowers correctly for both a section count and explicit indices."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )

    # Even split into two sections; quantize the first output.
    halves = relay.op.split(dq, axis=3, indices_or_sections=2)
    expr = relay.qnn.op.quantize(halves[0], relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])

    # Explicit split points; quantize the second output.
    quarters = relay.op.split(dq, axis=3, indices_or_sections=[56, 112, 168])
    expr = relay.qnn.op.quantize(quarters[1], relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_batch_flatten():
    """batch_flatten between dequantize/quantize lowers to an integer op."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    expr = relay.qnn.op.quantize(relay.op.nn.batch_flatten(dq), relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_transpose_reshape():
    """A transpose followed by a reshape lowers to integer shape ops."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(2.0), zp
    )
    reshaped = relay.op.reshape(relay.op.transpose(dq, [1, 0, 2, 3]), [3, -1])
    expr = relay.qnn.op.quantize(reshaped, relay.const(2.0), zp)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_concat():
    """concatenate over inputs with distinct scales lowers to integer ops."""
    zp = relay.const(0)
    branches = [
        relay.qnn.op.dequantize(
            relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zp
        )
        for i in range(4)
    ]
    expr = relay.qnn.op.quantize(relay.op.concatenate(branches, axis=1), relay.const(3.5), zp)
    args = [np.random.randint(-128, 127, size=[1, 4], dtype="int8") for _ in range(4)]
    compare_fq_to_int(expr, args)
@pytest.mark.parametrize("k", [0, 1, 5])
@pytest.mark.parametrize("axis", [0, -1, 1])
@pytest.mark.parametrize("is_ascend", [True, False])
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
def test_fake_quantize_topk(k, axis, is_ascend, dtype):
    """topk over dequantized values lowers to an integer topk."""
    zp = relay.const(0)
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[20, 100], dtype=dtype), relay.const(2.0), zp
    )
    picked = relay.topk(dq, k, axis, "values", is_ascend, "float32")
    expr = relay.qnn.op.quantize(picked, relay.const(2.0), zp, out_dtype=dtype)
    data = np.random.randint(0, 127, size=[20, 100], dtype=dtype)
    compare_fq_to_int(expr, [data])
def test_fake_quantize_clip():
    """clip between dequantize/quantize lowers to integer clamping."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"), relay.const(2.0), relay.const(114)
    )
    expr = relay.qnn.op.quantize(
        relay.op.clip(dq, 0, 6), relay.const(2.0), relay.const(114), out_dtype="uint8"
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_clip_per_channel():
    """clip on per-channel quantized data lowers with per-channel bounds."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"),
        relay.const([1.0, 2.0, 3.0]),
        relay.const([96, 114, 128]),
        axis=1,
    )
    expr = relay.qnn.op.quantize(
        relay.op.clip(dq, 0, 6),
        relay.const([1.0, 2.0, 3.0]),
        relay.const([96, 114, 128]),
        out_dtype="uint8",
        axis=1,
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_relu():
    """relu between dequantize/quantize lowers to integer clamping."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"), relay.const(2.0), relay.const(114)
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.relu(dq), relay.const(2.0), relay.const(114), out_dtype="uint8"
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_mean():
    """mean reduction lowers to integer ops within rounding error."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"), relay.const(2.0), relay.const(114)
    )
    expr = relay.qnn.op.quantize(
        relay.op.mean(dq), relay.const(2.0), relay.const(114), out_dtype="uint8"
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data], allow_rounding_error=True)
def test_fake_quantize_relu_per_channel():
    """relu on per-channel quantized data lowers with per-channel zero points."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"),
        relay.const([1.0, 2.0, 3.0]),
        relay.const([96, 114, 128]),
        axis=1,
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.relu(dq),
        relay.const([1.0, 2.0, 3.0]),
        relay.const([96, 114, 128]),
        out_dtype="uint8",
        axis=1,
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_leaky_relu():
    """leaky_relu lowers to integer ops within rounding error."""
    dq = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="uint8"), relay.const(2.0), relay.const(114)
    )
    expr = relay.qnn.op.quantize(
        relay.op.nn.leaky_relu(dq, 0.1), relay.const(2.0), relay.const(114), out_dtype="uint8"
    )
    data = np.random.randint(0, 255, size=[1, 3, 224, 224], dtype="uint8")
    compare_fq_to_int(expr, [data], True)
@pytest.mark.parametrize(
    "operator",
    [relay.op.add, relay.op.multiply, relay.op.subtract, relay.op.minimum, relay.op.maximum],
)
def test_fake_quantize_binary(operator):
    """Elementwise binary ops on two quantized inputs lower to integer ops."""
    lhs = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(0.1), relay.const(0)
    )
    rhs = relay.qnn.op.dequantize(
        relay.var("y", shape=[1, 3, 224, 224], dtype="int8"), relay.const(0.2), relay.const(0)
    )
    # Multiply grows the dynamic range, so it gets a larger output scale.
    out_scale = relay.const(20.0) if operator == relay.op.multiply else relay.const(0.1)
    expr = relay.qnn.op.quantize(operator(lhs, rhs), out_scale, relay.const(0), out_dtype="int8")

    lhs_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
    rhs_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [lhs_np, rhs_np])
@pytest.mark.parametrize(
    "operator",
    [relay.op.add, relay.op.multiply, relay.op.subtract, relay.op.minimum, relay.op.maximum],
)
def test_fake_quantize_binary_per_channel(operator):
    """Binary elementwise ops where both sides are per-channel quantized."""

    def verify_binary_per_channel(lhs_scale, rhs_scale, lhs_zp, rhs_zp, out_zp, lhs_axis, rhs_axis):
        # Build dequantize(lhs) <op> dequantize(rhs) -> quantize and compare the
        # fake-quantized graph against its integer lowering (rounding allowed).
        if operator == relay.op.multiply:
            out_scale = relay.const(2.0)
            rhs_axis = lhs_axis  # TODO: Support different axes for per-channel quantized multiply
        else:
            out_scale = relay.const(0.1)

        x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
        x = relay.qnn.op.dequantize(x, relay.const(lhs_scale), relay.const(lhs_zp), axis=lhs_axis)
        y = relay.var("y", shape=[1, 3, 224, 224], dtype="int8")
        y = relay.qnn.op.dequantize(y, relay.const(rhs_scale), relay.const(rhs_zp), axis=rhs_axis)
        op = operator(x, y)
        op = relay.qnn.op.quantize(op, out_scale, relay.const(out_zp), out_dtype="int8")
        x_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
        y_np = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
        compare_fq_to_int(op, [x_np, y_np], allow_rounding_error=True)

    # Same axis
    # All zero points zero.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 3),
        rhs_scale=np.random.uniform(1.0, 5.0, 3),
        lhs_zp=0,
        rhs_zp=0,
        out_zp=0,
        lhs_axis=1,
        rhs_axis=1,
    )
    # Non-zero input zero points, zero output zero point.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 3),
        rhs_scale=np.random.uniform(1.0, 5.0, 3),
        lhs_zp=np.random.randint(1, 3),
        rhs_zp=np.random.randint(1, 3),
        out_zp=0,
        lhs_axis=1,
        rhs_axis=1,
    )
    # Non-zero zero points everywhere.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 3),
        rhs_scale=np.random.uniform(1.0, 5.0, 3),
        lhs_zp=np.random.randint(1, 3),
        rhs_zp=np.random.randint(1, 3),
        out_zp=np.random.randint(1, 3),
        lhs_axis=1,
        rhs_axis=1,
    )
    # Negative (last) axis with 224 channels.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 224),
        rhs_scale=np.random.uniform(1.0, 5.0, 224),
        lhs_zp=np.random.randint(1, 3),
        rhs_zp=np.random.randint(1, 3),
        out_zp=np.random.randint(1, 3),
        lhs_axis=-1,
        rhs_axis=-1,
    )

    # Different axes
    # All zero points zero.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 224),
        rhs_scale=np.random.uniform(1.0, 5.0, 224),
        lhs_zp=0,
        rhs_zp=0,
        out_zp=0,
        lhs_axis=2,
        rhs_axis=3,
    )
    # Non-zero input zero points, zero output zero point.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 224),
        rhs_scale=np.random.uniform(1.0, 5.0, 224),
        lhs_zp=np.random.randint(1, 3),
        rhs_zp=np.random.randint(1, 3),
        out_zp=0,
        lhs_axis=2,
        rhs_axis=3,
    )
    # Non-zero zero points everywhere.
    verify_binary_per_channel(
        lhs_scale=np.random.uniform(1.0, 5.0, 224),
        rhs_scale=np.random.uniform(1.0, 5.0, 224),
        lhs_zp=np.random.randint(1, 3),
        rhs_zp=np.random.randint(1, 3),
        out_zp=np.random.randint(1, 3),
        lhs_axis=2,
        rhs_axis=3,
    )
@pytest.mark.parametrize(
    "operator",
    [
        relay.op.add,
        relay.op.multiply,
        relay.op.subtract,
        relay.op.minimum,
        relay.op.maximum,
    ],
)
def test_fake_quantize_binary_const(operator):
    """Binary ops with one quantized input and one fp32 constant lower correctly."""
    lhs = relay.qnn.op.dequantize(
        relay.var("x", shape=[1, 3, 224, 224], dtype="int8"), relay.const(0.1), relay.const(10)
    )
    expr = relay.qnn.op.quantize(
        operator(lhs, relay.const(1.0)), relay.const(0.1), relay.const(10), out_dtype="int8"
    )
    data = np.random.randint(-25, 25, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_subtract_different_output_zp():
    """Subtract whose output zero point differs from its input's still lowers."""
    for dtype in ["uint8"]:
        dq = relay.qnn.op.dequantize(
            relay.var("x", shape=[1, 128, 128, 3], dtype=dtype),
            relay.const(0.1),
            relay.const(0),
            axis=1,
        )
        diff = relay.transpose(relay.subtract(dq, relay.const(0.5)), axes=[0, 3, 1, 2])
        expr = relay.qnn.op.quantize(
            diff, relay.const(0.2), relay.const(128), out_dtype=dtype, axis=1
        )
        data = np.random.randint(0, 255, size=[1, 128, 128, 3], dtype=dtype)
        compare_fq_to_int(expr, [data], True)
def test_fake_quantize_pad():
    """nn.pad inside a dequantize/quantize pair is lowered to integer padding."""
    inp = relay.var("x", shape=[1, 383, 128], dtype="int8")
    deq = relay.qnn.op.dequantize(inp, relay.const(1.0), relay.const(10))
    padded = relay.op.nn.pad(deq, [[0, 0], [0, 1], [0, 0]], 0.0)
    expr = relay.qnn.op.quantize(padded, relay.const(1.0), relay.const(10), out_dtype="int8")
    data = np.random.randint(-25, 25, size=[1, 383, 128], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_depth_to_space():
    """depth_to_space inside a dequantize/quantize pair is lowered to int."""
    inp = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
    zero_point = relay.const(0)
    deq = relay.qnn.op.dequantize(inp, relay.const(2.0), zero_point)
    spaced = relay.op.nn.depth_to_space(deq, 4)
    expr = relay.qnn.op.quantize(spaced, relay.const(2.0), zero_point)
    data = np.random.randint(-128, 127, size=[1, 3, 224, 224], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_max_min():
    """max/min reductions inside a fake-quantize region lower to int."""

    def _check(make_reduce):
        inp = relay.var("x", shape=[1, 3, 10, 10], dtype="int8")
        zero_point = relay.const(0)
        body = relay.qnn.op.dequantize(inp, relay.const(2.0), zero_point)
        # To be a little more realistic since max/min will rarely be by themselves
        body = relay.op.nn.depth_to_space(body, 4)
        body = make_reduce(body)
        body = relay.qnn.op.quantize(body, relay.const(2.0), zero_point)
        data = np.random.randint(-128, 127, size=[1, 3, 10, 10], dtype="int8")
        compare_fq_to_int(body, [data])

    _check(relay.op.max)
    _check(relay.op.min)
    # Test forwarding kwargs works
    _check(lambda t: relay.op.max(t, axis=1))
    _check(lambda t: relay.op.min(t, axis=1))
def test_fq_avg_pool_conv2d():
    """avg_pool2d followed by conv2d inside one fake-quantized region."""
    dtype = "uint8"
    x_shape = [1, 4, 24, 24]
    w_shape = [8, 4, 1, 1]
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("w", shape=w_shape, dtype=dtype)
    # Tested expression: dq(x) -> avg_pool -> conv2d with dq(w) -> q.
    deq_data = relay.qnn.op.dequantize(data, relay.const(0.64), relay.const(2))
    pooled = relay.op.nn.avg_pool2d(deq_data, [3, 3])
    deq_weight = relay.qnn.op.dequantize(weight, relay.const(0.5), relay.const(10))
    conv = relay.op.nn.conv2d(pooled, deq_weight, kernel_size=[1, 1])
    expr = relay.qnn.op.quantize(conv, relay.const(1.0), relay.const(0), out_dtype="uint8")
    x_np = np.random.randint(0, 255, size=x_shape, dtype=dtype)
    w_np = np.random.randint(0, 255, size=w_shape, dtype=dtype)
    compare_fq_to_int(expr, [x_np, w_np])
def test_fq_hard_fail():
    """A failing op rewrite rule must either leave the module untouched
    (hard_fail=False) or raise (hard_fail=True)."""
    # level=11 overrides the default registration, so conv2d rewriting always throws.
    @tvm.ir.register_op_attr("nn.conv2d", "FTVMFakeQuantizationToInteger", level=11)
    def conv2d(expr, type_map):  # pylint: disable=unused-variable
        raise NotImplementedError
    x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
    w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
    one = relay.const(1.0)
    zero = relay.const(0)
    op = relay.op.nn.conv2d(
        relay.qnn.op.dequantize(x, relay.const(2.0), zero),
        relay.qnn.op.dequantize(w, relay.const(0.5), zero),
        kernel_size=[5, 5],
    )
    op = relay.qnn.op.quantize(op, one, zero, out_dtype="int8")
    mod = tvm.IRModule.from_expr(op)
    mod = tvm.relay.transform.InferType()(mod)
    # Soft failure: the pass falls back to the original module.
    mod_int = tvm.relay.transform.FakeQuantizationToInteger(hard_fail=False)(mod)
    assert tvm.ir.structural_equal(mod_int, mod)
    # Catch a generic exception because the tvm FFI eats the python exception type
    with pytest.raises(Exception):
        mod_int = tvm.relay.transform.FakeQuantizationToInteger(hard_fail=True)(mod)
def compare_expected_fq_qat_to_int(expr, expected_expr, args, allow_rounding_error=False):
    """Run FQ2I with QAT enabled on `expr`, assert it matches `expected_expr`
    structurally, and check all three modules agree numerically on `args`."""
    mod = tvm.IRModule.from_expr(expr)
    mod_def = tvm.relay.transform.InferType()(mod)
    mod_int = tvm.relay.transform.FakeQuantizationToInteger(False, True)(mod_def)
    mod_exp = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expected_expr))
    # The pass must have changed something, and the result must match expectations.
    assert not tvm.ir.structural_equal(mod, mod_int)
    assert tvm.ir.structural_equal(mod_int, mod_exp)

    def _evaluate(module):
        return (
            relay.create_executor("vm", mod=module, device=tvm.cpu(), target="llvm")
            .evaluate()(*args)
            .numpy()
        )

    result_def = _evaluate(mod_def)
    result_int = _evaluate(mod_int)
    result_exp = _evaluate(mod_exp)
    if allow_rounding_error:
        assert np.all(np.abs(result_def.astype("int32") - result_int.astype("int32")) <= 1)
    else:
        assert np.array_equal(result_def, result_int)
    assert np.array_equal(result_int, result_exp)
def test_fq_qat_op_positive_part():
    # Only the first operation is converted, since the next operation("add") is not enabled.
    shape_x = [1, 4, 2]
    shape_w = [1, 4, 2]
    a = relay.var("a", shape=shape_x, dtype="int8")
    b = relay.var("b", shape=shape_w, dtype="int8")
    op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
    op2 = relay.op.nn.batch_matmul(op0, op1)
    op3 = relay.op.add(op2, relay.const(1.0))
    expr = relay.op.erf(op3)
    # Expected: batch_matmul becomes qnn.batch_matmul + dequantize; add/erf stay float.
    # NOTE: uses the canonical `relay.qnn.op.*` path; the previous
    # `relay.qnn.op.qnn.*` spelling resolves to the same functions but was
    # redundant and inconsistent with the rest of this file.
    op0 = relay.qnn.op.batch_matmul(
        a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
    )
    op1 = relay.qnn.op.dequantize(op0, relay.const(12.0), relay.const(0))
    op2 = relay.op.add(op1, relay.const(1.0))
    expected_expr = relay.op.erf(op2)
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_negative_all():
    # None of the operations are converted, since the first operation("add") is not enabled.
    shape_x = [1, 4, 2]
    shape_w = [1, 4, 2]
    a = relay.var("a", shape=shape_x, dtype="int8")
    b = relay.var("b", shape=shape_w, dtype="int8")
    lhs = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    rhs = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
    shifted = relay.op.add(rhs, relay.const(1.0))
    product = relay.op.nn.batch_matmul(lhs, shifted)
    expr = relay.op.erf(product)
    # The expected program is identical to the input program.
    expected_expr = expr
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_single():
    # The single operation is converted.
    shape_x = [1, 4, 2]
    shape_w = [1, 4, 2]
    a = relay.var("a", shape=shape_x, dtype="int8")
    b = relay.var("b", shape=shape_w, dtype="int8")
    op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
    expr = relay.op.nn.batch_matmul(op0, op1)
    # Expected: a single qnn.batch_matmul followed by a dequantize.
    # NOTE: uses the canonical `relay.qnn.op.*` path; the previous
    # `relay.qnn.op.qnn.*` spelling resolves to the same functions but was
    # redundant and inconsistent with the rest of this file.
    op0 = relay.qnn.op.batch_matmul(
        a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
    )
    expected_expr = relay.qnn.op.dequantize(op0, relay.const(12.0), relay.const(0))
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_nothing_to_do():
    # All operations are converted by the non-QAT pass.
    shape_x = [1, 4, 2]
    shape_w = [1, 4, 2]
    a = relay.var("a", shape=shape_x, dtype="int8")
    b = relay.var("b", shape=shape_w, dtype="int8")
    op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
    op2 = relay.op.nn.batch_matmul(op0, op1)
    op3 = relay.op.add(op2, relay.const(1.0))
    expr = relay.qnn.op.quantize(op3, relay.const(1.0), relay.const(0), out_dtype="int8")
    # Expected graph: everything in the integer domain, ending in a requantize.
    op0 = relay.qnn.op.batch_matmul(
        a, b, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
    )
    # NOTE(review): this quantizes the scalar 1.0 (the add's constant operand) at the
    # matmul output scale 12.0 into int32 — presumably mirroring how the pass handles
    # the float constant; confirm against the pass implementation.
    op1 = relay.qnn.op.quantize(
        relay.const(1.0), relay.const(12.0), relay.const(0), out_dtype="int32"
    )
    op2 = relay.op.add(
        op0,
        op1,
    )
    # Requantize from the accumulated scale 12.0 down to the requested output scale 1.0.
    expected_expr = relay.qnn.op.requantize(
        op2, relay.const(12.0), relay.const(0), relay.const(1.0), relay.const(0), out_dtype="int8"
    )
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_qat_positive_couple():
    # Several consecutive operations are converted.
    shape_x = [1, 2, 4]
    shape_w = [2]
    a = relay.var("a", shape=shape_x, dtype="int8")
    b = relay.var("b", shape=shape_w, dtype="int8")
    op0 = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    op1 = relay.qnn.op.dequantize(b, relay.const(6.0), relay.const(0))
    op2 = relay.op.reshape(op0, (1, 4, 2))
    op3 = relay.op.broadcast_to(op1, (2, 2, 2))
    op4 = relay.op.nn.batch_matmul(op2, op3)
    expr = relay.op.erf(op4)
    # Expected: shape ops stay on int8, the matmul becomes qnn.batch_matmul + dequantize.
    # NOTE: uses the canonical `relay.qnn.op.*` path; the previous
    # `relay.qnn.op.qnn.*` spelling resolves to the same functions but was
    # redundant and inconsistent with the rest of this file.
    op0 = relay.op.reshape(a, (1, 4, 2))
    op1 = relay.op.broadcast_to(b, (2, 2, 2))
    op3 = relay.qnn.op.batch_matmul(
        op0, op1, relay.const(0), relay.const(0), relay.const(2.0), relay.const(6.0)
    )
    op4 = relay.qnn.op.dequantize(op3, relay.const(12.0), relay.const(0))
    expected_expr = relay.op.erf(op4)
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np, w_np])
def test_fq_positive_single_arg_part():
    # The single-argument operation is converted.
    shape_x = [1, 2, 4]
    a = relay.var("a", shape=shape_x, dtype="int8")
    deq = relay.qnn.op.dequantize(a, relay.const(2.0), relay.const(0))
    expr = relay.op.erf(relay.op.reshape(deq, (1, 4, 2)))
    # Expected: reshape runs on int8 and the dequantize moves after it.
    reshaped = relay.op.reshape(a, (1, 4, 2))
    expected_expr = relay.op.erf(
        relay.qnn.op.dequantize(reshaped, relay.const(2.0), relay.const(0))
    )
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np])
def test_fq_qat_intermediate_infertype():
    # Complex conversion of non-QAT and QAT passes that form FakeQuantizationToInteger.
    shape_x = [1, 2, 4]
    x = relay.var("x", shape=shape_x, dtype="float32")
    const_0 = relay.const(np.random.uniform(size=[1, 4, 2]).astype("float32"))
    # Input graph: q -> dq -> reshape -> q, a quantized constant, dq both, matmul, add.
    op0 = relay.qnn.op.quantize(x, relay.const(17.0), relay.const(0), out_dtype="int8")
    op1 = relay.qnn.op.dequantize(op0, relay.const(17.0), relay.const(0))
    op2 = relay.op.reshape(op1, (1, 4, 2))
    op3 = relay.qnn.op.quantize(op2, relay.const(10.0), relay.const(0), out_dtype="int8")
    op4 = relay.qnn.op.quantize(const_0, relay.const(1.0), relay.const(8), out_dtype="int8")
    op5 = relay.qnn.op.dequantize(op3, relay.const(10.0), relay.const(0))
    op6 = relay.qnn.op.dequantize(op4, relay.const(4.0), relay.const(9))
    op7 = relay.op.nn.batch_matmul(op5, op6)
    expr = relay.op.add(op7, relay.const(5.0))
    # Expected graph: the dq->reshape->q collapses to reshape + requantize, and the
    # matmul runs as qnn.batch_matmul at the combined scale 10.0 * 4.0 = 40.0.
    op0 = relay.qnn.op.quantize(x, relay.const(17.0), relay.const(0), out_dtype="int8")
    op1 = relay.op.reshape(op0, (1, 4, 2))
    op2 = relay.qnn.op.requantize(
        op1, relay.const(17.0), relay.const(0), relay.const(10.0), relay.const(0), out_dtype="int8"
    )
    op3 = relay.qnn.op.quantize(const_0, relay.const(1.0), relay.const(8), out_dtype="int8")
    op4 = relay.qnn.op.batch_matmul(
        op2, op3, relay.const(0), relay.const(9), relay.const(10.0), relay.const(4.0)
    )
    op5 = relay.qnn.op.dequantize(op4, relay.const(40.0), relay.const(0))
    expected_expr = relay.op.add(op5, relay.const(5.0))
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int32").astype("float32")
    compare_expected_fq_qat_to_int(expr, expected_expr, [x_np])
def test_fake_quantize_take():
    """take with constant indices inside a dequantize/quantize pair lowers to int."""
    inp = relay.var("x", shape=[33, 11], dtype="int8")
    idx_np = np.random.randint(0, 33, size=[37], dtype="int32")
    deq = relay.qnn.op.dequantize(inp, relay.const(2.0), relay.const(114))
    taken = relay.op.take(deq, relay.const(idx_np), axis=0)
    expr = relay.qnn.op.quantize(taken, relay.const(2.0), relay.const(114), out_dtype="uint8")
    data = np.random.randint(-25, 25, size=[33, 11], dtype="int8")
    compare_fq_to_int(expr, [data])
def test_fake_quantize_softmax():
    """Check the optional qnn softmax lowering enabled via optional_qnn_ops."""
    shape = [5, 10]
    x_ = relay.var("x", shape=shape, dtype="int8")
    # True when the 1-D array `a` is in non-decreasing order.
    is_sorted = lambda a: np.all(a[:-1] <= a[1:])
    for scale in [1.0, 0.1, 0.01]:
        x = relay.qnn.op.dequantize(x_, relay.const(scale), relay.const(0))
        op = relay.op.nn.softmax(x, axis=1)
        op = relay.qnn.op.quantize(
            op, relay.const(1.0 / 256.0), relay.const(-128), out_dtype="int8"
        )
        # Sorted inputs make the monotonicity check below meaningful per row.
        x_np = np.random.randint(-128, 127, size=shape, dtype="int8")
        x_np = np.sort(x_np)
        args = [x_np]
        mod = tvm.IRModule.from_expr(op)
        mod = tvm.relay.transform.InferType()(mod)
        # "nn.softmax" is only converted when explicitly listed in optional_qnn_ops.
        mod_int = tvm.relay.transform.FakeQuantizationToInteger(
            hard_fail=True, optional_qnn_ops=["nn.softmax"]
        )(mod)
        assert not tvm.ir.structural_equal(mod, mod_int)
        result = (
            relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm")
            .evaluate()(*args)
            .numpy()
        )
        result_int = (
            relay.create_executor("vm", mod=mod_int, device=tvm.cpu(), target="llvm")
            .evaluate()(*args)
            .numpy()
        )
        # Check at least the softmax output is in ascending order,
        # since it is difficult to use allclose due to not-so-good accuracy.
        for qdq, qop in zip(result, result_int):
            assert is_sorted(qdq)
            assert is_sorted(qop)
        # Best-effort numeric comparison: a mismatch is printed, not fatal.
        try:
            np.testing.assert_allclose(result_int, result, atol=1)
        except AssertionError as e:
            # To see the difference
            print(e)
if __name__ == "__main__":
    # Discover and run every test in this file when executed directly.
    tvm.testing.main()
| 42,014 | 35.002571 | 100 | py |
tvm | tvm-main/tests/python/relay/test_pass_combine_parallel_batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-locals,too-many-arguments,missing-module-docstring
import tvm
from tvm import relay
from tvm.relay import transform
def run_opt_pass(expr, opt_pass):
    """Wrap `expr` in an IRModule, infer types, apply `opt_pass`, and return "main"."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    mod = tvm.IRModule.from_expr(expr)
    mod = tvm.relay.transform.InferType()(mod)
    mod = opt_pass(mod)
    return mod["main"]
def test_combine_parallel_batch_matmul():
    """Simple testcase."""
    def before(x, w1, w2, w3):
        # Three parallel batch_matmuls sharing the same LHS input.
        args = [x, w1, w2, w3]
        y1 = relay.nn.batch_matmul(x, w1)
        y2 = relay.nn.batch_matmul(x, w2)
        y3 = relay.nn.batch_matmul(x, w3)
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def expected(x, w1, w2, w3):
        # use a fixed order of args so alpha equal check can pass
        s1 = w1.type_annotation.shape[1]
        s2 = w2.type_annotation.shape[1]
        s3 = w3.type_annotation.shape[1]
        args = [x, w1, w2, w3]
        # One matmul against weights concatenated on the output dim, then
        # size-mode strided slices recover the three original results.
        w = relay.concatenate((w1, w2, w3), axis=1)
        y = relay.nn.batch_matmul(x, w)
        y1 = relay.strided_slice(
            y, begin=[0, 0, 0], end=[-1, -1, s1], strides=[1, 1, 1], slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, 0, s1], end=[-1, -1, s2], strides=[1, 1, 1], slice_mode="size"
        )
        y3 = relay.strided_slice(
            y, begin=[0, 0, s1 + s2], end=[-1, -1, s3], strides=[1, 1, 1], slice_mode="size"
        )
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def check(b, i, j, k):
        # x: (b, i, k); each w: (b, j, k) -> each product: (b, i, j).
        x = relay.var("x", shape=(b, i, k))
        w1 = relay.var("w1", shape=(b, j, k))
        w2 = relay.var("w2", shape=(b, j, k))
        w3 = relay.var("w3", shape=(b, j, k))
        y_before = before(x, w1, w2, w3)
        y = run_opt_pass(y_before, transform.CombineParallelBatchMatmul(min_num_branches=2))
        y_expected = expected(x, w1, w2, w3)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(2, 3, 5, 4)
    check(1, 100, 200, 300)
def test_combine_parallel_batch_matmul_biasadd():
    """Simple testcase with bias"""
    def before(x, w1, w2, w3, b1, b2, b3):
        # Three parallel batch_matmuls, each followed by its own bias add.
        args = [x, w1, w2, w3, b1, b2, b3]
        y1 = relay.nn.batch_matmul(x, w1)
        y2 = relay.nn.batch_matmul(x, w2)
        y3 = relay.nn.batch_matmul(x, w3)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y3 = relay.add(y3, b3)
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def expected(x, w1, w2, w3, b1, b2, b3):
        # use a fixed order of args so alpha equal check can pass
        s1 = w1.type_annotation.shape[1]
        s2 = w2.type_annotation.shape[1]
        s3 = w3.type_annotation.shape[1]
        args = [x, w1, w2, w3, b1, b2, b3]
        # Weights concatenate on the output dim, biases on the last dim, so the
        # combined matmul + single add replaces all three branches.
        w = relay.concatenate((w1, w2, w3), axis=1)
        b = relay.concatenate((b1, b2, b3), axis=-1)
        y = relay.nn.batch_matmul(x, w)
        y = relay.add(y, b)
        y1 = relay.strided_slice(
            y, begin=[0, 0, 0], end=[-1, -1, s1], strides=[1, 1, 1], slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, 0, s1], end=[-1, -1, s2], strides=[1, 1, 1], slice_mode="size"
        )
        y3 = relay.strided_slice(
            y, begin=[0, 0, s1 + s2], end=[-1, -1, s3], strides=[1, 1, 1], slice_mode="size"
        )
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def check(b, i, j, k):
        # x: (b, i, k); each w: (b, j, k); each bias: (j,).
        x = relay.var("x", shape=(b, i, k))
        w1 = relay.var("w1", shape=(b, j, k))
        w2 = relay.var("w2", shape=(b, j, k))
        w3 = relay.var("w3", shape=(b, j, k))
        b1 = relay.var("b1", shape=(j,))
        b2 = relay.var("b2", shape=(j,))
        b3 = relay.var("b3", shape=(j,))
        y_before = before(x, w1, w2, w3, b1, b2, b3)
        y = run_opt_pass(y_before, transform.CombineParallelBatchMatmul(min_num_branches=2))
        y_expected = expected(x, w1, w2, w3, b1, b2, b3)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(2, 3, 5, 4)
    check(1, 100, 200, 300)
if __name__ == "__main__":
    # Run both combiner test cases when executed directly as a script.
    test_combine_parallel_batch_matmul()
    test_combine_parallel_batch_matmul_biasadd()
| 5,219 | 36.826087 | 92 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_dequantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing import run_infer_type
def dequantize_test_driver(
    in_dtype, quant_args, in_data, verify_output_data, axis, out_dtype="float32"
):
    """Build a qnn.dequantize graph, run it on LLVM, and compare exactly.
    `quant_args` holds "in_scale" and "in_zero_point", either scalars
    (per-tensor) or arrays (per-channel along `axis`).
    """
    shape = in_data.shape
    input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
    input_zero_point = relay.const(quant_args["in_zero_point"], "int32")
    input_scale = relay.const(quant_args["in_scale"], "float32")
    quantized_output = relay.qnn.op.dequantize(
        input_data,
        input_scale=input_scale,
        input_zero_point=input_zero_point,
        axis=axis,
        out_dtype=out_dtype,
    )
    mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
    mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        # Dequantize is exact for these fixtures, so require bit-for-bit equality.
        np.testing.assert_equal(res, verify_output_data)
        assert res.dtype == out_dtype
def test_uint8_to_float32():
    """Per-tensor dequantize of uint8: out = (q - 127) * 0.5."""
    quantized = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255], dtype="uint8").reshape((2, 5))
    expected = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float32"
    ).reshape((2, 5))
    dequantize_test_driver(
        in_dtype="uint8",
        quant_args={"in_zero_point": 127, "in_scale": 0.5},
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
    )
def test_int8_to_float32():
    """Per-tensor dequantize of int8: out = (q - (-1)) * 0.5."""
    quantized = np.array(
        [-128, -127, -126, -125, -124, 123, 124, 125, 126, 127], dtype="int8"
    ).reshape((2, 5))
    expected = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float32"
    ).reshape((2, 5))
    dequantize_test_driver(
        in_dtype="int8",
        quant_args={"in_zero_point": -1, "in_scale": 0.5},
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
    )
def test_int8_to_float16():
    """Per-tensor dequantize of int8 producing a float16 result."""
    quantized = np.array(
        [-128, -127, -126, -125, -124, 123, 124, 125, 126, 127], dtype="int8"
    ).reshape((2, 5))
    expected = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float16"
    ).reshape((2, 5))
    dequantize_test_driver(
        in_dtype="int8",
        quant_args={"in_zero_point": -1, "in_scale": 0.5},
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
        out_dtype="float16",
    )
def test_scalar_int8_to_float32():
    """Zero-dimensional input: (-128 - (-1)) * 0.5 == -63.5."""
    dequantize_test_driver(
        in_dtype="int8",
        quant_args={"in_zero_point": -1, "in_scale": 0.5},
        in_data=np.array(-128, dtype="int8"),
        verify_output_data=np.array(-63.5, dtype="float32"),
        axis=-1,
    )
def test_int32_to_float32():
    """Per-tensor dequantize of int32 values with a small fractional scale."""
    quantized = np.array([113, 29, -1052], dtype="int32")
    expected = np.array([0.6550452, 0.16810896, -6.098297], dtype="float32")
    dequantize_test_driver(
        in_dtype="int32",
        quant_args={"in_zero_point": 0, "in_scale": 0.0057968604},
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
    )
def test_channelwise_axis_1():
    """Channelwise dequantize on a (5, 2) input; axis=-1 is the channel axis (axis 1)."""
    quantized = (
        np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251], dtype="uint8").reshape((2, 5)).T
    )
    expected = (
        np.array([-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32], dtype="float32")
        .reshape((2, 5))
        .T
    )
    dequantize_test_driver(
        in_dtype="uint8",
        quant_args={
            "in_zero_point": np.array([127, 123], dtype="int32"),
            "in_scale": np.array([0.5, 0.25], dtype="float32"),
        },
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
    )
def test_channelwise_axis_0():
    """Channelwise dequantize along axis 0 of a (2, 5) input (one scale/zp per row)."""
    quantized = np.array([0, 1, 2, 3, 4, 243, 247, 249, 250, 251], dtype="uint8").reshape((2, 5))
    expected = np.array(
        [-63.5, -63, -62.5, -62, -61.5, 30, 31, 31.5, 31.75, 32], dtype="float32"
    ).reshape((2, 5))
    dequantize_test_driver(
        in_dtype="uint8",
        quant_args={
            "in_zero_point": np.array([127, 123], dtype="int32"),
            "in_scale": np.array([0.5, 0.25], dtype="float32"),
        },
        in_data=quantized,
        verify_output_data=expected,
        axis=0,
    )
def test_per_tensor_vector_args():
    """Length-1 vector scale/zero-point still mean per-tensor quantization."""
    quantized = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255], dtype="uint8")
    expected = np.array([-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64], dtype="float32")
    dequantize_test_driver(
        in_dtype="uint8",
        quant_args={
            "in_zero_point": np.array([127], dtype="int32"),
            "in_scale": np.array([0.5], dtype="float32"),
        },
        in_data=quantized,
        verify_output_data=expected,
        axis=-1,
    )
def test_dynamic_dequantize():
    """Dequantize with scale/zero-point supplied as runtime expressions, not constants."""
    x = relay.var("x", shape=(1, 2, 3, 4), dtype="int8")
    scale_var = relay.var("scale", shape=(), dtype="float32")
    zp_var = relay.var("zp", shape=(), dtype="int32")
    # Non-constant scale (scale^2) and zero point (2*zp) exercise the dynamic path.
    deq_x = relay.qnn.op.dequantize(x, scale_var * scale_var, zp_var + zp_var)
    tt = run_infer_type(deq_x)
    assert tt.checked_type == relay.TensorType((1, 2, 3, 4), "float32")
    func = relay.Function([x, scale_var, zp_var], deq_x)
    data = np.random.uniform(size=(1, 2, 3, 4)).astype("int8")
    scale = np.array(1).astype("float32")
    zp = np.array(0).astype("int32")
    mod = tvm.ir.IRModule.from_expr(func)
    # Only checks that the module builds and runs on every enabled target.
    for target, dev in tvm.testing.enabled_targets():
        # TODO: (electriclilies) enable AlterOpLayout when it is fixed
        with relay.build_config(opt_level=3, disabled_pass=["AlterOpLayout"]):
            lib = relay.build(mod, target=target)
        module = graph_executor.GraphModule(lib["default"](dev))
        module.set_input(**{"x": data, "scale": scale, "zp": zp})
        module.run()
if __name__ == "__main__":
    # Run every test in this file when executed directly as a script.
    test_uint8_to_float32()
    test_int8_to_float32()
    test_int8_to_float16()
    test_scalar_int8_to_float32()
    test_int32_to_float32()
    test_channelwise_axis_1()
    test_channelwise_axis_0()
    # test_per_tensor_vector_args was missing from this entry point (pytest still
    # collected it); added so direct invocation covers the same cases.
    test_per_tensor_vector_args()
    test_dynamic_dequantize()
| 7,169 | 33.471154 | 97 | py |
tvm | tvm-main/tests/python/relay/test_pass_manifest_lifetimes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.relay import Function, transform
from tvm.relay.testing import inception_v3
import pytest
import sys
def optimize_and_check(before_program, after_program, passes):
    """Apply `passes` to `before_program` and assert structural equality with
    `after_program`; string inputs are parsed as Relay text first."""
    if isinstance(before_program, str):
        before_program = tvm.relay.parse(before_program)
    if isinstance(after_program, str):
        after_program = tvm.relay.parse(after_program)
    if not isinstance(passes, list):
        passes = [passes]
    optimized_program = tvm.transform.Sequential(passes)(before_program)
    # Print both modules so a failing comparison is easy to debug.
    print("Actual:")
    print(optimized_program)
    print("Expected:")
    print(after_program)
    assert tvm.ir.structural_equal(optimized_program, after_program, map_free_vars=True)
def test_simple_linear():
    """Each let-bound value is killed right after its last use in a straight-line chain."""
    before_program = """
    #[version = "0.0.5"]
    def @main(%x: int) {
        let %y = %x + %x;
        let %z = %y + %y;
        let %w = %z + %z;
        %w
    }
    """
    after_program = """
    #[version = "0.0.5"]
    def @main(%x: int) {
        let %y = %x + %x;
        let %_0 = memory.kill(%x);
        let %z = %y + %y;
        let %_1 = memory.kill(%y);
        let %w = %z + %z;
        let %_2 = memory.kill(%z);
        %w
    }
    """
    optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
def test_simple_if():
    """Kills are inserted inside the taken branch and after the whole if."""
    before_program = """
    #[version = "0.0.5"]
    def @main(%x: int) {
    let %y = cast(%x, dtype="bool");
    let %z = if (%y) {
        let %v0 = %x + %x;
        let %v1 = %v0 * 2;
        %v1
    } else {
        %x
    };
    %z
    }
    """
    after_program = """
    #[version = "0.0.5"]
    def @main(%x: int) {
    let %y = cast(%x, dtype="bool");
    let %z = if (%y) {
        let %v0 = %x + %x;
        let %_0 = memory.kill(%x);
        let %v1 = %v0 * 2;
        let %_1 = memory.kill(%v0);
        %v1
    } else {
        %x
    };
    let %_1 = memory.kill(%y);
    %z
    }
    """
    optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
def test_simple_match():
    """Kills are inserted inside a match arm and after the match expression."""
    before_program = """
    #[version = "0.0.5"]
    type List[A] {
    Cons(A, List[A]),
    Nil,
    }
    def @main(%x: int) {
    let %l : List[int] = Nil;
    let %m = (match (%l) {
        Cons(%head, %rest) => {
        let %y = %x + 1;
        let %z = %y + %y;
        %z
        },
        Nil => -1,
    });
    %m
    }
    """
    after_program = """
    #[version = "0.0.5"]
    type List[A] {
    Cons(A, List[A]),
    Nil,
    }
    def @main(%x: int) {
    let %l : List[int] = Nil;
    let %m = (match (%l) {
        Cons(%head, %rest) => {
        let %y = %x + 1;
        let %_0 = memory.kill(%x);
        let %z = %y + %y;
        let %_1 = memory.kill(%y);
        /* TODO: %head and %rest should be immediately killed */
        %z
        },
        Nil => -1
    });
    let %_2 = memory.kill(%l);
    %m
    }
    """
    optimize_and_check(before_program, after_program, transform.ManifestLifetimes())
if __name__ == "__main__":
    # Discover and run every test in this file when executed directly.
    tvm.testing.main()
| 4,059 | 26.248322 | 88 | py |
tvm | tvm-main/tests/python/relay/test_pass_partition_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
# pylint: disable=not-callable
import os
import sys
import numpy as np
import tvm
from tvm.relay.backend import te_compiler
from tvm.relay.backend.runtime import Runtime
import tvm.relay.testing
import tvm.relay.op as reg
from tvm import relay
from tvm import runtime as tvm_runtime
from tvm.relay import transform
from tvm.relay.testing import byoc
from tvm.contrib import utils
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.op.contrib.register import get_pattern_table
from tvm.relay.build_module import bind_params_by_name
# Leverage the pass manager to write a simple allowed list based annotator
@transform.function_pass(opt_level=0)
class AllowedListAnnotator:
    """Function pass that wraps every call whose op name is in `op_list` with
    compiler_begin/compiler_end annotations for the given `compiler`."""
    def __init__(self, op_list, compiler):
        assert isinstance(op_list, (list, tuple, set))
        self.op_list = op_list
        self.compiler = compiler
    def transform_function(self, func, mod, dev):
        annotator = self
        class Annotator(tvm.relay.ExprMutator):
            def visit_call(self, call):
                op_name = call.op.name
                if op_name in annotator.op_list:
                    # Open a region at each (recursively visited) argument,
                    # then close it right after this call.
                    new_args = []
                    for arg in call.args:
                        ann = compiler_begin(super().visit(arg), annotator.compiler)
                        new_args.append(ann)
                    new_call = relay.Call(call.op, new_args, call.attrs, call.type_args)
                    return compiler_end(new_call, annotator.compiler)
                else:
                    return super().visit_call(call)
        return Annotator().visit(func)
class WholeGraphAnnotator(ExprMutator):
    """
    An annotator that creates a compiler for an entire graph.
    """
    def __init__(self, compiler):
        super(WholeGraphAnnotator, self).__init__()
        self.compiler = compiler
        # True only while visiting the outermost call of the expression.
        self.last_call = True
    def visit_call(self, call):
        curr_last = self.last_call
        self.last_call = False
        params = []
        for arg in call.args:
            param = super().visit(arg)
            # Open the compiler region at every free variable input.
            if isinstance(param, relay.expr.Var):
                param = compiler_begin(param, self.compiler)
            params.append(param)
        new_call = relay.Call(call.op, params, call.attrs)
        # Close the region only after the outermost call.
        if curr_last:
            new_call = compiler_end(new_call, self.compiler)
        return new_call
class MobileNetAnnotator(ExprMutator):
    """
    Annotate mobilenet until global_avg_pool.
    """
    def __init__(self, compiler):
        super(MobileNetAnnotator, self).__init__()
        self.compiler = compiler
        # Becomes True once a nn.global_avg_pool2d call is reached.
        self.compiler_open = False
    def visit_call(self, call):
        if call.op.name == "nn.global_avg_pool2d":
            self.compiler_open = True
        compiler_open = self.compiler_open
        params = []
        for arg in call.args:
            param = super().visit(arg)
            # Close the region at the pool's inputs; open it at var inputs
            # encountered after the pool has been seen.
            if call.op.name == "nn.global_avg_pool2d":
                param = compiler_end(param, self.compiler)
            if compiler_open and isinstance(param, relay.expr.Var):
                param = compiler_begin(param, self.compiler)
            params.append(param)
        new_call = relay.Call(call.op, params, call.attrs)
        return new_call
def check_result(
    mod,
    map_inputs,
    out_shape,
    result,
    tol=1e-5,
    target="llvm",
    device=tvm.cpu(),
    params=None,
    runtime=Runtime("cpp"),
):
    """Build `mod` with both the VM and the graph executor and compare each
    output against `result` within `tol`.
    `out_shape`/`result` may be single values or lists for multi-output modules.
    """
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        return
    def update_lib(lib):
        # Export and reload the library so external C sources are compiled
        # against the runtime contrib headers.
        test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
        source_dir = os.path.join(test_dir, "..", "..", "..")
        contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")
        kwargs = {}
        kwargs["options"] = ["-O2", "-std=c++17", "-I" + contrib_path]
        tmp_path = utils.tempdir()
        lib_name = "lib.so"
        lib_path = tmp_path.relpath(lib_name)
        lib.export_library(lib_path, fcompile=False, **kwargs)
        lib = tvm_runtime.load_module(lib_path)
        return lib
    def check_vm_result():
        # Clear the TE compiler cache so each build starts fresh.
        te_compiler.get().clear()
        with tvm.transform.PassContext(opt_level=3):
            exe = relay.vm.compile(mod, target=target, params=params)
        code, lib = exe.save()
        lib = update_lib(lib)
        exe = tvm_runtime.vm.Executable.load_exec(code, lib)
        vm = tvm_runtime.vm.VirtualMachine(exe, device)
        outs = vm.run(**map_inputs)
        # Normalize single outputs to a list for uniform comparison.
        outs = outs if isinstance(outs, tvm_runtime.container.ADT) else [outs]
        results = result if isinstance(result, list) else [result]
        for out, ref in zip(outs, results):
            tvm.testing.assert_allclose(out.numpy(), ref, rtol=tol, atol=tol)
    def check_graph_executor_result():
        te_compiler.get().clear()
        with tvm.transform.PassContext(opt_level=3):
            json, lib, param = relay.build(mod, target=target, params=params, runtime=runtime)
        lib = update_lib(lib)
        rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.set_input(**param)
        rt_mod.run()
        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]
        for idx, shape in enumerate(out_shapes):
            out = tvm.nd.empty(shape, device=device)
            out = rt_mod.get_output(idx, out)
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=tol, atol=tol)
    check_vm_result()
    check_graph_executor_result()
def test_multi_node_compiler():
    """Partition a three-branch graph: two branches offloaded to "ccompiler",
    one left on TVM, then verify the concatenated result numerically for both
    the llvm/cpp and c/crt target-runtime combinations.
    """
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # C compiler
    # FIXME: We generate two compilers for this case but they should be merged to one
    # due to the common input (x).
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)
    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)
    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = transform.PartitionGraph()(mod)
    mod = transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    targets = [("llvm", Runtime("cpp")), ("c", Runtime("crt", {"system-lib": True}))]
    for tgt, rt in targets:
        # Expected output mirrors the three branches built above, stacked
        # along axis 0 -> shape (30, 10).
        check_result(
            mod,
            map_inputs,
            (30, 10),
            np.concatenate(
                (
                    ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                    ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                    x_data + w_data[6] - w_data[7],
                ),
                axis=0,
            ),
            target=tgt,
            runtime=rt,
        )
def test_extern_ccompiler_single_op():
    """Annotate every call for "ccompiler" so each op forms its own region,
    then partition and check a single add runs correctly end to end.
    """
    @transform.function_pass(opt_level=0)
    class MyAnnotator:
        def transform_function(self, func, mod, dev):
            class Annotator(tvm.relay.ExprMutator):
                def visit_call(self, call):
                    # Wrap every argument with compiler_begin and the call
                    # itself with compiler_end.
                    new_args = []
                    for arg in call.args:
                        ann = compiler_begin(self.visit(arg), "ccompiler")
                        new_args.append(ann)
                    new_call = relay.Call(call.op, new_args)
                    return compiler_end(new_call, "ccompiler")
            return Annotator().visit(func)
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    z = x + y
    f = relay.Function([x, y], z)
    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    mod = tvm.IRModule()
    mod["main"] = f
    mod = MyAnnotator()(mod)
    mod = transform.PartitionGraph()(mod)
    check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
def set_func_attr(func, compile_name, symbol_name):
    """Return ``func`` marked as an externally-compiled primitive.

    Attaches the attributes the partitioner places on offloaded functions:
    the ``Primitive``/``Inline`` flags, the ``Compiler`` name, and the
    ``global_symbol`` under which the function is exported.
    """
    attrs = (
        ("Primitive", tvm.tir.IntImm("int32", 1)),
        ("Inline", tvm.tir.IntImm("int32", 1)),
        ("Compiler", compile_name),
        ("global_symbol", symbol_name),
    )
    for key, value in attrs:
        func = func.with_attr(key, value)
    return func
def test_extern_ccompiler_default_ops():
    """Partition add for "ccompiler" while log/exp/concat stay on TVM.

    The partitioned-and-fused module must be structurally equal to the
    hand-built reference from ``expected()``, and must produce the numpy
    reference result at runtime.
    """
    def expected():
        # Hand-built reference: one external function for the add, plus a
        # fused (Primitive) function for log/exp/concatenate.
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_main_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        mod = transform.InferType()(mod)
        return mod
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f = relay.Function([x, y], concat)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = AllowedListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)
    fused_mod = transform.FuseOps(2)(mod)
    expected_mod = expected()
    assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)
    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    np_add = x_data + y_data
    res = np.concatenate([np.log(np_add), np.exp(np_add)])
    check_result(mod, {"x": x_data, "y": y_data}, (16, 8), res)
def test_extern_compiler_sanitized_ops():
    """Check that a compiler name with non-identifier characters
    ("unsanitary-name++") is sanitized in the generated global symbol
    ("tvmgen_default_unsanitary_name___main_0").  Structural check only;
    no numeric execution.
    """
    def expected():
        # Hand-built reference mirroring test_extern_ccompiler_default_ops,
        # but with the sanitized external-function symbol.
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = set_func_attr(func, "unsanitary-name++", "tvmgen_default_unsanitary_name___main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_unsanitary_name___main_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        mod = transform.InferType()(mod)
        return mod
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f = relay.Function([x, y], concat)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = AllowedListAnnotator(["add", "subtract", "multiply"], "unsanitary-name++")(mod)
    mod = transform.PartitionGraph()(mod)
    fused_mod = transform.FuseOps(2)(mod)
    expected_mod = expected()
    assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)
def test_extern_ccompiler_multiple_functions():
    """Partition a module containing two global functions ("main" and
    "subfunction") and check each gets its own external "ccompiler" region
    with a per-function symbol name; then run "main" numerically.
    """
    def expected():
        # Hand-built reference module with both partitioned functions.
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([x0, y0], add)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_main_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [x, y])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(8, 8))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        main = relay.Function([x, y], fused_call)
        mod["main"] = main
        # define the second one
        a = relay.var("a", shape=(16, 16))
        b = relay.var("b", shape=(16, 16))
        a0 = relay.var("a0", shape=(16, 16))
        b0 = relay.var("b0", shape=(16, 16))
        add = a0 + b0
        # Function that uses C compiler
        func = relay.Function([a0, b0], add)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_subfunction_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_subfunction_0")
        mod[glb_0] = func
        add_call = relay.Call(glb_0, [a, b])
        # Function that uses default compiler. Ops are fused in this function.
        p0 = relay.var("p0", shape=(16, 16))
        log = relay.log(p0)
        exp = relay.exp(p0)
        concat = relay.concatenate([log, exp], axis=0)
        fused_func = relay.Function([p0], concat)
        fused_func = fused_func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        fused_call = relay.Call(fused_func, [add_call])
        # Fixed typo: local variable was previously misspelled "sunfunction".
        subfunction = relay.Function([a, b], fused_call)
        mod["subfunction"] = subfunction
        mod = transform.InferType()(mod)
        return mod
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f = relay.Function([x, y], concat)
    mod = tvm.IRModule()
    mod["main"] = f
    # define second function
    a = relay.var("a", shape=(16, 16))
    b = relay.var("b", shape=(16, 16))
    add = a + b
    log = relay.log(add)
    exp = relay.exp(add)
    concat = relay.concatenate([log, exp], axis=0)
    f2 = relay.Function([a, b], concat)
    mod["subfunction"] = f2
    mod = AllowedListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)
    fused_mod = transform.FuseOps(2)(mod)
    expected_mod = expected()
    assert tvm.ir.structural_equal(fused_mod, expected_mod, map_free_vars=True)
    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    np_add = x_data + y_data
    res = np.concatenate([np.log(np_add), np.exp(np_add)])
    check_result(mod, {"x": x_data, "y": y_data}, (16, 8), res)
def test_extern_ccompiler():
    """End-to-end check: annotate add/subtract/multiply for the external
    "ccompiler" codegen, partition, and compare against numpy.
    """
    x = relay.var("x", shape=(2, 2))
    y = relay.var("y", shape=(2, 2))
    # (y * y) - (x + x): every op in the graph is offloadable.
    func = relay.Function([x, y], y * y - (x + x))
    mod = tvm.IRModule.from_expr(func)
    mod = AllowedListAnnotator(["add", "subtract", "multiply"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)
    x_data = np.random.rand(2, 2).astype("float32")
    y_data = np.random.rand(2, 2).astype("float32")
    expected = (y_data * y_data) - (x_data + x_data)
    check_result(mod, {"x": x_data, "y": y_data}, (2, 2), expected)
def test_extern_dnnl():
    """Offload two depthwise conv2d ops plus an add to DNNL as one region.

    Skipped when the DNNL codegen is not compiled in.  Checks structural
    equality against a hand-built reference, then compares numerically
    against an unpartitioned graph-executor run.
    """
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    def expected():
        # Hand-built reference: the whole graph lives in one external
        # function called from main.
        data0 = relay.var("data", shape=(ishape), dtype=dtype)
        input0 = relay.var("input", shape=(w1shape), dtype=dtype)
        depthwise_conv2d_1 = relay.nn.conv2d(
            data0, input0, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        depthwise_conv2d_2 = relay.nn.conv2d(
            depthwise_conv2d_1, input0, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
        func = relay.Function([data0, input0], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_main_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_main_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight = relay.var("input", shape=(w1shape), dtype=dtype)
        main_f = relay.Function([data, weight], glb_var(data, weight))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        return mod
    def get_func():
        # The graph under test: conv -> conv -> add, sharing one weight.
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight1 = relay.var("weight1", shape=(w1shape), dtype=dtype)
        depthwise_conv2d_1 = relay.nn.conv2d(
            data, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        depthwise_conv2d_2 = relay.nn.conv2d(
            depthwise_conv2d_1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
        )
        out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
        return relay.Function([data, weight1], out)
    mod = tvm.IRModule()
    mod["main"] = WholeGraphAnnotator("dnnl").visit(get_func())
    mod = transform.PartitionGraph()(mod)
    mod = transform.InferType()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
    ref_mod = tvm.IRModule()
    ref_mod["main"] = get_func()
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
        i_data, w1_data
    )
    check_result(
        mod, {"data": i_data, "weight1": w1_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
    )
def test_extern_dnnl_mobilenet():
    """Partition mobilenet for DNNL and compare against the unpartitioned run.

    Skipped when the DNNL codegen is not compiled in.
    """
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (1, 3, 224, 224)
    ref_mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
    # Annotate, merge compiler regions and partition, in that order.
    mod = ref_mod
    for byoc_pass in (
        transform.AnnotateTarget(["dnnl"]),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ):
        mod = byoc_pass(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    # Reference result from the original (unpartitioned) module.
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
        i_data, **params
    )
    te_compiler.get().clear()
    check_result(mod, {"data": i_data}, (1, 1000), ref_res.numpy(), tol=1e-5, params=params)
def test_function_lifting():
    """Partition conv2d and batch_norm into two separate external
    "test_compiler" functions and compare against a hand-built reference.
    Structural check only; no numeric execution.
    """
    def partition():
        # Build conv2d + batch_norm, annotate both ops for "test_compiler",
        # then partition and run the follow-up optimization passes.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16,), "float32"))
        conv = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        bn_output = relay.nn.batch_norm(conv, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func = relay.Function(
            [data, weight, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn_output.astuple()
        )
        mod = tvm.IRModule()
        mod["main"] = func
        mod = relay.transform.InferType()(mod)
        op_list = ["nn.batch_norm", "nn.conv2d"]
        mod = AllowedListAnnotator(op_list, "test_compiler")(mod)
        opt_pass = tvm.transform.Sequential(
            [
                transform.InferType(),
                transform.PartitionGraph(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.AlterOpLayout(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3):
            mod = opt_pass(mod)
        return mod
    def expected():
        # function for batch_norm
        data0 = relay.var("data0", relay.TensorType((1, 16, 224, 224), "float32"))
        mod = tvm.IRModule()
        bn_gamma = relay.var("bn_gamma1", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta1", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean1", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var1", relay.TensorType((16,), "float32"))
        bn = relay.nn.batch_norm(data0, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func0 = relay.Function([data0, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn.astuple())
        func0 = set_func_attr(func0, "test_compiler", "tvmgen_default_test_compiler_main_2")
        gv0 = relay.GlobalVar("tvmgen_default_test_compiler_main_2")
        mod[gv0] = func0
        mod = transform.InferType()(mod)
        # function for conv2d
        data1 = relay.var("data1", relay.TensorType((1, 3, 224, 224), "float32"))
        weight1 = relay.var("weight1", relay.TensorType((16, 3, 3, 3), "float32"))
        conv = relay.nn.conv2d(
            data=data1, weight=weight1, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        func1 = relay.Function([data1, weight1], conv)
        func1 = set_func_attr(func1, "test_compiler", "tvmgen_default_test_compiler_main_0")
        gv1 = relay.GlobalVar("tvmgen_default_test_compiler_main_0")
        mod[gv1] = func1
        mod = transform.InferType()(mod)
        # main function
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma0 = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta0 = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean0 = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar0 = relay.var("bn_var", relay.TensorType((16,), "float32"))
        call1 = gv1(data, weight)
        call0 = gv0(call1, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)
        mod["main"] = relay.Function(
            [data, weight, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0], call0
        )
        mod = transform.InferType()(mod)
        return mod
    partitioned = partition()
    ref_mod = expected()
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_function_lifting_inline():
    """Like test_function_lifting but with transform.Inline() appended: the
    partitioned batch_norm function should be inlined back into main rather
    than kept as a module-level GlobalVar.  Structural check only.
    """
    def partition():
        data = relay.var("data", relay.TensorType((1, 16, 224, 224), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var", relay.TensorType((16,), "float32"))
        bn_output = relay.nn.batch_norm(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn_output.astuple())
        mod = tvm.IRModule()
        mod["main"] = func
        op_list = ["nn.batch_norm", "nn.conv2d"]
        mod = AllowedListAnnotator(op_list, "test_compiler")(mod)
        opt_pass = tvm.transform.Sequential(
            [
                transform.InferType(),
                transform.PartitionGraph(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.AlterOpLayout(),
                transform.Inline(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3):
            mod = opt_pass(mod)
        return mod
    def expected():
        # function for batch_norm
        data0 = relay.var("data0", relay.TensorType((1, 16, 224, 224), "float32"))
        mod = tvm.IRModule()
        bn_gamma = relay.var("bn_gamma1", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta1", relay.TensorType((16,), "float32"))
        bn_mmean = relay.var("bn_mean1", relay.TensorType((16,), "float32"))
        bn_mvar = relay.var("bn_var1", relay.TensorType((16,), "float32"))
        bn = relay.nn.batch_norm(data0, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        func0 = relay.Function([data0, bn_gamma, bn_beta, bn_mmean, bn_mvar], bn.astuple())
        func0 = set_func_attr(func0, "test_compiler", "tvmgen_default_test_compiler_main_0")
        # main function
        # NOTE: func0 is called directly (inlined) instead of through a
        # GlobalVar -- that is the effect of transform.Inline().
        data = relay.var("data", relay.TensorType((1, 16, 224, 224), "float32"))
        bn_gamma0 = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta0 = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mmean0 = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_mvar0 = relay.var("bn_var", relay.TensorType((16,), "float32"))
        call0 = func0(data, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0)
        mod["main"] = relay.Function([data, bn_gamma0, bn_beta0, bn_mmean0, bn_mvar0], call0)
        mod = transform.InferType()(mod)
        return mod
    partitioned = partition()
    ref_mod = expected()
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_constant_propagation():
    """Check that a bound constant ("x" = ones) is propagated into the
    partitioned external function, leaving only "y" as a runtime input.
    Verified structurally and numerically.
    """
    ones = np.ones(shape=(8, 8), dtype="float32")
    def expected():
        # Reference: the external add takes only y0; x is baked in as a
        # relay constant.
        mod = tvm.IRModule()
        y = relay.var("y", shape=(8, 8))
        x0 = relay.const(ones)
        y0 = relay.var("y0", shape=(8, 8))
        add = x0 + y0
        # Function that uses C compiler
        func = relay.Function([y0], add)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_main_0")
        mod[glb_0] = func
        mod = relay.transform.InferType()(mod)
        add_call = relay.Call(glb_0, [y])
        log = relay.log(add_call)
        main = relay.Function([y], log)
        mod["main"] = main
        mod = relay.transform.InferType()(mod)
        return mod
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    add = x + y
    log = relay.log(add)
    f = relay.Function([x, y], log)
    # Bind x to the constant before annotation/partitioning.
    f = bind_params_by_name(f, {"x": tvm.nd.array(ones)})
    mod = tvm.IRModule()
    mod["main"] = f
    mod = AllowedListAnnotator(["add"], "ccompiler")(mod)
    mod = transform.PartitionGraph()(mod)
    mod = relay.transform.InferType()(mod)
    expected_mod = expected()
    expected_mod = relay.transform.InferType()(expected_mod)
    assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)
    y_data = np.random.rand(8, 8).astype("float32")
    np_add = ones + y_data
    check_result(mod, {"y": y_data}, (8, 8), np.log(np_add))
def test_multiple_outputs():
    """Partition a region whose boundary has three outputs (relu output plus
    batch_norm's running mean/var): the external function must return a
    3-tuple and main must unpack it with TupleGetItem.  Structural check only.
    """
    def create_graph():
        # Annotated graph: conv+bn+relu inside the "test_target" region,
        # with abs() on the mean/var outputs kept outside the region.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16,), "float32"))
        data_cb = compiler_begin(data, "test_target")
        weight_cb = compiler_begin(weight, "test_target")
        bn_gamma_cb = compiler_begin(bn_gamma, "test_target")
        bn_beta_cb = compiler_begin(bn_beta, "test_target")
        bn_mean_cb = compiler_begin(bn_mean, "test_target")
        bn_var_cb = compiler_begin(bn_var, "test_target")
        conv_o = relay.nn.conv2d(
            data=data_cb, weight=weight_cb, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        bn_o = relay.nn.batch_norm(conv_o, bn_gamma_cb, bn_beta_cb, bn_mean_cb, bn_var_cb)
        relu_o = relay.nn.relu(bn_o[0])
        relu_o_ce = compiler_end(relu_o, "test_target")
        bn_omean = bn_o[1]
        rebn_omean_ce = compiler_end(bn_omean, "test_target")
        bn_ovar = bn_o[2]
        bn_ovar_ce = compiler_end(bn_ovar, "test_target")
        dummy_mean_abs = relay.abs(rebn_omean_ce)
        dummy_ovar_abs = relay.abs(bn_ovar_ce)
        dummy_tuple = relay.Tuple((relu_o_ce, dummy_mean_abs, dummy_ovar_abs))
        func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], dummy_tuple)
        return func
    def expected():
        mod = tvm.IRModule()
        # function 0
        data = relay.var("test_target_0_i0", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("test_target_0_i1", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("test_target_0_i2", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("test_target_0_i3", relay.TensorType((16,), "float32"))
        bn_mean = relay.var("test_target_0_i4", relay.TensorType((16,), "float32"))
        bn_var = relay.var("test_target_0_i5", relay.TensorType((16,), "float32"))
        conv_o = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        bn_o = relay.nn.batch_norm(conv_o, bn_gamma, bn_beta, bn_mean, bn_var)
        relu_o = relay.nn.relu(bn_o[0])
        tuple_o = relay.Tuple((relu_o, bn_o[1], bn_o[2]))
        func0 = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], tuple_o)
        func0 = set_func_attr(func0, "test_target", "tvmgen_default_test_target_main_0")
        gv0 = relay.GlobalVar("tvmgen_default_test_target_main_0")
        mod[gv0] = func0
        mod = relay.transform.InferType()(mod)
        # body
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        weight = relay.var("weight", relay.TensorType((16, 3, 3, 3), "float32"))
        bn_gamma = relay.var("bn_gamma", relay.TensorType((16,), "float32"))
        bn_beta = relay.var("bn_beta", relay.TensorType((16,), "float32"))
        bn_mean = relay.var("bn_mean", relay.TensorType((16,), "float32"))
        bn_var = relay.var("bn_var", relay.TensorType((16,), "float32"))
        f0_o = gv0(data, weight, bn_gamma, bn_beta, bn_mean, bn_var)
        f0_relu_o = relay.TupleGetItem(f0_o, 0)
        f0_mean_o = relay.TupleGetItem(f0_o, 1)
        f0_var_o = relay.TupleGetItem(f0_o, 2)
        f0_mean_abs = relay.abs(f0_mean_o)
        f0_var_abs = relay.abs(f0_var_o)
        main_tuple = relay.Tuple((f0_relu_o, f0_mean_abs, f0_var_abs))
        func = relay.Function([data, weight, bn_gamma, bn_beta, bn_mean, bn_var], main_tuple)
        mod["main"] = func
        mod = relay.transform.InferType()(mod)
        return mod
    mod = tvm.IRModule()
    mod["main"] = create_graph()
    ref_mod = expected()
    partitioned = transform.PartitionGraph()(mod)
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_mixed_single_multiple_outputs():
    """Partition a graph that yields two external regions: one with two
    outputs (abs result reused inside and outside) and one with a single
    output (the add).  Structural check only.
    """
    def create_graph():
        # abs and relu form the first region; its abs output also feeds a
        # tanh outside the region, forcing a multi-output boundary.  The add
        # of (relu-region output, tanh output) forms a second region.
        data = relay.var("data", shape=(10, 10))
        cb_1 = compiler_begin(data, "test_target")
        O_1 = relay.abs(cb_1)
        ce_2 = compiler_end(O_1, "test_target")
        O_2 = relay.nn.relu(O_1)
        ce_3 = compiler_end(O_2, "test_target")
        X = relay.tanh(ce_2)
        cb_3 = compiler_begin(ce_3, "test_target")
        cb_4 = compiler_begin(X, "test_target")
        O_3 = relay.add(cb_3, cb_4)
        ce_4 = compiler_end(O_3, "test_target")
        func = relay.Function([data], ce_4)
        return func
    def expected():
        mod = tvm.IRModule()
        # function 1
        f1_cb1 = relay.var("test_target_0_i0", shape=(10, 10))
        f1_O_1 = relay.abs(f1_cb1)
        f1_O_2 = relay.nn.relu(f1_O_1)
        # Two outputs: (relu result, abs result).
        f1_out = relay.Tuple((f1_O_2, f1_O_1))
        func1 = relay.Function([f1_cb1], f1_out)
        func1 = set_func_attr(func1, "test_target", "tvmgen_default_test_target_main_0")
        gv1 = relay.GlobalVar("tvmgen_default_test_target_main_0")
        mod[gv1] = func1
        mod = relay.transform.InferType()(mod)
        # function 0
        f2_cb3 = relay.var("test_target_1_i0", shape=(10, 10))
        f2_cb4 = relay.var("test_target_1_i1", shape=(10, 10))
        f2_O_3 = relay.add(f2_cb3, f2_cb4)
        func0 = relay.Function([f2_cb3, f2_cb4], f2_O_3)
        func0 = set_func_attr(func0, "test_target", "tvmgen_default_test_target_main_1")
        gv0 = relay.GlobalVar("tvmgen_default_test_target_main_1")
        mod[gv0] = func0
        mod = relay.transform.InferType()(mod)
        # body
        data = relay.var("data", shape=(10, 10))
        tuple_out = gv1(data)
        ce_2 = relay.TupleGetItem(tuple_out, 1)
        ce_3 = relay.TupleGetItem(tuple_out, 0)
        X = relay.tanh(ce_2)
        ce_4 = gv0(ce_3, X)
        func = relay.Function([data], ce_4)
        mod["main"] = func
        mod = relay.transform.InferType()(mod)
        return mod
    mod = tvm.IRModule()
    mod["main"] = create_graph()
    mod = transform.InferType()(mod)
    ref_mod = expected()
    partitioned = transform.PartitionGraph()(mod)
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_dnnl_fuse():
    """Exercise DNNL composite-pattern fusion.

    First counts partitions for many pattern-table/graph combinations (no
    DNNL runtime needed), then -- when the DNNL codegen is available -- runs
    the fused modules numerically against unpartitioned references.
    """
    # Pull the individual named patterns out of the registered DNNL table.
    dnnl_patterns = get_pattern_table("dnnl")
    for pattern in dnnl_patterns:
        if pattern[0] == "dnnl.conv2d_bias_relu":
            conv2d_bias_relu_pat = pattern
        elif pattern[0] == "dnnl.conv2d_bias_sigmoid":
            conv2d_bias_sigmoid_pat = pattern
        elif pattern[0] == "dnnl.conv2d_bias":
            conv2d_bias_pat = pattern
        elif pattern[0] == "dnnl.conv2d_relu":
            conv2d_relu_pat = pattern
        elif pattern[0] == "dnnl.conv2d_sigmoid":
            conv2d_sigmoid_pat = pattern
        elif pattern[0] == "dnnl.conv2d_bias_sum":
            conv2d_bias_sum_pat = pattern
        elif pattern[0] == "dnnl.conv2d_bias_sum_relu":
            conv2d_bias_sum_relu_pat = pattern
    def get_blocks(
        prefix,
        data,
        in_channel,  # NOTE(review): unused in the body -- kept for callers.
        out_channel,
        include_bias_add=True,
        include_bn=True,
        include_sigmoid=False,
    ):
        # One conv block: conv2d [+ bias_add] [+ batch_norm] [+ sigmoid] + relu.
        weight = relay.var(prefix + "weight")
        bias = relay.var(prefix + "bias")
        bn_gamma = relay.var(prefix + "bn_gamma")
        bn_beta = relay.var(prefix + "bn_beta")
        bn_mmean = relay.var(prefix + "bn_mean")
        bn_mvar = relay.var(prefix + "bn_var")
        layer = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=out_channel, padding=(1, 1)
        )
        if include_bias_add:
            layer = relay.nn.bias_add(layer, bias)
        if include_bn:
            bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
            layer = bn_output[0]
        if include_sigmoid:
            # dummy layer to prevent pattern detection
            layer = relay.sigmoid(layer)
        layer = relay.nn.relu(layer)
        return layer
    def get_net(include_bias_add=True, include_bn=True, include_sigmoid=False):
        # Two stacked blocks; the second is always plain conv + relu.
        data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
        block1 = get_blocks("block1_", data, 3, 8, include_bias_add, include_bn, include_sigmoid)
        # The second block is always conv + relu, to make it more interesting
        block2 = get_blocks("block2_", block1, 8, 8, False, False, include_sigmoid)
        return relay.Function(relay.analysis.free_vars(block2), block2)
    def get_partitoned_mod(mod, params, pattern_table):
        # Bind params, simplify away bn/bias chains, then merge composites
        # and partition for "dnnl".  (Name keeps the historical "partitoned"
        # spelling -- do not document as a different helper.)
        # This is required for constant folding
        mod["main"] = bind_params_by_name(mod["main"], params)
        remove_bn_pass = tvm.transform.Sequential(
            [
                transform.InferType(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
            ]
        )
        # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
        remove_linear_pass = tvm.transform.Sequential(
            [
                transform.SimplifyExpr(),
                transform.FoldConstant(),
            ]
        )
        composite_partition = tvm.transform.Sequential(
            [
                transform.CanonicalizeOps(),
                remove_bn_pass,
                remove_linear_pass,
                transform.MergeComposite(pattern_table),
                transform.AnnotateTarget("dnnl"),
                transform.PartitionGraph(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            return composite_partition(mod)
    def test_detect_pattern(
        pattern_table, include_bias_add, include_bn, include_sigmoid, num_expected_partition
    ):
        # Partition with the given pattern subset and count the resulting
        # external functions.
        net = get_net(include_bias_add, include_bn, include_sigmoid)
        mod, params = tvm.relay.testing.create_workload(net)
        mod = get_partitoned_mod(mod, params, pattern_table)
        assert len(mod.functions) - 1 == num_expected_partition  # -1 for main
    def test_sum_pattern(pattern_table, num_expected_partition):
        # Same partition-count check, but for the conv+bias+bn+add(+relu)
        # "sum" patterns which need a second input tensor.
        def get_conv2d_bn_sum_relu(
            x_shape=(1, 32, 8, 8),
            k_shape=(16, 32, 3, 3),
            sum_shape=(1, 16, 6, 6),
            dtype="float32",
        ):
            x = relay.var("x", shape=(x_shape), dtype=dtype)
            kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
            bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
            beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
            gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
            moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
            moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
            sum_data = relay.var("data1", shape=sum_shape, dtype=dtype)
            dic = {"x": x_shape, "bias": (k_shape[0],), "sum_data": sum_shape}
            param_lst = ["bias", "sum_data"]
            conv = relay.nn.conv2d(
                x,
                kernel,
                channels=k_shape[0],
                kernel_size=k_shape[2:4],
            )
            conv_bias = relay.nn.bias_add(conv, bias)
            conv_bias_bn, _, _ = relay.nn.batch_norm(
                conv_bias,
                gamma=gamma,
                beta=beta,
                moving_mean=moving_mean,
                moving_var=moving_var,
                axis=1,
                center=True,
                scale=True,
                epsilon=1e-5,
            )
            conv_bias_bn_sum = relay.add(conv_bias_bn, sum_data)
            return relay.nn.relu(conv_bias_bn_sum), dic, param_lst
        net, dic, param_lst = get_conv2d_bn_sum_relu()
        net = tvm.IRModule.from_expr(net)
        params = {x: np.random.uniform(-1, 1, dic[x]).astype("float32") for x in param_lst}
        mod = get_partitoned_mod(net, params, pattern_table)
        assert len(mod.functions) - 1 == num_expected_partition  # -1 for main
    def test_partition():
        # conv + bn + relu, conv + relu -> fused conv_bias_relu, conv, and relu
        test_detect_pattern([conv2d_bias_relu_pat], False, True, False, 3)
        # conv + bn + relu, conv + relu -> conv, bias, relu, and fused conv_relu
        test_detect_pattern([conv2d_relu_pat], False, True, False, 4)
        # conv + bn + relu, conv + relu -> fused conv_bias_relu, and fused conv_relu
        test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], False, True, False, 2)
        # conv + bias_add + bn + relu, conv + relu -> fused conv_bias_relu, and fused conv_relu
        test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], True, True, False, 2)
        # conv + relu, conv + relu -> two fused conv_relu
        test_detect_pattern([conv2d_relu_pat], False, False, False, 2)
        # conv + relu, conv + relu -> no fusion, 4 partition each with a single op
        test_detect_pattern([conv2d_bias_relu_pat], False, False, False, 4)
        # conv + bn + sigmoid + relu, conv + sigmoid + relu -> no fusion
        test_detect_pattern([conv2d_bias_relu_pat, conv2d_relu_pat], False, True, True, 7)
        # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias
        # and single op sigmoid, relu, conv, sigmoid, relu
        test_detect_pattern([conv2d_bias_pat, conv2d_relu_pat], True, True, True, 6)
        # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias_sigmoid
        # and single op relu, conv, sigmoid, relu
        test_detect_pattern([conv2d_bias_sigmoid_pat, conv2d_relu_pat], True, True, True, 5)
        # conv + bias_add + bn + sigmoid + relu, conv + sigmoid + relu -> fused conv_bias_sigmoid,
        # fused conv_sigmoid and single op relu, relu
        test_detect_pattern([conv2d_bias_sigmoid_pat, conv2d_sigmoid_pat], True, True, True, 4)
        # conv + bias_add + bn + add + relu -> fused conv_bias_sum, relu
        test_sum_pattern([conv2d_bias_sum_pat], 2)
        # conv + bias_add + bn + add + relu -> fused conv_bias_sum_relu,
        test_sum_pattern([conv2d_bias_sum_relu_pat], 1)
    def test_partition_mobilenet():
        mod, params = relay.testing.mobilenet.get_workload()
        mod = get_partitoned_mod(mod, params, dnnl_patterns)
        # 27 fused conv + bn + relu, one dense, one softmax and one global_avg_pooling
        assert len(mod.functions) - 1 == 30  # -1 for main
    def test_exec(mod, params, ref_mod, ref_params, out_shape):
        # Numeric comparison of the partitioned module against the reference.
        ishape = (1, 3, 224, 224)
        i_data = np.random.randn(*ishape).astype(np.float32)
        ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu(0)).evaluate()(
            i_data, **ref_params
        )
        te_compiler.get().clear()
        mod = get_partitoned_mod(mod, params, dnnl_patterns)
        check_result(mod, {"data": i_data}, out_shape, ref_res.numpy(), tol=1e-5, params=params)
    # Structural/partition-count checks always run.
    test_partition()
    test_partition_mobilenet()
    # Execution checks need the DNNL codegen to be compiled in.
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return
    net = get_net()
    mod, params = tvm.relay.testing.create_workload(net)
    ref_mod, ref_params = tvm.relay.testing.create_workload(net)
    test_exec(mod, params, ref_mod, ref_params, (1, 8, 224, 224))
    mod, params = relay.testing.mobilenet.get_workload()
    ref_mod, ref_params = relay.testing.mobilenet.get_workload()
    test_exec(mod, params, ref_mod, ref_params, (1, 1000))
def test_multiple_use_of_an_output():
    """An output consumed by several region consumers must be produced once.

    Two scenarios: all consumers end up in the same external region, or the
    producers split into two regions and the shared value crosses a region
    boundary.
    """
    def expected_same_output_region():
        # Reference module when subtract/log/multiply are all offloaded:
        # the whole chain lives in a single external "ccompiler" function.
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        log = relay.log(x0)
        sub = x0 - y0
        mul = log * sub
        # The partitioned graph contains log, subtract, and multiply
        func = relay.Function([x0, y0], mul)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_main_0")
        mod[glb_0] = func
        mod = transform.InferType()(mod)
        add = x + y
        call = relay.Call(glb_0, [add, z])
        main = relay.Function([x, y, z], call)
        mod["main"] = main
        mod = transform.InferType()(mod)
        return mod
    def expected_different_output_region():
        # Reference module when only subtract and log are offloaded: they
        # land in two separate external functions fed by the same `add`.
        mod = tvm.IRModule()
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))
        # The partitioned graph contains log
        i0 = relay.var("i0", shape=(8, 8))
        log = relay.log(i0)
        func = relay.Function([i0], log)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_0")
        glb_0 = relay.GlobalVar("tvmgen_default_ccompiler_main_0")
        mod[glb_0] = func
        mod = transform.InferType()(mod)
        # The partitioned graph contains subtract
        x0 = relay.var("x0", shape=(8, 8))
        y0 = relay.var("y0", shape=(8, 8))
        sub = x0 - y0
        func = relay.Function([x0, y0], sub)
        func = set_func_attr(func, "ccompiler", "tvmgen_default_ccompiler_main_1")
        glb_1 = relay.GlobalVar("tvmgen_default_ccompiler_main_1")
        mod[glb_1] = func
        mod = transform.InferType()(mod)
        add = x + y
        call_log = relay.Call(glb_0, [add])
        call_sub = relay.Call(glb_1, [add, z])
        main = relay.Function([x, y, z], call_log * call_sub)
        mod["main"] = main
        mod = transform.InferType()(mod)
        return mod
    def get_mod():
        # Input graph: `add` feeds both a subtract and a log, whose results
        # are multiplied together.
        x = relay.var("x", shape=(8, 8))
        y = relay.var("y", shape=(8, 8))
        z = relay.var("z", shape=(8, 8))
        add = x + y
        sub = add - z
        log = relay.log(add)
        sub1 = log * sub
        f = relay.Function([x, y, z], sub1)
        mod = tvm.IRModule()
        mod["main"] = f
        return mod
    def test_same_output_region():
        mod = get_mod()
        mod = AllowedListAnnotator(["subtract", "log", "multiply"], "ccompiler")(mod)
        mod = transform.MergeCompilerRegions()(mod)
        mod = transform.PartitionGraph()(mod)
        expected_mod = expected_same_output_region()
        assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)
    def test_different_output_region():
        mod = get_mod()
        mod = AllowedListAnnotator(["subtract", "log"], "ccompiler")(mod)
        mod = transform.MergeCompilerRegions()(mod)
        mod = transform.PartitionGraph()(mod)
        expected_mod = expected_different_output_region()
        assert tvm.ir.structural_equal(mod, expected_mod, map_free_vars=True)
    test_same_output_region()
    test_different_output_region()
def test_duplicate_outputs():
    """A region output with several consumers is returned once and re-used.

    `abs` feeds relu/tanh/log; after partitioning, the external function is
    called once and its single result fans out to all three consumers.
    """
    target = "test_duplicate_outputs"
    @tvm.ir.register_op_attr("abs", "target." + target)
    def abs(expr):  # pylint: disable=unused-variable
        return True
    def create_graph():
        data = relay.var("data", shape=(10, 10))
        x = relay.abs(data)
        out_1 = relay.nn.relu(x)
        out_2 = relay.tanh(x)
        out_3 = relay.log(x)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data], out)
        return func
    def expected():
        mod = tvm.IRModule()
        # function 0: the offloaded `abs`
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_o0 = relay.abs(f0_i0)
        func0 = relay.Function([f0_i0], f0_o0)
        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", "tvmgen_default_" + target + "_main_0")
        gv0 = relay.GlobalVar("tvmgen_default_" + target + "_main_0")
        mod[gv0] = func0
        mod = transform.InferType()(mod)
        # body: one call, three consumers of the same result
        data = relay.var("data", shape=(10, 10))
        function_out = gv0(data)
        out_1 = relay.nn.relu(function_out)
        out_2 = relay.tanh(function_out)
        out_3 = relay.log(function_out)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data], out)
        mod["main"] = func
        mod = transform.InferType()(mod)
        return mod
    mod = tvm.IRModule()
    mod["main"] = create_graph()
    seq = tvm.transform.Sequential(
        [
            transform.AnnotateTarget(target),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    ref_mod = expected()
    partitioned = seq(mod)
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_duplicate_merge_and_tuplegetitem():
    """batch_norm's tuple result is projected correctly across a region edge.

    batch_norm + relu are offloaded; the external function returns
    (relu_out, bn_out[1]) and the host graph reads both via TupleGetItem.
    """
    target = "test_duplicate_merge_and_tuplegetitem"
    @tvm.ir.register_op_attr("nn.batch_norm", "target." + target)
    def batch_norm(expr):  # pylint: disable=unused-variable
        return True
    @tvm.ir.register_op_attr("nn.relu", "target." + target)
    def relu(expr):  # pylint: disable=unused-variable
        return True
    def create_graph():
        data = relay.var("data", shape=(10, 10))
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
        x = relay.nn.batch_norm(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        out_1 = relay.nn.relu(x[0])
        bn_out_1 = x[1]
        out_2 = relay.tanh(bn_out_1)
        out_3 = relay.log(bn_out_1)
        out = relay.Tuple([out_1, out_2, out_3])
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)
        return func
    def expected():
        mod = tvm.IRModule()
        # function 0: batch_norm + relu, returning (relu, bn[1])
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10))
        f0_i1 = relay.var(target + "_0_i1")
        f0_i2 = relay.var(target + "_0_i2")
        f0_i3 = relay.var(target + "_0_i3")
        f0_i4 = relay.var(target + "_0_i4")
        f0_n0 = relay.nn.batch_norm(f0_i0, f0_i1, f0_i2, f0_i3, f0_i4)
        f0_n1 = f0_n0[1]
        f0_n2 = relay.nn.relu(f0_n0[0])
        f0_o0 = relay.Tuple([f0_n2, f0_n1])
        func0 = relay.Function([f0_i0, f0_i1, f0_i2, f0_i3, f0_i4], f0_o0)
        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", "tvmgen_default_" + target + "_main_0")
        gv0 = relay.GlobalVar("tvmgen_default_" + target + "_main_0")
        mod[gv0] = func0
        mod = transform.InferType()(mod)
        # body: project both outputs out of the external call
        data = relay.var("data", shape=(10, 10))
        bn_gamma = relay.var("bn_gamma")
        bn_beta = relay.var("bn_beta")
        bn_mmean = relay.var("bn_mean")
        bn_mvar = relay.var("bn_var")
        function_out = gv0(data, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        get_out0 = relay.TupleGetItem(function_out, 0)
        get_out1 = relay.TupleGetItem(function_out, 1)
        out_2 = relay.tanh(get_out1)
        out_3 = relay.log(get_out1)
        out = relay.Tuple([get_out0, out_2, out_3])
        func = relay.Function([data, bn_gamma, bn_beta, bn_mmean, bn_mvar], out)
        mod["main"] = func
        mod = transform.InferType()(mod)
        return mod
    mod = tvm.IRModule()
    mod["main"] = create_graph()
    mod = transform.InferType()(mod)
    seq = tvm.transform.Sequential(
        [
            transform.AnnotateTarget(target),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    ref_mod = expected()
    partitioned = seq(mod)
    assert tvm.ir.structural_equal(partitioned, ref_mod, map_free_vars=True)
def test_constant_tuples():
    """Constant tuple arguments of qnn.concatenate survive partitioning.

    After AnnotateTarget/MergeCompilerRegions/PartitionGraph, the
    scale/zero-point tuples must remain Tuple/Constant arguments on the
    offloaded call rather than being lifted into function parameters.
    """
    @tvm.ir.register_op_attr("qnn.concatenate", "target.const_tuples")
    def add(expr):  # pylint: disable=unused-variable
        return True
    def create_graph():
        a = relay.var("a", shape=(10, 10), dtype="uint8")
        b = relay.var("b", shape=(10, 10), dtype="uint8")
        a1 = relay.abs(a)
        zeroi = relay.const(1, "int32")
        zerof = relay.const(0, "float32")
        con = relay.qnn.op.concatenate(
            (a1, b),
            input_scales=(zerof, zerof),
            input_zero_points=(zeroi, zeroi),
            output_scale=zerof,
            output_zero_point=zeroi,
            axis=1,
        )
        f = relay.Function([a, b], con)
        mod = tvm.IRModule.from_expr(f)
        mod = transform.InferType()(mod)
        return mod
    seq = tvm.transform.Sequential(
        [
            transform.AnnotateTarget("const_tuples"),
            transform.InferType(),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    partitioned = seq(create_graph())
    concat = partitioned["tvmgen_default_const_tuples_main_0"].body
    # Fix: use isinstance() instead of `type(...) ==` (PEP 8) — same intent,
    # and robust should these node types ever be subclassed.
    assert isinstance(concat.args[1], relay.Tuple)
    assert isinstance(concat.args[2], relay.Tuple)
    assert isinstance(concat.args[3], relay.Constant)
    assert isinstance(concat.args[4], relay.Constant)
def test_flatten_tuple_output():
    """Tuple-valued region outputs are flattened into one returned tuple.

    The offloaded split+abs region yields three tensors; the partitioned
    function returns them as a flat 3-tuple that the host graph projects
    with TupleGetItem.
    """
    target = "test_flatten_tuple_output"
    @tvm.ir.register_op_attr("split", "target." + target)
    def split(expr):  # pylint: disable=unused-variable
        return True
    @tvm.ir.register_op_attr("abs", "target." + target)
    def abs_checker(expr):  # pylint: disable=unused-variable
        # Renamed from `abs`: the registered attr key is the string above,
        # so the Python name is free to avoid shadowing the builtin.
        return True
    def create_graph():
        a = relay.var("a", shape=(10, 10), dtype="uint8")
        a_split = relay.split(a, 2)
        a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
        a_split_0_abs = relay.abs(a_split_0)
        a_con = relay.concatenate(a_split, 0)
        a_split_0_relu = relay.nn.relu(a_split_0_abs)
        out = relay.Tuple((a_con, a_split_0_relu))
        f = relay.Function([a], out)
        mod = tvm.IRModule.from_expr(f)
        mod = transform.InferType()(mod)
        return mod
    def expected():
        mod = tvm.IRModule()
        # function 0: split + abs, returning the flattened 3-tuple
        f0_i0 = relay.var(target + "_0_i0", shape=(10, 10), dtype="uint8")
        a_split = relay.split(f0_i0, 2)
        a_split_0 = relay.TupleGetItem(a_split.astuple(), 0)
        a_split_1 = relay.TupleGetItem(a_split.astuple(), 1)
        a_split_abs_in = relay.TupleGetItem(a_split.astuple(), 0)
        abs_out = relay.abs(a_split_abs_in)  # fix: don't shadow builtin `abs`
        tuple_out = relay.Tuple((a_split_0, a_split_1, abs_out))
        func0 = relay.Function([f0_i0], tuple_out)
        func0 = func0.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        func0 = func0.with_attr("Compiler", target)
        func0 = func0.with_attr("global_symbol", "tvmgen_default_" + target + "_main_0")
        gv0 = relay.GlobalVar("tvmgen_default_" + target + "_main_0")
        mod[gv0] = func0
        mod = transform.InferType()(mod)
        # body: re-assemble the split halves, relu the abs output
        data = relay.var("a", shape=(10, 10), dtype="uint8")
        f_out = gv0(data)
        f_out_0 = relay.TupleGetItem(f_out, 0)
        f_out_1 = relay.TupleGetItem(f_out, 1)
        concat_in = relay.Tuple((f_out_0, f_out_1))  # fix: don't shadow builtin `tuple`
        concat = relay.concatenate(concat_in, 0)
        f_out_2 = relay.TupleGetItem(f_out, 2)
        relu = relay.nn.relu(f_out_2)
        ret_tuple = relay.Tuple((concat, relu))
        mod["main"] = relay.Function([data], ret_tuple)
        mod = transform.InferType()(mod)
        return mod
    seq = tvm.transform.Sequential(
        [
            transform.AnnotateTarget(target),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    partitioned = seq(create_graph())
    partitioned = transform.InferType()(partitioned)
    expected_mod = transform.InferType()(expected())
    assert tvm.ir.structural_equal(partitioned, expected_mod, map_free_vars=True)
def test_tuple_output_exec():
    """C-source codegen and runtime check for an external region that
    returns a 2-tuple: (a + b, a - b)."""
    in_a = relay.var("a", shape=(10, 10), dtype="float32")
    in_b = relay.var("b", shape=(10, 10), dtype="float32")
    begin_a = relay.annotation.compiler_begin(in_a, "ccompiler")
    begin_b = relay.annotation.compiler_begin(in_b, "ccompiler")
    pair = relay.Tuple((relay.add(begin_a, begin_b), relay.subtract(begin_a, begin_b)))
    region_out = relay.annotation.compiler_end(pair, "ccompiler")
    mod = tvm.IRModule()
    mod["main"] = relay.Function([in_a, in_b], region_out)
    mod = transform.InferType()(mod)
    mod = transform.PartitionGraph()(mod)
    lhs = np.random.rand(10, 10).astype("float32")
    rhs = np.random.rand(10, 10).astype("float32")
    check_result(
        mod,
        {"a": lhs, "b": rhs},
        [(10, 10), (10, 10)],
        [(lhs + rhs), (lhs - rhs)],
    )
def test_extern_opt():
    """A target-registered `optimize` hook runs on the partitioned function.

    The hook folds constants, so the external function's body (ones + ones)
    must collapse to a single constant of 2s.
    """
    def Optimize(mod):
        # Hook invoked by the BYOC pipeline for the "test_target" compiler.
        return relay.transform.FoldConstant()(mod)
    tvm.register_func("relay.ext.test_target.optimize", Optimize)
    x = relay.var("x", shape=(2, 2))
    y0 = relay.var("y0", shape=(2, 2))
    y1 = relay.var("y1", shape=(2, 2))
    yy0 = relay.annotation.compiler_begin(y0, "test_target")
    yy1 = relay.annotation.compiler_begin(y1, "test_target")
    z = yy0 + yy1
    end = relay.annotation.compiler_end(z, "test_target")
    f = relay.Function([x, y0, y1], end * x)
    c = np.ones(shape=(2, 2), dtype="float32")
    f = bind_params_by_name(f, {"y0": tvm.nd.array(c), "y1": tvm.nd.array(c)})
    mod = tvm.IRModule()
    mod["main"] = f
    mod = transform.InferType()(mod)
    mod = transform.PartitionGraph()(mod)
    try:
        t0 = mod["tvmgen_default_test_target_main_0"]
    # Fix: was a bare `except:` that discarded the original error.  The
    # IRModule lookup raises a TVM-side error (not KeyError), so catch
    # Exception and chain the cause for a useful traceback.
    except Exception as err:
        raise KeyError("test_target_main_0 not found") from err
    assert isinstance(t0.body, relay.Constant)
    expected = np.empty([2, 2])
    expected.fill(2)
    tvm.testing.assert_allclose(t0.body.data.numpy(), expected, rtol=1e-5, atol=1e-5)
def test_preserve_type_import():
    """Test to make sure type definition and imports are preserved during the BYOC pipeline."""
    from tvm.relay.prelude import Prelude, StaticTensorArrayOps
    def run(dtype, shape):
        # Build an ADT-backed static tensor-array program, then run the
        # partitioning passes; the ADT definitions must not be dropped.
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
        tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        write = p.get_global_var_static("tensor_array_write", dtype, shape)
        gather = p.get_global_var_static("tensor_array_gather", dtype, shape)
        v = relay.var("v")
        indice = relay.var("indice")
        # Write the same tensor into slots 0..2, then gather by index.
        init_tensor_array = tensor_array(relay.const(3))
        tensor_array1 = write(init_tensor_array, relay.const(0), tensor(v))
        tensor_array2 = write(tensor_array1, relay.const(1), tensor(v))
        tensor_array3 = write(tensor_array2, relay.const(2), tensor(v))
        out = gather(tensor_array3, indice)
        mod["main"] = relay.Function([v, indice], out)
        mod = transform.RemoveUnusedFunctions()(mod)
        mod = transform.PartitionGraph()(mod)
    run("float32", [2, 3])
def test_not_bind_constant():
    """`PartitionGraph(bind_constants=False)` keeps weights as call args.

    With bind_constants=True the offloaded call takes only the data input
    (constants are embedded); with False the constants remain explicit
    arguments of the call in `main`.
    """
    def get_net(prefix, data, out_channel):
        # conv2d + batch_norm + relu block with `prefix`-named parameters.
        weight = relay.var(prefix + "weight")
        bn_gamma = relay.var(prefix + "bn_gamma")
        bn_beta = relay.var(prefix + "bn_beta")
        bn_mmean = relay.var(prefix + "bn_mean")
        bn_mvar = relay.var(prefix + "bn_var")
        layer = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=out_channel, padding=(1, 1)
        )
        bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
        out = relay.nn.relu(bn_output[0])
        return relay.Function(relay.analysis.free_vars(out), out)
    def get_partitoned_mod(mod, params, pattern_table, bind_constants):
        # Fold batch_norm away, merge DNNL composites, then partition.
        mod["main"] = bind_params_by_name(mod["main"], params)
        remove_bn_pass = tvm.transform.Sequential(
            [
                transform.InferType(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
            ]
        )
        composite_partition = tvm.transform.Sequential(
            [
                remove_bn_pass,
                transform.MergeComposite(pattern_table),
                transform.AnnotateTarget("dnnl"),
                transform.PartitionGraph(bind_constants=bind_constants),
            ]
        )
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            return composite_partition(mod)
    data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
    net = get_net("block_", data, 8)
    mod, params = tvm.relay.testing.create_workload(net)
    mod = get_partitoned_mod(mod, params, get_pattern_table("dnnl"), bind_constants=True)
    # Fix: these length checks were bare comparison expressions with no
    # effect — the test previously asserted nothing.
    assert len(mod["main"].body.args) == 1
    mod = get_partitoned_mod(mod, params, get_pattern_table("dnnl"), bind_constants=False)
    assert len(mod["main"].body.args) == 3
if __name__ == "__main__":
    # Run every partitioning test when invoked as a script.
    test_multi_node_compiler()
    test_extern_ccompiler_single_op()
    test_extern_ccompiler_default_ops()
    test_extern_ccompiler_multiple_functions()
    test_extern_ccompiler()
    test_extern_dnnl()
    test_extern_dnnl_mobilenet()
    test_function_lifting()
    test_function_lifting_inline()
    test_constant_propagation()
    test_multiple_outputs()
    test_mixed_single_multiple_outputs()
    test_dnnl_fuse()
    test_multiple_use_of_an_output()
    test_duplicate_outputs()
    test_duplicate_merge_and_tuplegetitem()
    test_constant_tuples()
    test_flatten_tuple_output()
    test_tuple_output_exec()
    test_extern_opt()
    test_not_bind_constant()
# ===========================================================================
# End of test_pass_partition_graph.py
# repo: tvm — file: tvm-main/tests/python/relay/test_ir_parser.py
# ===========================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Union
import numpy as np
import pytest
import tvm
import tvm.relay.testing
import tvm.testing
from numpy import isclose
from tvm import relay
SEMVER = '#[version = "0.0.5"]\n'
BINARY_OPS = {
"*": relay.multiply,
"/": relay.divide,
"+": relay.add,
"-": relay.subtract,
"<": relay.less,
">": relay.greater,
"<=": relay.less_equal,
">=": relay.greater_equal,
"==": relay.equal,
"!=": relay.not_equal,
}
TYPES = {
"int8",
"int16",
"int32",
"int64",
"uint8",
"uint16",
"uint32",
"uint64",
"float16",
"float32",
"float64",
"bool",
"int8x4",
"uint1x4",
"float16x4",
}
LIST_DEFN = """
type List[A] {
Cons(A, List[A]),
Nil,
}
"""
def assert_graph_equal(lhs, rhs):
    """Assert structural equality, allowing free variables to be mapped."""
    tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars=True)
def graph_equal(lhs, rhs):
    """Structural equality as a bool, allowing free variables to be mapped."""
    return tvm.ir.structural_equal(lhs, rhs, map_free_vars=True)
def roundtrip_expr(expr):
    """Print an expression to text, re-parse it, and check it is unchanged."""
    text = expr.astext()
    x = tvm.relay.parse_expr(text)
    assert_graph_equal(x, expr)
# Testing Utilities for expressions.
def roundtrip(expr):
    """Print a module to text, re-parse it, and check it is unchanged."""
    x = tvm.relay.fromtext(expr.astext())
    assert_graph_equal(x, expr)
def parse_text(code):
    """Parse an expression from text and verify it round-trips."""
    expr = tvm.relay.parse_expr(code)
    roundtrip_expr(expr)
    return expr
def parses_as(code, expr):
    # type: (str, relay.Expr) -> bool
    """Return whether `code` parses to something structurally equal to `expr`."""
    parsed = parse_text(code)
    result = graph_equal(parsed, expr)
    return result
# Testing Utilities for full modules.
def parse_module(code):
    """Parse a module (SEMVER header prepended) and verify it round-trips."""
    mod = tvm.relay.parse(SEMVER + code)
    roundtrip(mod)
    return mod
def assert_parses_as(code, expr):
    """Assert that `code` parses to something structurally equal to `expr`."""
    parsed = parse_text(code)
    assert_graph_equal(parsed, expr)
def assert_parse_module_as(code, mod):
    """Assert that `code` parses to the (type-inferred) module `mod`."""
    mod = tvm.relay.transform.InferType()(mod)
    parsed = parse_module(code)
    assert_graph_equal(parsed, mod)
def get_scalar(x):
    # type: (relay.Constant) -> (Union[float, int, bool])
    """Extract the Python scalar held by a constant node."""
    return x.data.numpy().item()
int32 = relay.scalar_type("int32")
_ = relay.Var("_")
X = relay.Var("x")
Y = relay.Var("y")
X_ANNO = relay.Var("x", int32)
Y_ANNO = relay.Var("y", int32)
UNIT = relay.Tuple([])
def test_comments():
    """Line, block, and nested block comments are ignored by the parser."""
    assert_parses_as(
        """
        // This is a line comment!
        ()
        """,
        UNIT,
    )
    assert_parses_as(
        """
        /* This is a block comment!
            This is still a block comment!
        */
        ()
        """,
        UNIT,
    )
    assert_parses_as(
        """
        /* This is a block comment!
           /*Block comment is recursive!*/
        */
        ()
        """,
        UNIT,
    )
def test_int_literal():
assert isinstance(parse_text("1"), relay.Constant)
assert isinstance(parse_text("1").data, tvm.nd.NDArray)
assert get_scalar(parse_text("1")) == 1
assert get_scalar(parse_text("10")) == 10
assert get_scalar(parse_text("0")) == 0
assert get_scalar(parse_text("-100")) == -100
assert get_scalar(parse_text("-05")) == -5
assert get_scalar(parse_text("9223372036854775807")) == 9223372036854775807
assert get_scalar(parse_text("-42i")) == -42
assert get_scalar(parse_text("-42i16")) == -42
assert get_scalar(parse_text("-42i32")) == -42
assert get_scalar(parse_text("-42i64")) == -42
assert_parses_as("-42i16", relay.const(-42, "int16"))
assert_parses_as("-42i32", relay.const(-42, "int32"))
assert_parses_as("-42i", relay.const(-42, "int32"))
assert_parses_as("-42", relay.const(-42, "int32"))
assert_parses_as("-42i64", relay.const(-42, "int64"))
assert_parses_as("2147483647", relay.const(2147483647, "int32"))
assert_parses_as("2147483648", relay.const(2147483648, "int64"))
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("2147483648i32")
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("32768i16")
def test_float_literal():
assert get_scalar(parse_text("1.0f")) == 1.0
assert isclose(get_scalar(parse_text("1.56667f")), 1.56667)
assert get_scalar(parse_text("0.0f")) == 0.0
assert get_scalar(parse_text("-10.0f")) == -10.0
# scientific notation
assert isclose(get_scalar(parse_text("1e-1f")), 1e-1)
assert get_scalar(parse_text("1e+1f")) == 1e1
assert isclose(get_scalar(parse_text("1E-1f")), 1e-1)
assert get_scalar(parse_text("1E+1f")) == 1e1
assert isclose(get_scalar(parse_text("1.0e-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0e+1f")) == 1.0e1
assert isclose(get_scalar(parse_text("1.0E-1f")), 1.0e-1)
assert get_scalar(parse_text("1.0E+1f")) == 1.0e1
assert get_scalar(parse_text("3f16")) == 3.0
assert get_scalar(parse_text("3f32")) == 3.0
assert_parses_as("3f16", relay.const(3.0, "float16"))
assert_parses_as("3f32", relay.const(3.0, "float32"))
assert_parses_as("3f", relay.const(3.0, "float32"))
assert_parses_as("3f64", relay.const(3.0, "float64"))
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("3.40283e+38f32")
with pytest.raises(tvm.error.DiagnosticError):
# Unrepresentable
parse_text("65505f16")
def test_bool_literal():
    """Boolean literals parse to bool constants."""
    # Fix: `== True` / `== False` comparisons (PEP 8 / E712) — `.item()`
    # yields a genuine Python bool, so identity checks are exact.
    assert get_scalar(parse_text("True")) is True
    assert get_scalar(parse_text("False")) is False
    assert_parses_as("True", relay.const(True, "bool"))
def test_negative():
# need to handle parsing non-literal operations
# assert isinstance(parse_text("let %x = 1; -%x").body, relay.Call)
assert get_scalar(parse_text("--10")) == 10
assert get_scalar(parse_text("---10")) == -10
def test_bin_op():
    """Every surface binary operator parses to its matching Relay op."""
    # Fix: iterate items() directly instead of keys() plus a redundant
    # .get() lookup per iteration.
    for op_text, make_op in BINARY_OPS.items():
        assert_parses_as("1 {} 1".format(op_text), make_op(relay.const(1), relay.const(1)))
def test_parens():
    """Parentheses override the default operator precedence."""
    assert graph_equal(parse_text("1 * 1 + 1"), parse_text("(1 * 1) + 1"))
    assert not graph_equal(parse_text("1 * 1 + 1"), parse_text("1 * (1 + 1)"))
def test_op_assoc():
    """Operators group left-to-right within each precedence level."""
    assert graph_equal(parse_text("1 * 1 + 1 < 1 == 1"), parse_text("(((1 * 1) + 1) < 1) == 1"))
    assert graph_equal(parse_text("1 == 1 < 1 + 1 * 1"), parse_text("1 == (1 < (1 + (1 * 1)))"))
def test_vars():
# var
var = parse_text("let %foo = (); %foo")
assert isinstance(var.body, relay.Var)
assert var.body.name_hint == "foo"
# global var
global_var = parse_text("@foo")
assert isinstance(global_var, relay.GlobalVar)
assert global_var.name_hint == "foo"
# operator id
op = parse_text("add")
assert isinstance(op, tvm.ir.Op)
assert op.name == "add"
# operator id with prefix
op = parse_text("nn.global_avg_pool2d")
assert isinstance(op, tvm.ir.Op)
assert op.name == "nn.global_avg_pool2d"
def test_meta_ref():
with pytest.raises(tvm.error.DiagnosticError):
meta_op = parse_text("meta[type_key][1337]")
assert meta_op.attrs.node_type_key == "type_key"
assert meta_op.attrs.node_index == 1337
def test_let():
assert_parses_as("let %x = 1; ()", relay.Let(X, relay.const(1), UNIT))
assert_parses_as(
"""
let %x = 1;
let %y = 2;
()
""",
relay.Let(X, relay.const(1), relay.Let(Y, relay.const(2), UNIT)),
)
def test_seq():
assert_parses_as("(); ()", relay.Let(_, UNIT, UNIT))
assert_parses_as("let %_ = 1; ()", relay.Let(X, relay.const(1), UNIT))
def test_graph():
code = "%0 = (); %1 = 1; (%0, %0, %1)"
assert_parses_as(code, relay.Tuple([UNIT, UNIT, relay.const(1)]))
def test_graph_single():
assert_parses_as("%1 = (); %1", relay.Tuple([]))
def test_let_global_var():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let @x = 1; ()")
def test_let_op():
with pytest.raises(tvm.error.DiagnosticError):
parse_text("let x = 1; ()")
def test_tuple():
assert_parses_as("()", relay.Tuple([]))
assert_parses_as("(0,)", relay.Tuple([relay.const(0)]))
assert_parses_as("(0, 1)", relay.Tuple([relay.const(0), relay.const(1)]))
assert_parses_as("(0, 1, 2)", relay.Tuple([relay.const(0), relay.const(1), relay.const(2)]))
def test_tuple_proj():
x = relay.var("x", shape=())
assert_parses_as(
"free_var %x: float32; %x((%x,).0, %x)",
relay.Call(x, [relay.TupleGetItem(relay.Tuple([x]), 0), x]),
)
def test_func():
# 0 args
assert_parses_as("fn () { 0 }", relay.Function([], relay.const(0), None, []))
# 1 arg
assert_parses_as("fn (%x) { %x }", relay.Function([X], X, None, []))
# 2 args
assert_parses_as("fn (%x, %y) { %x + %y }", relay.Function([X, Y], relay.add(X, Y), None, []))
# annotations
assert_parses_as("fn (%x: int32) -> int32 { %x }", relay.Function([X_ANNO], X_ANNO, int32, []))
# Refactor the attribute syntax and printing.
#
# # attributes
# assert_parses_as(
# "fn (n=5) { () }",
# relay.Function([], UNIT, None, None, tvm.ir.make_node("DictAttrs", n=relay.const(5)))
# )
# TODO(@jmp): Crashes if %x isn't annnotated.
def test_defn():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
%x
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_recursive_call():
id_defn = parse_module(
"""
def @id(%x: int32) -> int32 {
@id(%x)
}
"""
)
assert isinstance(id_defn, tvm.IRModule)
def test_ifelse():
assert_parses_as(
"""
if (True) {
0
} else {
1
}
""",
relay.If(relay.const(True), relay.const(0), relay.const(1)),
)
def test_ifelse_scope():
with pytest.raises(tvm.error.DiagnosticError):
parse_text(
"""
if (True) {
let %x = ();
()
} else {
%x
}
"""
)
def test_ref():
program = """
#[version = "0.0.5"]
def @main(%x: float32) {
%0 = ref(%x);
ref_write(%0, 1f);
ref_read(%0)
}
"""
tvm.relay.parse(program)
def test_call():
# select right function to call: simple ident case
id_func = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
10 * %id(10)
""",
relay.Let(
id_func,
relay.Function([X], X, None, []),
relay.multiply(relay.const(10), relay.Call(id_func, [relay.const(10)])),
),
)
# 0 args
constant = relay.Var("constant")
assert_parses_as(
"""
let %constant = fn () { 0 };
%constant()
""",
relay.Let(
constant,
relay.Function([], relay.const(0), None, []),
relay.Call(constant, [], None, None),
),
)
# 1 arg
id_var = relay.Var("id")
assert_parses_as(
"""
let %id = fn (%x) { %x };
%id(1)
""",
relay.Let(
id_var,
relay.Function([X], X, None, []),
relay.Call(id_var, [relay.const(1)], None, None),
),
)
# 2 args
multiply = relay.Var("multiply")
assert_parses_as(
"""
let %multiply = fn (%x, %y) { %x * %y };
%multiply(0, 0)
""",
relay.Let(
multiply,
relay.Function([X, Y], relay.multiply(X, Y), None, []),
relay.Call(multiply, [relay.const(0), relay.const(0)], None, None),
),
)
# anonymous function
assert_parses_as(
"""
(fn (%x) { %x })(0)
""",
relay.Call(relay.Function([X], X, None, []), [relay.const(0)], None, None),
)
# curried function
curried_mult = relay.Var("curried_mult")
assert_parses_as(
"""
let %curried_mult =
fn (%x) {
fn (%y) {
%x * %y
}
};
%curried_mult(0);
%curried_mult(0)(0)
""",
relay.Let(
curried_mult,
relay.Function([X], relay.Function([Y], relay.multiply(X, Y), None, []), None, []),
relay.Let(
_,
relay.Call(curried_mult, [relay.const(0)], None, None),
relay.Call(
relay.Call(curried_mult, [relay.const(0)], None, None),
[relay.const(0)],
None,
None,
),
),
),
)
# op
assert_parses_as("abs(1)", relay.Call(relay.op.get("abs"), [relay.const(1)], None, None))
# Types
def test_incomplete_type():
assert_parses_as("let %_ : _ = (); ()", relay.Let(_, UNIT, UNIT))
def test_builtin_types():
for builtin_type in TYPES:
parse_text("let %_ : {} = (); ()".format(builtin_type))
def test_tensor_type():
assert_parses_as(
"let %_ : Tensor[(), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1,), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(1, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((1, 1), "float32")), UNIT, UNIT),
)
assert_parses_as(
"let %_ : Tensor[(?, 1), float32] = (); ()",
relay.Let(relay.Var("_", relay.TensorType((tvm.tir.Any(), 1), "float32")), UNIT, UNIT),
)
def test_function_type():
assert_parses_as(
"""
let %_: fn () -> int32 = fn () -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([], int32, [], [])),
relay.Function([], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32) -> int32 = fn (%x: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32], int32, [], [])),
relay.Function([relay.Var("x", int32)], relay.const(0), int32, []),
UNIT,
),
)
assert_parses_as(
"""
let %_: fn (int32, int32) -> int32 = fn (%x: int32, %y: int32) -> int32 { 0 }; ()
""",
relay.Let(
relay.Var("_", relay.FuncType([int32, int32], int32, [], [])),
relay.Function(
[relay.Var("x", int32), relay.Var("y", int32)], relay.const(0), int32, []
),
UNIT,
),
)
def test_tuple_type():
assert_parses_as(
"""
let %_: () = (); ()
""",
relay.Let(relay.Var("_", relay.TupleType([])), UNIT, UNIT),
)
assert_parses_as(
"""
let %_: (int32,) = (0,); ()
""",
relay.Let(relay.Var("_", relay.TupleType([int32])), relay.Tuple([relay.const(0)]), UNIT),
)
assert_parses_as(
"""
let %_: (int32, int32) = (0, 1); ()
""",
relay.Let(
relay.Var("_", relay.TupleType([int32, int32])),
relay.Tuple([relay.const(0), relay.const(1)]),
UNIT,
),
)
def test_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [relay.Constructor("Nil", [], glob_typ_var)])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { Nil }
""",
mod,
)
def test_adt_any():
code = """
type my_dtype {
my_cons(Tensor[(?, 1), uint16]),
}
"""
mod = parse_module(code)
items = mod.type_definitions.items()
global_type_var, type_data = items[0]
assert global_type_var.name_hint == "my_dtype"
ctors = type_data.constructors
assert len(ctors) == 1
my_cons = ctors[0]
assert my_cons.name_hint == "my_cons"
ty_shape = my_cons.inputs[0].shape
assert isinstance(ty_shape[0], tvm.tir.Any)
assert ty_shape[1] == 1
def test_empty_adt_defn():
mod = tvm.IRModule()
glob_typ_var = relay.GlobalTypeVar("Ayy")
prog = relay.TypeData(glob_typ_var, [], [])
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Ayy { }
""",
mod,
)
def test_multiple_cons_defn():
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
prog = relay.TypeData(
list_var,
[typ_var],
[
relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var),
relay.Constructor("Nil", [], list_var),
],
)
mod[list_var] = prog
assert_parse_module_as(LIST_DEFN, mod)
def test_multiple_type_param_defn():
glob_typ_var = relay.GlobalTypeVar("Either")
typ_var_a = relay.TypeVar("A")
typ_var_b = relay.TypeVar("B")
prog = relay.TypeData(
glob_typ_var,
[typ_var_a, typ_var_b],
[
relay.Constructor("Left", [typ_var_a], glob_typ_var),
relay.Constructor("Right", [typ_var_b], glob_typ_var),
],
)
mod = tvm.IRModule()
mod[glob_typ_var] = prog
assert_parse_module_as(
"""
type Either[A, B] {
Left(A),
Right(B),
}
""",
mod,
)
def test_match():
# pair each match keyword with whether it specifies a complete match or not
match_keywords = [("match", True), ("match?", False)]
for (match_keyword, is_complete) in match_keywords:
mod = tvm.IRModule()
list_var = relay.GlobalTypeVar("List")
typ_var = relay.TypeVar("A")
cons_constructor = relay.Constructor("Cons", [typ_var, list_var(typ_var)], list_var)
nil_constructor = relay.Constructor("Nil", [], list_var)
list_def = relay.TypeData(list_var, [typ_var], [cons_constructor, nil_constructor])
mod[list_var] = list_def
length_var = relay.GlobalVar("length")
typ_var = relay.TypeVar("A")
input_type = list_var(typ_var)
input_var = relay.Var("xs", input_type)
rest_var = relay.Var("rest")
cons_case = relay.Let(
relay.var("", type_annotation=None),
UNIT,
relay.add(relay.const(1), relay.Call(length_var, [rest_var])),
)
body = relay.Match(
input_var,
[
relay.Clause(
relay.PatternConstructor(
cons_constructor, [relay.PatternWildcard(), relay.PatternVar(rest_var)]
),
cons_case,
),
relay.Clause(relay.PatternConstructor(nil_constructor, []), relay.const(0)),
],
complete=is_complete,
)
length_func = relay.Function([input_var], body, int32, [typ_var])
mod[length_var] = length_func
assert_parse_module_as(
"""
%s
def @length[A](%%xs: List[A]) -> int32 {
%s (%%xs) {
Cons(_, %%rest : List[A]) => {
();
1 + @length(%%rest)
},
Nil => 0,
}
}
"""
% (LIST_DEFN, match_keyword),
mod,
)
def test_adt_cons_expr():
    """Constructor calls in expression position parse to the expected module."""
    module = tvm.IRModule()
    list_gtv = relay.GlobalTypeVar("List")
    elem_tv = relay.TypeVar("A")
    cons_ctor = relay.Constructor("Cons", [elem_tv, list_gtv(elem_tv)], list_gtv)
    nil_ctor = relay.Constructor("Nil", [], list_gtv)
    module[list_gtv] = relay.TypeData(list_gtv, [elem_tv], [cons_ctor, nil_ctor])
    # @make_singleton wraps its argument in a one-element list: Cons(x, Nil).
    singleton_gv = relay.GlobalVar("make_singleton")
    arg = relay.Var("x", int32)
    module[singleton_gv] = relay.Function([arg], cons_ctor(arg, nil_ctor()), list_gtv(int32))
    assert_parse_module_as(
        """
        %s
        def @make_singleton(%%x: int32) -> List[int32] {
            Cons(%%x, Nil)
        }
        """
        % (LIST_DEFN),
        module,
    )
def test_duplicate_adt_defn():
    """Re-declaring the List ADT must raise a diagnostic error."""
    duplicated = (
        """
        %s
        type List[A] {
            Cons(A, List[A]),
            Nil,
        }
        """
        % LIST_DEFN
    )
    with pytest.raises(tvm.error.DiagnosticError):
        parse_module(duplicated)
def test_duplicate_adt_cons():
    """Two ADTs may not share a constructor name."""
    src = """
            type Ayy { Lmao }
            type Haha { Lmao }
            """
    with pytest.raises(tvm.error.DiagnosticError):
        parse_text(src)
def test_duplicate_adt_cons_defn():
    """A constructor name may not collide with a later ADT name."""
    src = """
            type Ayy { Lmao }
            type Lmao { Ayy }
            """
    with pytest.raises(tvm.error.DiagnosticError):
        parse_text(src)
def test_duplicate_global_var():
    """Defining the same global function twice must be rejected."""
    src = """
            def @id[A](%x: A) -> A { x }
            def @id[A](%x: A) -> A { x }
            """
    with pytest.raises(tvm.error.DiagnosticError):
        parse_text(src)
def test_extern_adt_defn():
    """`extern type` parses to a TypeData with no constructors."""
    expected = tvm.IRModule()
    extern_gtv = relay.GlobalTypeVar("T")
    param_tv = relay.TypeVar("A")
    expected[extern_gtv] = relay.TypeData(extern_gtv, [param_tv], [])
    assert_parse_module_as(
        """
        extern type T[A]
        """,
        expected,
    )
def test_import_grad():
    # Smoke test: the gradient prelude from TVM's standard library must load
    # into a fresh module without raising.
    mod = tvm.IRModule()
    mod.import_from_std("gradient.rly")
def test_mlp():
    """An MLP workload round-trips through text printing and parsing."""
    workload, _ = relay.testing.mlp.get_workload(1)
    reparsed = tvm.relay.parse(workload.astext())
    tvm.ir.assert_structural_equal(workload, reparsed)
def inline_params(mod, params):
    """Bind every entry of `params` into @main as a constant and rebuild @main."""
    main_fn = mod["main"]
    # Map parameter hint names back to the Var nodes of @main.
    str_to_var = {param.name_hint: param for param in main_fn.params}
    bind_map = {str_to_var[name]: relay.const(value) for name, value in params.items()}
    body = relay.bind(main_fn.body, bind_map)
    main_fn = relay.Function(relay.analysis.free_vars(body), body)
    # NOTE(review): uses the private _add to force-overwrite @main.
    mod._add("main", main_fn, True)
    return mod
def test_mlp_inlined_params():
    """The text round-trip still holds after constants are inlined into @main."""
    workload, params = relay.testing.mlp.get_workload(1)
    workload = inline_params(workload, params)
    workload = relay.transform.InferType()(workload)
    reparsed = tvm.relay.parse(workload.astext())
    tvm.ir.assert_structural_equal(workload, reparsed)
def test_tuple_return_value():
    """A trailing comma inside a tuple literal is accepted by the parser."""
    source = """
    type Box[T] {
      constructor(T)
    }
    def @example() {
      %0 = ();
      %1 = constructor(%0);
      %2 = constructor(0f);
      (%1, %2,)
    }
    """
    parse_module(source)
def test_parse_if_in_binding():
    """An if-expression may appear on the right-hand side of a binding."""
    source = """
    def @example(%b: bool) {
        %0 = if (%b) {
            1
        } else {
            0
        };
        %0
    }
    """
    parse_module(source)
def test_op_string_attr():
    """String-valued operator attributes survive parsing of a call."""
    conv = parse_text(
        """
        free_var %x: Tensor[(1, 32, 32, 3), float32];
        free_var %y: Tensor[(1, 1, 3, 3), float32];
        nn.conv2d(%x, %y, data_layout="NHWC", kernel_layout="HWIO")
        """
    )
    assert isinstance(conv.op, tvm.ir.Op)
    assert conv.op.name == "nn.conv2d"
    assert (conv.attrs.data_layout, conv.attrs.kernel_layout) == ("NHWC", "HWIO")
def test_load_prelude():
    """The standard prelude can be printed and re-parsed without error."""
    prelude = tvm.IRModule()
    prelude.import_from_std("prelude.rly")
    tvm.relay.parse(prelude.astext())
def test_call_attrs():
    """A Call carrying custom attrs on a function callee round-trips via text."""

    def get_func(shape, dtype):
        # dense -> relu -> add(+1) subgraph used as the callee of the outer call.
        x0 = relay.var("data", shape=shape, dtype=dtype)
        w0 = relay.var("weight", shape=shape, dtype=dtype)
        a = relay.nn.dense(x0, w0)
        b = relay.nn.relu(a)
        d = relay.add(b, relay.const(1.0, dtype=dtype))
        return relay.Function([x0, w0], d)

    # build relay graph
    shape = (2, 4)
    dtype = "float32"
    sub_func = get_func(shape, dtype)
    p0 = relay.var("p0", shape=shape, dtype=dtype)
    p1 = relay.var("p1", shape=shape, dtype=dtype)
    # Attach a TestAttrs node directly to the call site (not the function).
    attr = tvm.ir.make_node("attrs.TestAttrs", name="func_call_attrs")
    call = relay.Call(sub_func, [p0, p1], attrs=attr)
    func = relay.Function([p0, p1], call)
    # build relay module
    mod = tvm.IRModule()
    mod["main"] = func
    mod = tvm.relay.transform.InferType()(mod)
    # assert equal: the textual form spells the call attrs as extra keyword
    # arguments plus attrs_type_key.
    program = """
    def @main(%p0: Tensor[(2, 4), float32], %p1: Tensor[(2, 4), float32]) {
      %2 = fn (%data: Tensor[(2, 4), float32], %weight: Tensor[(2, 4), float32]) {
        %0 = nn.dense(%data, %weight, units=None);
        %1 = nn.relu(%0);
        add(%1, 1f)
      };
      %2(%p0, %p1, name="func_call_attrs", attrs_type_key="attrs.TestAttrs")
    }
    """
    parsed = parse_module(program)
    assert_graph_equal(parsed, mod)
def test_tokenize_inf():
    """Span annotation on a graph containing +/-inf constants must not crash."""
    data = relay.var("x", shape=(3, 4), dtype="float32")
    clipped = relay.clip(data, -np.inf, np.inf)
    mod = tvm.IRModule.from_expr(relay.Function([data], clipped))
    relay.transform.AnnotateSpans()(mod)
def test_func_attrs():
    """Function-level DictAttrs round-trip through the text format."""
    attrs = tvm.ir.make_node("DictAttrs", **{"Primitive": 1, "relay.reshape_only": 1})
    param = relay.var("x", shape=(2, 3))
    flattened = relay.Function([param], relay.reshape(param, (-1,)), attrs=attrs)
    assert_parses_as(flattened.astext(), flattened)
def test_init_module_and_metatable():
    """Parsing can resolve references against a seed module and a metadata table."""
    metatable = {"relay.Constant": [relay.const(np.random.rand(2, 3), dtype="float32")]}
    # The seed module supplies @f, which the parsed program calls.
    seed_module = tvm.relay.fromtext(
        SEMVER
        + """
        def @f(%y : Tensor[(2, 3), float32]) -> Tensor[(2, 3), float32] {
            negative(%y)
        }
        """,
    )
    parsed = tvm.relay.parse(
        SEMVER
        + """
        def @main(%x: Tensor[(2, 3), float32]) {
            add(@f(%x), meta[relay.Constant][0])
        }
        """,
        "from_string",
        seed_module,
        metatable,
    )
    roundtrip(parsed)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test driver when run as a script.
    tvm.testing.main()
| 26,705 | 24.852856 | 99 | py |
tvm | tvm-main/tests/python/relay/test_cmp_op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm import relay
a = relay.Var("a")
b = relay.expr.const(1.0, dtype="float32")

# Each Python comparison operator on relay expressions must build the same
# AST (identical printed text) as the corresponding named relay comparison op.
for sugared, explicit in [
    (a < b, relay.less(a, b)),
    (a > b, relay.greater(a, b)),
    (a >= b, relay.greater_equal(a, b)),
    (a <= b, relay.less_equal(a, b)),
]:
    assert sugared.astext() == explicit.astext()
| 1,147 | 29.210526 | 62 | py |
tvm | tvm-main/tests/python/relay/test_pass_eliminate_common_subexpr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test eliminate common subexpr pass"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay.op import register_alter_op_layout
from tvm.relay import transform, analysis
def run_opt_pass(expr, opt_pass):
    """Apply `opt_pass` to `expr` in a fresh module; return function or body."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    optimized = opt_pass(tvm.IRModule.from_expr(expr))["main"]
    if isinstance(expr, relay.Function):
        return optimized
    return optimized.body
def test_simple():
    """Two identical relu+add chains collapse into one shared subexpression."""

    def before():
        inp = relay.var("x", shape=(1, 16))
        left = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        right = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        return relay.Function([inp], relay.add(left, right))

    def expected():
        inp = relay.var("x", shape=(1, 16))
        shared = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        out = relay.Function([inp], relay.add(shared, shared))
        return run_opt_pass(out, transform.InferType())

    actual = run_opt_pass(before(), transform.EliminateCommonSubexpr())
    assert tvm.ir.structural_equal(actual, expected())
def test_callback():
    """A user callback can exempt chosen ops (here `add`) from deduplication."""

    def before():
        inp = relay.var("x", shape=(1, 16))
        left = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        right = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        return relay.Function([inp], relay.add(left, right))

    def expected():
        inp = relay.var("x", shape=(1, 16))
        shared = relay.nn.relu(inp)
        # Both adds survive because the callback skips `add` calls.
        left = relay.add(shared, relay.const(1.0, "float32"))
        right = relay.add(shared, relay.const(1.0, "float32"))
        out = relay.Function([inp], relay.add(left, right))
        return run_opt_pass(out, transform.InferType())

    def skip_adds(expr):
        return isinstance(expr, relay.expr.Call) and expr.op.name == "add"

    actual = run_opt_pass(before(), transform.EliminateCommonSubexpr(skip_adds))
    assert tvm.ir.structural_equal(actual, expected())
def test_tuple_get_time():
    """Identical TupleGetItem nodes on a batch_norm result are deduplicated."""

    def bn_inputs():
        return (
            relay.var("x", shape=(1, 16, 1, 1)),
            relay.var("var", shape=(16,)),
            relay.var("mean", shape=(16,)),
            relay.var("beta", shape=(16,)),
            relay.var("gamma", shape=(16,)),
        )

    def before():
        x, var, mean, beta, gamma = bn_inputs()
        bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
        # bn[0] twice builds two distinct TupleGetItem nodes.
        return relay.Function([x, var, mean, beta, gamma], bn[0] + bn[0])

    def expected():
        x, var, mean, beta, gamma = bn_inputs()
        bn = relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon=1e-5)
        first = bn[0]
        out = relay.Function([x, var, mean, beta, gamma], first + first)
        return run_opt_pass(out, transform.InferType())

    actual = run_opt_pass(before(), transform.EliminateCommonSubexpr())
    assert tvm.ir.structural_equal(actual, expected())
def test_tuple_arg():
    """Common subexpressions feeding tuple arguments (concatenate) are shared."""

    def before():
        inp = relay.var("x", shape=(1, 16))
        left = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        right = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        ones = relay.const(np.ones((1, 16)), "float32")
        left = relay.concatenate([left, ones], axis=0)
        right = relay.concatenate([right, ones], axis=0)
        return relay.Function([inp], relay.add(left, right))

    def expected():
        inp = relay.var("x", shape=(1, 16))
        chain = relay.add(relay.nn.relu(inp), relay.const(1.0, "float32"))
        ones = relay.const(np.ones((1, 16)), "float32")
        chain = relay.concatenate([chain, ones], axis=0)
        out = relay.Function([inp], relay.add(chain, chain))
        return run_opt_pass(out, transform.InferType())

    actual = run_opt_pass(before(), transform.EliminateCommonSubexpr())
    assert tvm.ir.structural_equal(actual, expected())
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test driver when run as a script.
    tvm.testing.main()
| 5,032 | 32.331126 | 76 | py |
tvm | tvm-main/tests/python/relay/test_pass_simplify_inference.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.ir import IRModule, structural_equal
from tvm import relay as rly
from tvm.relay.transform import SimplifyInference, InferType
def test_simplify_batchnorm(dtype="float32"):
    """SimplifyInference rewrites batch_norm (+dropout) into scale/shift math."""

    def simple_bn(x, gamma, beta, moving_mean, moving_var, axis=1, epsilon=1e-5, shape=None):
        # expect = (x - moving_mean) / sqrt(moving_var + eps) * gamma + beta
        scale = rly.multiply(
            rly.const(1, dtype) / rly.sqrt(moving_var + rly.const(epsilon, dtype)), gamma
        )
        shift = rly.add(rly.multiply(rly.negative(moving_mean), scale), beta)
        # Broadcast scale/shift over the axes trailing the channel axis.
        num_newaxis = len(shape) - (axis + 1)
        if num_newaxis:
            scale = rly.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
            shift = rly.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
        return x * scale + shift

    def check(dim, axis, nstep):
        # Build `nstep` stacked batch_norm+dropout layers (y1) alongside the
        # hand-expanded equivalent (y2); after SimplifyInference they must be
        # structurally equal (dropout is also removed by the pass).
        eps = 0.01
        ttype1 = rly.TensorType(tuple(10 for i in range(dim)), dtype)
        ttype2 = rly.TensorType((10,), dtype)
        x = rly.var("x", ttype1)
        beta = rly.var("beta", ttype2)
        gamma = rly.var("gamma", ttype2)
        moving_var = rly.var("moving_var", ttype2)
        moving_mean = rly.var("moving_mean", ttype2)
        y1, y2 = x, x
        for _ in range(nstep):
            y1, _, _ = rly.nn.batch_norm(
                y1 + rly.const(1, dtype),
                gamma,
                beta,
                moving_mean,
                moving_var,
                epsilon=eps,
                axis=axis,
            )
            y1 = rly.nn.dropout(y1)
            y2 = simple_bn(
                y2 + rly.const(1, dtype),
                gamma,
                beta,
                moving_mean,
                moving_var,
                epsilon=eps,
                axis=axis,
                shape=ttype1.shape,
            )
        mod = IRModule.from_expr(y1)
        simplify = SimplifyInference()
        mod = InferType()(mod)
        mod = simplify(mod)
        y1 = mod["main"].body
        assert structural_equal(y1, y2, map_free_vars=True)

    # Cover 2D/4D tensors and both channel-first and channel-last axes,
    # including a stack of three layers.
    check(2, 1, 1)
    check(4, 1, 1)
    check(4, 0, 3)
if __name__ == "__main__":
    # Exercise both supported float dtypes when run as a script.
    test_simplify_batchnorm(dtype="float32")
    test_simplify_batchnorm(dtype="float16")
| 3,018 | 34.517647 | 93 | py |
tvm | tvm-main/tests/python/relay/test_autotvm_task_extraction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test task extraction for autotvm"""
import tvm.relay.testing
from tvm import relay
from tvm import autotvm
def get_network(name, batch_size):
    """Return (module, params, input_shape) for a named benchmark network."""
    loaders = {
        "resnet-18": lambda: relay.testing.resnet.get_workload(
            num_layers=18, batch_size=batch_size
        ),
        "resnet3d-18": lambda: relay.testing.resnet_3d.get_workload(
            num_layers=18, batch_size=batch_size
        ),
        "mobilenet": lambda: relay.testing.mobilenet.get_workload(batch_size=batch_size),
        "dcgan": lambda: relay.testing.dcgan.get_workload(batch_size=batch_size),
    }
    if name not in loaders:
        raise ValueError("Unsupported network: " + name)
    mod, params = loaders[name]()
    # Image networks share an NCHW 224x224 input; DCGAN takes a latent vector.
    input_shape = (batch_size, 100) if name == "dcgan" else (batch_size, 3, 224, 224)
    return mod, params, input_shape
def test_task_extraction():
    """Extracted task counts match the known per-network tunable-op counts."""
    target = "llvm"
    mod_list = []
    params_list = []
    conv2d = relay.op.get("nn.conv2d")
    conv3d = relay.op.get("nn.conv3d")
    conv2d_transpose = relay.op.get("nn.conv2d_transpose")
    dense = relay.op.get("nn.dense")

    # resnet-18: 12 unique conv2d workloads, whether extracted from the main
    # function directly or from the whole module.
    mod, params, _ = get_network("resnet-18", batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(conv2d,)
    )
    assert len(tasks) == 12
    tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv2d,))
    assert len(tasks) == 12

    # resnet-18: 2 dense workloads.
    mod, params, _ = get_network("resnet-18", batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(dense,)
    )
    assert len(tasks) == 2
    tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(dense,))
    assert len(tasks) == 2

    # conv2d + dense = 14; omitting `ops` defaults to extracting everything,
    # which yields the same 14 for this network.
    mod, params, _ = get_network("resnet-18", batch_size=1)
    mod_list.append(mod)
    params_list.append(params)
    tasks = autotvm.task.extract_from_program(
        mod["main"], target=target, params=params, ops=(conv2d, dense)
    )
    assert len(tasks) == 14
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params, ops=(conv2d, dense)
    )
    assert len(tasks) == 14
    tasks = autotvm.task.extract_from_program(mod, target=target, params=params)
    assert len(tasks) == 14

    # 3D variant: 12 conv3d workloads.
    mod, params, _ = get_network("resnet3d-18", batch_size=1)
    tasks = autotvm.task.extract_from_program(mod, target=target, params=params, ops=(conv3d,))
    assert len(tasks) == 12

    mod, params, _ = get_network("mobilenet", batch_size=1)
    mod_list.append(mod)
    params_list.append(params)
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params, ops=(conv2d, dense)
    )
    assert len(tasks) == 21

    # dcgan uses transposed convolutions.
    mod, params, _ = get_network("dcgan", batch_size=1)
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params, ops=(conv2d_transpose,)
    )
    assert len(tasks) == 4

    # Multi-program extraction over [resnet-18, mobilenet] accumulates their
    # conv2d workloads: 12 + 19 = 31.
    tasks = autotvm.task.extract_from_multiple_program(
        mod_list, params_list, target=target, ops=(conv2d,)
    )
    assert len(tasks) == 31
def test_task_extraction_for_dense_int8_cuda():
    """Dense extraction picks the int8 CUDA schedule only for int8 inputs."""
    target = "cuda"
    dense = relay.op.get("nn.dense")

    def make_dense_workload(batch, in_dim, out_dim, dtype, out_dtype):
        data = tvm.relay.var("data", shape=[batch, in_dim], dtype=dtype)
        weight = tvm.relay.var("weight", shape=[out_dim, in_dim], dtype=dtype)
        dense_out = relay.nn.dense(data, weight, out_dtype=out_dtype)
        return relay.testing.create_workload(dense_out)

    for dtype, out_dtype, expect_name in [
        ("float32", "float32", "dense_small_batch.gpu"),
        ("int8", "int32", "dense_int8.cuda"),
    ]:
        mod, params = make_dense_workload(1, 16, 32, dtype, out_dtype)
        tasks = autotvm.task.extract_from_program(
            mod, target=target, params=params, ops=(dense,)
        )
        assert len(tasks) == 1 and tasks[0].name == expect_name
if __name__ == "__main__":
    # Run both extraction tests directly when invoked as a script.
    test_task_extraction()
    test_task_extraction_for_dense_int8_cuda()
| 4,914 | 37.398438 | 96 | py |
tvm | tvm-main/tests/python/relay/test_prng.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.relay
import tvm.testing
from tvm.relay.testing import run_infer_type
@tvm.testing.parametrize_targets
def test_threefry_repeatability(target, dev):
    """Two generators built from the same seed yield identical keys and outputs."""

    def generate_once():
        seed_key = tvm.relay.random.threefry_key(1)
        generated = tvm.relay.random.threefry_generate(seed_key, (12,))
        return tvm.relay.create_executor(
            "vm",
            tvm.IRModule.from_expr(tvm.relay.Function([], generated)),
            target=target,
            device=dev,
        ).evaluate()()

    out_key1, out1 = generate_once()
    out_key2, out2 = generate_once()
    assert (
        out1.numpy() == out2.numpy()
    ).all(), "Generate on same seed should have the same output random numbers"
    assert (
        out_key1.numpy() == out_key2.numpy()
    ).all(), "Generate on same seed should have the same next keys"
@tvm.testing.parametrize_targets
def test_threefry_split(target, dev):
    """Splitting a key yields two keys whose generated streams differ."""
    root_key = tvm.relay.random.threefry_key(1)
    left_key, right_key = tvm.relay.TupleWrapper(tvm.relay.random.threefry_split(root_key), 2)
    _, left_rand = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(left_key, (16,)), 2)
    _, right_rand = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(right_key, (16,)), 2)
    out1, out2 = tvm.relay.create_executor(
        "vm",
        tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((left_rand, right_rand)))),
        target=target,
        device=dev,
    ).evaluate()()
    assert (
        out1.numpy() != out2.numpy()
    ).any(), "Generate after split should not have the same output"
@tvm.testing.parametrize_targets
def test_threefry_sequential_generate(target, dev):
    """Back-to-back generates from the advanced key produce different numbers."""
    state_key = tvm.relay.random.threefry_key(1)
    state_key, first = tvm.relay.TupleWrapper(
        tvm.relay.random.threefry_generate(state_key, (4,)), 2
    )
    _, second = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(state_key, (4,)), 2)
    out1, out2 = tvm.relay.create_executor(
        "vm",
        tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((first, second)))),
        target=target,
        device=dev,
    ).evaluate()()
    assert (
        out1.numpy() != out2.numpy()
    ).any(), "Sequential generates should not have the same output"
@tvm.testing.parametrize_targets
def test_threefry_sequential_generate_remaining(target, dev):
    """Sequential generates of a non-multiple size differ in the tail elements."""
    state_key = tvm.relay.random.threefry_key(1)
    state_key, first = tvm.relay.TupleWrapper(
        tvm.relay.random.threefry_generate(state_key, (7,)), 2
    )
    _, second = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(state_key, (7,)), 2)
    out1, out2 = tvm.relay.create_executor(
        "vm",
        tvm.IRModule.from_expr(tvm.relay.Function([], tvm.relay.Tuple((first, second)))),
        target=target,
        device=dev,
    ).evaluate()()
    assert (
        out1.numpy()[-3:] != out2.numpy()[-3:]
    ).any(), "Sequential generates should not have the same output"
def test_threefry_generate_infer():
    """threefry_generate infers a (key, tensor) tuple of uint64s."""
    out_shape = (12,)
    expected_type = tvm.relay.TupleType(
        [
            tvm.relay.TensorType([10], dtype="uint64"),
            tvm.relay.TensorType(out_shape, dtype="uint64"),
        ]
    )
    generated = tvm.relay.random.threefry_generate(tvm.relay.random.threefry_key(1), out_shape)
    func = run_infer_type(tvm.relay.Function([], generated))
    assert tvm.ir.structural_equal(func.ret_type, expected_type)
def test_threefry_split_infer():
    """threefry_split infers a pair of uint64 keys."""
    key_type = tvm.relay.TensorType([10], dtype="uint64")
    split = tvm.relay.random.threefry_split(tvm.relay.random.threefry_key(1))
    func = run_infer_type(tvm.relay.Function([], split))
    assert tvm.ir.structural_equal(func.ret_type, tvm.relay.TupleType([key_type, key_type]))
def test_uniform_infer():
    """uniform infers (key, tensor) for each supported float output dtype."""
    out_shape = (12,)
    for out_dtype in ["float32", "float64"]:
        expected_type = tvm.relay.TupleType(
            [
                tvm.relay.TensorType([10], dtype="uint64"),
                tvm.relay.TensorType(out_shape, dtype=out_dtype),
            ]
        )
        sampled = tvm.relay.random.uniform(tvm.relay.random.threefry_key(1), out_shape, out_dtype)
        func = run_infer_type(tvm.relay.Function([], sampled))
        assert tvm.ir.structural_equal(func.ret_type, expected_type)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_threefry_generate_infer_fail():
    """Type inference must reject a 9-element key (valid keys have 10)."""
    bad_key = tvm.relay.const([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype="uint64")
    generated = tvm.relay.random.threefry_generate(bad_key, (12,))
    run_infer_type(tvm.relay.Function([], generated))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_threefry_split_infer_fail():
    """Type inference must reject splitting a 9-element key (valid keys have 10)."""
    bad_key = tvm.relay.const([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype="uint64")
    split = tvm.relay.random.threefry_split(bad_key)
    run_infer_type(tvm.relay.Function([], split))
@tvm.testing.requires_llvm
def test_threefry_generate_out_size():
    """Requesting fewer elements than a full threefry block still executes."""
    seed_key = tvm.relay.random.threefry_key(1)
    _, rand = tvm.relay.TupleWrapper(tvm.relay.random.threefry_generate(seed_key, (5,)), 2)
    tvm.relay.create_executor(
        "vm",
        tvm.IRModule.from_expr(tvm.relay.Function([], rand)),
        target=tvm.target.Target("llvm"),
        device=tvm.device("cpu"),
    ).evaluate()()
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test driver when run as a script.
    tvm.testing.main()
| 6,269 | 35.882353 | 94 | py |
tvm | tvm-main/tests/python/relay/test_type_infer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test that type checker correcly computes types
for expressions.
"""
import pytest
import tvm
from tvm import IRModule, parser, relay, te
from tvm.relay import analysis, op, transform
from tvm.relay.op import op as _op
import numpy as np
def infer_mod(mod, annotate_spans=True):
    """Run type inference on `mod`, optionally annotating spans first."""
    if annotate_spans:
        mod = relay.transform.AnnotateSpans()(mod)
    return transform.InferType()(mod)
def infer_expr(expr):
    # Type-check `expr` in place (local inference, no module) and return it.
    transform.InferTypeLocal(expr)
    return expr
def assert_has_type(expr, typ, mod=None):
    """Assert that `expr` type-checks to `typ` when installed as @main of `mod`."""
    if not mod:
        mod = tvm.IRModule({})
    mod["main"] = expr
    checked_type = infer_mod(mod)["main"].checked_type
    if checked_type != typ:
        raise RuntimeError("Type mismatch %s vs %s" % (checked_type, typ))
def initialize_box_adt(mod):
    """Register a minimal `box[tv]` ADT in `mod`; return (type var, constructor)."""
    box = relay.GlobalTypeVar("box")
    tv = relay.TypeVar("tv")
    constructor = relay.Constructor("constructor", [tv], box)
    mod[box] = relay.TypeData(box, [tv], [constructor])
    return box, constructor
def test_monomorphic_let():
    """Program: let %x = 1; %x — the let body infers the bound scalar type."""
    builder = relay.ScopeBuilder()
    x = relay.var("x", dtype="float64", shape=())
    bound = builder.let(x, relay.const(1.0, "float64"))
    builder.ret(bound)
    checked = infer_expr(builder.get())
    assert checked.checked_type == relay.scalar_type("float64")
def test_single_op():
    """Program: fn (%x : float32) { log(%x) } infers float32 -> float32."""
    arg = relay.var("x", shape=[])
    log_fn = relay.Function([arg], op.log(arg))
    scalar = relay.TensorType([], dtype="float32")
    assert_has_type(log_fn, relay.FuncType([scalar], scalar))
def test_add_broadcast_op():
    """Broadcast addition infers the broadcast output shape.

    fn (%x: Tensor[(10, 4), float32], %y: Tensor[(5, 10, 1), float32])
        -> Tensor[(5, 10, 4), float32]
    """
    x = relay.var("x", shape=(10, 4))
    y = relay.var("y", shape=(5, 10, 1))
    func = relay.Function([x, y], x + y)
    lhs_t = relay.TensorType((10, 4), "float32")
    rhs_t = relay.TensorType((5, 10, 1), "float32")
    out_t = relay.TensorType((5, 10, 4), "float32")
    assert_has_type(func, relay.FuncType([lhs_t, rhs_t], out_t))
def test_dual_op():
    """Program:
    fn (%x : Tensor[(10, 10), float32]) {
      let %t1 = log(x);
      let %t2 = add(%t1, %x);
      %t2
    }
    """
    tensor_t = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tensor_t)
    builder = relay.ScopeBuilder()
    logged = builder.let("t1", relay.log(x))
    summed = builder.let("t2", relay.add(logged, x))
    builder.ret(summed)
    checked = infer_expr(relay.Function([x], builder.get()))
    assert checked.checked_type == relay.FuncType([tensor_t], tensor_t)
def test_decl():
    """Program:
    def @f(%x : Tensor[(10, 10), float32]) {
        log(%x)
    }
    """
    tensor_t = relay.TensorType((10, 10))
    arg = relay.var("x", tensor_t)
    checked = infer_expr(relay.Function([arg], relay.log(arg)))
    assert checked.checked_type == relay.FuncType([tensor_t], tensor_t)
def test_recursion():
    """A recursive global function type-checks and prints its self-call.

    Program:
       def @f(%n: int32, %data: float32) -> float32 {
          if (%n == 0) {
              %data
          } else {
              @f(%n - 1, log(%data))
          }
       }
    """
    builder = relay.ScopeBuilder()
    f = relay.GlobalVar("f")
    int32_t = relay.scalar_type("int32")
    float32_t = relay.scalar_type("float32")
    n = relay.var("n", int32_t)
    data = relay.var("data", float32_t)
    with builder.if_scope(relay.equal(n, relay.const(0, int32_t))):
        builder.ret(data)
    with builder.else_scope():
        builder.ret(f(relay.subtract(n, relay.const(1, int32_t)), relay.log(data)))
    mod = tvm.IRModule()
    mod[f] = relay.Function([n, data], builder.get())
    mod = infer_mod(mod)
    # The recursive call must appear in the printed text form.
    assert "@f(%1, %2)" in mod.astext()
    assert mod["f"].checked_type == relay.FuncType([int32_t, float32_t], float32_t)
def test_incomplete_call():
    """A call through an unannotated function-valued var still infers its type."""
    int_t = relay.scalar_type("int32")
    x = relay.var("x", int_t)
    f = relay.var("f")
    func = relay.Function([x, f], relay.Call(f, [x]), int_t)
    checked = infer_expr(func)
    assert checked.checked_type == relay.FuncType([int_t, relay.FuncType([int_t], int_t)], int_t)
def test_higher_order_argument():
    """A polymorphic identity is a valid argument to a higher-order function."""
    a = relay.TypeVar("a")
    id_arg = relay.Var("x", a)
    id_func = relay.Function([id_arg], id_arg, a, [a])
    b = relay.TypeVar("b")
    fn_param = relay.Var("f", relay.FuncType([b], b))
    val_param = relay.Var("y", b)
    ho_func = relay.Function([fn_param, val_param], fn_param(val_param), b, [b])
    # id_func is accepted even though it carries its own type parameter.
    checked = infer_expr(ho_func(id_func, relay.const(0, "int32")))
    assert checked.checked_type == relay.scalar_type("int32")
def test_higher_order_return():
    """Returning a polymorphic function infers a nested FuncType."""
    a = relay.TypeVar("a")
    id_arg = relay.Var("x", a)
    id_func = relay.Function([id_arg], id_arg, a, [a])
    b = relay.TypeVar("b")
    nested = relay.Function([], id_func, relay.FuncType([b], b), [b])
    checked = infer_expr(nested)
    assert checked.checked_type == relay.FuncType([], relay.FuncType([b], b), [b])
def test_higher_order_nested():
    """Both branches of an `if` unify to the same polymorphic function type."""
    a = relay.TypeVar("a")
    id_arg = relay.Var("x", a)
    id_func = relay.Function([id_arg], id_arg, a, [a])
    choice_t = relay.FuncType([], relay.scalar_type("bool"))
    chooser = relay.Var("f", choice_t)
    b = relay.TypeVar("b")
    other_arg = relay.Var("z")
    top = relay.Function(
        [chooser],
        relay.If(chooser(), id_func, relay.Function([other_arg], other_arg)),
        relay.FuncType([b], b),
        [b],
    )
    checked = infer_expr(top)
    assert checked.checked_type == relay.FuncType([choice_t], relay.FuncType([b], b), [b])
def test_tuple():
    """A two-element tuple infers the corresponding TupleType."""
    tensor_t = relay.TensorType((10,))
    elem = relay.var("x", tensor_t)
    pair = relay.Tuple([elem, elem])
    assert infer_expr(pair).checked_type == relay.TupleType([tensor_t, tensor_t])
def test_ref():
    """RefCreate/RefRead/RefWrite infer ref, value, and unit types respectively."""
    x = relay.var("x", "float32")
    y = relay.var("y", "float32")
    scalar_t = relay.scalar_type("float32")
    ref = relay.RefCreate(x)
    assert infer_expr(ref).checked_type == relay.RefType(scalar_t)
    assert infer_expr(relay.RefRead(ref)).checked_type == scalar_t
    assert infer_expr(relay.RefWrite(ref, y)).checked_type == relay.TupleType([])
def test_free_expr():
    """Inference on an expression with free vars preserves the vars' identity."""
    x = relay.var("x", "float32")
    checked = infer_expr(relay.add(x, x))
    assert tvm.ir.structural_equal(checked.args[0], x, map_free_vars=True)
    assert checked.checked_type == relay.scalar_type("float32")
    assert x.vid.same_as(checked.args[0].vid)
def test_type_args():
    """Module-level inference populates Call.type_args with argument types."""
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(1, 10))
    total = relay.add(x, y)
    # InferTypeLocal does not support populating the type_args field
    mod = infer_mod(IRModule.from_expr(total))
    mod = infer_mod(mod, annotate_spans=False)
    ty_args = mod["main"].body.type_args
    assert len(ty_args) == 2
    assert ty_args[0].dtype == "float32"
    assert ty_args[1].dtype == "float32"
    lhs_shape, rhs_shape = ty_args[0].shape, ty_args[1].shape
    assert [int(dim) for dim in lhs_shape] == [10, 10]
    assert [int(dim) for dim in rhs_shape] == [1, 10]
def test_global_var_recursion():
    """@main may call itself; the recursive call type-checks."""
    mod = tvm.IRModule({})
    main_gv = relay.GlobalVar("main")
    arg = relay.var("x", shape=[])
    scalar_t = relay.scalar_type("float32")
    mod[main_gv] = relay.Function([arg], relay.Call(main_gv, [arg]), scalar_t)
    mod = infer_mod(mod)
    assert mod["main"].checked_type == relay.FuncType([scalar_t], scalar_t)
def test_equal():
    """`equal` on an int32 scalar infers an int32 -> bool function type."""
    i = relay.var("i", shape=[], dtype="int32")
    eq = op.equal(i, relay.const(0, dtype="int32"))
    func = relay.Function([i], eq)
    ft = infer_expr(func)
    expected = relay.FuncType([relay.scalar_type("int32")], relay.scalar_type("bool"))
    # The original repeated this exact assertion twice verbatim; the duplicate
    # added no coverage and has been removed.
    assert ft.checked_type == expected
def test_constructor_type():
    """Applying an ADT constructor infers a function type into the ADT."""
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)
    a = relay.TypeVar("a")
    arg = relay.Var("x", a)
    mod["main"] = relay.Function([arg], constructor(arg), box(a), [a])
    mod = infer_mod(mod)
    # Look the type var back up from the inferred module before comparing.
    box = mod.get_global_type_var("box")
    assert mod["main"].checked_type == relay.FuncType([a], box(a), [a])
def test_constructor_call():
    """Constructor calls instantiate the ADT at each argument's type."""
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)
    boxed_unit = constructor(relay.Tuple([]))
    boxed_scalar = constructor(relay.const(0, "float32"))
    mod["main"] = relay.Function([], relay.Tuple([boxed_unit, boxed_scalar]))
    mod = infer_mod(mod)
    ret_fields = mod["main"].checked_type.ret_type.fields
    # NB(@jroesch): when we annotate spans the ast fragments before
    # annotation the previous fragments will no longer be directly equal,
    # so compare against types looked up from the post-annotation module.
    box = mod.get_global_type_var("box")
    assert ret_fields[0] == box(relay.TupleType([]))
    assert ret_fields[1] == box(relay.TensorType((), "float32"))
def test_adt_match():
    """Match over a boxed value: both clauses return the empty tuple, so the
    match expression — and the enclosing function — should type to ()."""
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)
    v = relay.Var("v", relay.TensorType((), "float32"))
    match = relay.Match(
        constructor(relay.const(0, "float32")),
        [
            relay.Clause(
                relay.PatternConstructor(constructor, [relay.PatternVar(v)]), relay.Tuple([])
            ),
            # redundant but shouldn't matter to typechecking
            relay.Clause(relay.PatternWildcard(), relay.Tuple([])),
        ],
    )
    func = relay.Function([], match)
    mod["main"] = func
    mod = infer_mod(mod)
    actual = mod["main"].checked_type.ret_type
    assert actual == relay.TupleType([])
def test_adt_match_type_annotations():
    """Type info appearing only on a match-pattern variable must be enough to
    infer the (otherwise unannotated) function parameter's type."""
    mod = tvm.IRModule()
    box, constructor = initialize_box_adt(mod)
    # the only type annotation is inside the match pattern var
    # but that should be enough info
    tt = relay.TensorType((2, 2), "float32")
    x = relay.Var("x")
    mv = relay.Var("mv", tt)
    match = relay.Match(
        constructor(x),
        [
            relay.Clause(
                relay.PatternConstructor(constructor, [relay.PatternVar(mv)]), relay.Tuple([])
            )
        ],
    )
    mod["main"] = relay.Function([x], match)
    mod = infer_mod(mod)
    ft = mod["main"].checked_type
    assert ft == relay.FuncType([tt], relay.TupleType([]))
def test_let_polymorphism():
    """A let-bound polymorphic identity can be applied at two distinct types."""
    ident = relay.Var("id")
    tv = relay.TypeVar("xt")
    param = relay.Var("x", tv)
    poly_id = relay.Function([param], param, tv, [tv])
    # Use `id` at both int32 and unit-tuple type within one body.
    uses = relay.Tuple([ident(relay.const(1)), ident(relay.Tuple([]))])
    checked = infer_expr(relay.Let(ident, poly_id, uses))
    int32 = relay.TensorType((), "int32")
    tvm.ir.assert_structural_equal(
        checked.checked_type, relay.TupleType([int32, relay.TupleType([])])
    )
def test_type_arg_infer():
    """Calling a polymorphic function from Relay text: the type argument at
    the call site should be inferred as scalar float32."""
    code = """
#[version = "0.0.5"]
def @id[A](%x: A) -> A {
%x
}
def @main(%f: float32) -> float32 {
@id(%f)
}
"""
    mod = tvm.relay.fromtext(code)
    mod = transform.InferType()(mod)
    tvm.ir.assert_structural_equal(mod["main"].body.type_args, [relay.TensorType((), "float32")])
def test_dynamic_function():
    """Calling a dynamically-shaped function with a statically-shaped argument
    specializes the parameter type; a fully dynamic conv2d also type-checks."""
    dynamic_type = relay.TensorType([relay.Any()], "float32")
    static_type = relay.TensorType([10], "float32")
    inner_param = relay.Var("x", dynamic_type)
    add_fn = relay.Function([inner_param], inner_param + inner_param)
    outer_param = relay.Var("y", static_type)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([outer_param], add_fn(outer_param))
    mod = transform.InferType()(mod)
    # The static shape of the argument should win over the Any-shaped param.
    assert mod["main"].params[0].checked_type == static_type

    data = relay.var(
        "data", shape=(relay.Any(), relay.Any(), relay.Any(), relay.Any()), dtype="float32"
    )
    # Renamed from the original misspelling `weigth`.
    weight = relay.const(np.full((16, 16, 3, 3), 0.25), dtype="float32")
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), channels=16, groups=2)
    mod = tvm.IRModule.from_expr(conv)
    mod = transform.InferType()(mod)
def test_custom_op_infer():
    """Tests infer type for custom_op"""
    op_name = "custom_log"
    _op.register(op_name, r"code(cal log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    # call default relation functions
    _op.get(op_name).add_type_rel("Identity")
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        # Thin wrapper building a call node to the freshly registered op.
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = infer_expr(f)
    # With the "Identity" relation the output type equals the input type.
    assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_custom_add_broadcast_op():
    """Tests infer type for broadcast custom_op"""
    op_name = "custom_broadcast_add"
    _op.register(op_name, r"code(Add two tensor with inner broadcasting.)code")
    _op.get(op_name).set_num_inputs(2)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).add_argument("data_1", "Tensor", "The input data tensor.")
    # call default relation functions
    _op.get(op_name).add_type_rel("Broadcast")
    _op.get(op_name).set_support_level(1)
    _op.register_stateful(op_name, False)

    def broadcast_add(x, y):
        return relay.Call(_op.get(op_name), [x, y])

    x = relay.var("x", shape=(10, 4))
    y = relay.var("y", shape=(5, 10, 1))
    z = broadcast_add(x, y)
    func = relay.Function([x, y], z)
    t1 = relay.TensorType((10, 4), "float32")
    t2 = relay.TensorType((5, 10, 1), "float32")
    # (10, 4) broadcast against (5, 10, 1) yields (5, 10, 4).
    t3 = relay.TensorType((5, 10, 4), "float32")
    expected_ty = relay.FuncType([t1, t2], t3)
    assert_has_type(func, expected_ty)
def test_custom_op_rel_infer():
    """Tests infer type for custom_op"""

    def custom_log1_rel(arg_types, attrs):
        # Custom type relation: the output type mirrors the single input.
        assert len(arg_types) == 1, "type relation arg number mismatch!"
        if attrs:
            assert isinstance(attrs, DictAttrs)
        inputa_type = arg_types[0]
        return relay.TensorType(inputa_type.shape, inputa_type.dtype)

    op_name = "custom_log1"
    _op.register(op_name, r"code(cal log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).set_attrs_type_key("DictAttrs")
    # call customized relation functions
    _op.get(op_name).add_type_rel("custom_log1", custom_log1_rel)
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    fchecked = infer_expr(f)
    assert fchecked.checked_type == relay.FuncType([tp], tp)
def test_custom_op_rel_infer_exception():
    """A failing custom type relation must surface as a TVMError with the
    relation's assertion message."""

    def custom_log1_rel(arg_types, attrs):
        # Deliberately wrong arity check: the op supplies one argument,
        # so this assertion fires during type inference.
        assert len(arg_types) == 2, "type relation arg number mismatch!"
        return None

    op_name = "custom_log2"
    _op.register(op_name, r"code(cal log of a tensor.)code")
    _op.get(op_name).set_num_inputs(1)
    _op.get(op_name).add_argument("data_0", "Tensor", "The input data tensor.")
    _op.get(op_name).set_attrs_type_key("DictAttrs")
    # call customized relation functions
    _op.get(op_name).add_type_rel("custom_log2", custom_log1_rel)
    _op.get(op_name).set_support_level(1)
    _op.register_pattern(op_name, _op.OpPattern.ELEMWISE)
    _op.register_stateful(op_name, False)

    def clog(x):
        return relay.Call(_op.get(op_name), [x])

    tp = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tp)
    sb = relay.ScopeBuilder()
    t1 = sb.let("t1", clog(x))
    t2 = sb.let("t2", relay.add(t1, x))
    sb.ret(t2)
    f = relay.Function([x], sb.get())
    with pytest.raises(tvm.error.TVMError) as cm:
        infer_expr(f)
    # BUG FIX: the original checked `cm.execption`, which is not an attribute
    # of pytest's ExceptionInfo (the exception object is `cm.value`), and the
    # check sat after the raising call so it never actually ran.
    assert "type relation arg number mismatch" in str(cm.value)
def test_repeat_register():
    """Registering the same operator name twice must raise a TVMError."""
    op_name = "custom_log3"
    _op.register(op_name, r"code(cal log of a tensor.)code")
    with pytest.raises(tvm.error.TVMError) as cm:
        _op.register(op_name)
    # BUG FIX: `cm.execption` is not an attribute of pytest's ExceptionInfo;
    # the raised exception object is exposed as `cm.value`.
    assert "Operator custom_log3 is registered before" in str(cm.value)
def test_argreduce_infer_return_type():
    """argmax/argmin over a dynamically-broadcast tensor keep the shape dtype
    (int32 or int64) in the inferred result type."""
    x_shape = (1, 1)
    broadcast_shape = [1, 1]
    shape_dtypes = [("int32", np.int32), ("int64", np.int64)]
    # argmax and argmin share the same type-inference rule, so exercise both
    # over the same shape-dtype matrix instead of duplicating the loop body.
    for reduce_op in (relay.op.argmax, relay.op.argmin):
        for sdtype, conv in shape_dtypes:
            x = relay.var("data", relay.TensorType(x_shape, "float32"))
            broadcast_to = relay.op.broadcast_to(x, relay.const(broadcast_shape, dtype=sdtype))
            reduced = reduce_op(broadcast_to, axis=[1])
            f = relay.Function([x], reduced)
            assert_has_type(
                f,
                relay.FuncType(
                    [relay.TensorType(broadcast_shape, "float32")],
                    relay.TensorType([conv(1)], dtype=sdtype),
                ),
            )
# Allow running this test file as a standalone script.
if __name__ == "__main__":
    tvm.testing.main()
| 18,613 | 30.284034 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_mul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
import tvm.topi.testing
# "unquantize" a quantized tensor
def recover(data, scale, zp):
    """Dequantize: map quantized integer values back to real numbers."""
    centered = np.asarray(data) - zp
    return centered * scale
def generate_golden_output(x_recovered, y_recovered, scale, zp):
    """Reference qnn.mul: multiply in float, requantize, saturate to uint8."""
    requantized = np.around(x_recovered * y_recovered / scale + zp)
    lo = np.iinfo(np.uint8).min
    hi = np.iinfo(np.uint8).max
    return np.clip(requantized, lo, hi)
def test_tflite_same_io_qnn_params():
    """qnn.mul where both inputs and the output share scale and zero point."""
    data_dtype = "uint8"
    lhs_scale = rhs_scale = output_scale = 0.00784314
    lhs_zero_point = rhs_zero_point = output_zero_point = 127
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    z = relay.qnn.op.mul(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(lhs_scale, "float32"),
        lhs_zero_point=relay.const(lhs_zero_point, "int32"),
        rhs_scale=relay.const(rhs_scale, "float32"),
        rhs_zero_point=relay.const(rhs_zero_point, "int32"),
        output_scale=relay.const(output_scale, "float32"),
        output_zero_point=relay.const(output_zero_point, "int32"),
    )
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    # Lower qnn.mul into plain relay ops before execution.
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    x_datas = [
        np.array((1, 153, 2, 178)).reshape((1, 4)),
        np.array((25, 1, 178, 216)).reshape((1, 4)),
        np.array((25, 153, 1, 165)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((204, 178, 1, 8)).reshape((1, 4)),
        np.array((204, 178, 191, 1)).reshape((1, 4)),
        np.array((204, 178, 1, 191)).reshape((1, 4)),
    ]
    for i in range(0, 3):
        x_data = x_datas[i]
        y_data = y_datas[i]
        # Compare the compiled result against the float reference path.
        x_rec = recover(x_data, lhs_scale, lhs_zero_point)
        y_rec = recover(y_data, rhs_scale, rhs_zero_point)
        golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_tflite_different_io_qnn_params():
    """qnn.mul where lhs, rhs and output each have distinct quantization
    parameters (TFLite-style)."""
    data_dtype = "uint8"
    lhs_scale = 0.0156863
    lhs_zero_point = 127
    rhs_scale = 0.0117647
    rhs_zero_point = 85
    output_scale = 0.0235294
    output_zero_point = 128
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    z = relay.qnn.op.mul(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(lhs_scale, "float32"),
        lhs_zero_point=relay.const(lhs_zero_point, "int32"),
        rhs_scale=relay.const(rhs_scale, "float32"),
        rhs_zero_point=relay.const(rhs_zero_point, "int32"),
        output_scale=relay.const(output_scale, "float32"),
        output_zero_point=relay.const(output_zero_point, "int32"),
    )
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    # Lower qnn.mul into plain relay ops before execution.
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    x_datas = [
        np.array((76, 140, 153, 172)).reshape((1, 4)),
        np.array((133, 140, 146, 153)).reshape((1, 4)),
        np.array((76, 140, 172, 146)).reshape((1, 4)),
    ]
    y_datas = [
        np.array((136, 119, 128, 17)).reshape((1, 4)),
        np.array((136, 119, 111, 94)).reshape((1, 4)),
        np.array((136, 119, 17, 128)).reshape((1, 4)),
    ]
    for i in range(0, 3):
        x_data = x_datas[i]
        y_data = y_datas[i]
        # Compare the compiled result against the float reference path.
        x_rec = recover(x_data, lhs_scale, lhs_zero_point)
        y_rec = recover(y_data, rhs_scale, rhs_zero_point)
        golden = generate_golden_output(x_rec, y_rec, output_scale, output_zero_point)
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), np.uint8(golden))
def test_saturation():
    """qnn.mul must saturate to the uint8 range for several combinations of
    scales and zero points.

    The original function repeated the same ~28-line build/run/compare body
    three times with different parameters; it is factored into one nested
    helper driven three times, preserving the exact cases and order.
    """
    data_dtype = "uint8"
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)

    def _check(lhs_scale, lhs_zp, rhs_scale, rhs_zp, out_scale, out_zp, x_vals, y_vals):
        # Build one qnn.mul graph, canonicalize, execute on LLVM, and compare
        # against the float reference from recover/generate_golden_output.
        z = relay.qnn.op.mul(
            lhs=x,
            rhs=y,
            lhs_scale=relay.const(lhs_scale, "float32"),
            lhs_zero_point=relay.const(lhs_zp, "int32"),
            rhs_scale=relay.const(rhs_scale, "float32"),
            rhs_zero_point=relay.const(rhs_zp, "int32"),
            output_scale=relay.const(out_scale, "float32"),
            output_zero_point=relay.const(out_zp, "int32"),
        )
        func = relay.Function([x, y], z)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        mod = relay.qnn.transform.CanonicalizeOps()(mod)
        func = mod["main"]
        x_data = np.array(x_vals).reshape((1, 4))
        y_data = np.array(y_vals).reshape((1, 4))
        x_rec = recover(x_data, lhs_scale, lhs_zp)
        y_rec = recover(y_data, rhs_scale, rhs_zp)
        golden = generate_golden_output(x_rec, y_rec, out_scale, out_zp)
        op_res = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
            x_data, y_data
        )
        np.testing.assert_equal(op_res.numpy(), np.uint8(golden))

    # Same params
    _check(0.125, 0, 0.125, 0, 0.125, 0, (255, 1, 1, 0), (255, 255, 128, 0))
    # Same params, different scale
    _check(0.125, 0, 0.125, 0, 0.25, 0, (255, 1, 1, 0), (255, 255, 127, 0))
    # All params different
    _check(0.5, 0, 0.25, 0, 0.125, 0, (255, 0, 1, 0), (0, 128, 64, 0))
# Run all cases in order when invoked directly (outside pytest).
if __name__ == "__main__":
    test_tflite_same_io_qnn_params()
    test_tflite_different_io_qnn_params()
    test_saturation()
| 8,788 | 33.065891 | 97 | py |
tvm | tvm-main/tests/python/relay/test_pass_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test legalize pass"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
def run_opt_pass(expr, passes):
    """Wrap *expr* in a module, apply *passes* at opt_level 3, and return the
    resulting main function (or its body for non-Function inputs)."""
    pass_list = passes if isinstance(passes, list) else [passes]
    mod = tvm.IRModule.from_expr(expr)
    seq = tvm.transform.Sequential(pass_list)
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    entry = mod["main"]
    if isinstance(expr, relay.Function):
        return entry
    return entry.body
def test_legalize():
    """Test directly replacing an operator with a new one"""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def legalize_conv2d(attrs, inputs, types):
        # Rewrite the conv2d with a scaled weight so the legalized graph is
        # observably different from the input graph.
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            relay.multiply(weight, relay.const(2.0, "float32")),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
        a = before()
        a = run_opt_pass(a, transform.Legalize())
        b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_none():
    """A legalize callback that returns None must leave the graph untouched,
    while still having been invoked."""
    invoked = [False]

    def make_graph():
        data = relay.var("x", shape=(1, 64, 56, 56))
        pooled = relay.nn.global_max_pool2d(data)
        return relay.Function([data], pooled)

    def noop_legalize(attrs, inputs, types):
        invoked[0] = True
        return None

    with TempOpAttr("nn.global_max_pool2d", "FTVMLegalize", noop_legalize):
        actual = run_opt_pass(make_graph(), transform.Legalize())
        reference = run_opt_pass(make_graph(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
        assert invoked[0]
def test_legalize_multiple_ops():
    """Test directly replacing an operator with a new one"""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    def legalize_conv2d(attrs, inputs, types):
        # Conv legalization: scale the weight by 2.
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    def legalize_relu(attrs, inputs, types):
        # Relu legalization: prepend an (identity) add of zero.
        data = inputs[0]
        add = relay.add(tvm.relay.const(0, "float32"), data)
        return relay.nn.relu(add)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            relay.multiply(weight, relay.const(2.0, "float32")),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.add(tvm.relay.const(0, "float32"), y)
        y = relay.nn.relu(y)
        y = relay.Function([x, weight], y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
        with TempOpAttr("nn.relu", "FTVMLegalize", legalize_relu):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_legalize_multi_input():
    """Legalizing a multi-input op: the callback must see the tuple-packed
    inputs, and returning None must leave the graph unchanged.

    The original defined an `expected()` function that was a byte-for-byte
    duplicate of `before()`; the duplicate is removed and `before()` reused.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        y = relay.var("y", shape=(1, 64, 56, 20))
        z = relay.var("z", shape=(1, 64, 56, 10))
        func = relay.concatenate([x, y, z], axis=3)
        func = relay.Function([x, y, z], func)
        return func

    def legalize_concatenate(attrs, inputs, types):
        # Check that the correct multi-input case is handled.
        assert len(inputs) == 1
        assert isinstance(inputs[0], tvm.relay.expr.Tuple)
        assert len(types) == 2
        assert isinstance(types[0], tvm.relay.ty.TupleType)
        assert isinstance(types[1], tvm.relay.ty.TensorType)
        return None

    with TempOpAttr("concatenate", "FTVMLegalize", legalize_concatenate):
        a = before()
        a = run_opt_pass(a, transform.Legalize())
        # Expected graph equals the input graph, since the callback is a no-op.
        b = run_opt_pass(before(), transform.InferType())

    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
# Run all cases in order when invoked directly (outside pytest).
if __name__ == "__main__":
    test_legalize()
    test_legalize_none()
    test_legalize_multiple_ops()
    test_legalize_multi_input()
| 6,343 | 33.107527 | 87 | py |
tvm | tvm-main/tests/python/relay/test_to_mixed_precision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for testing ToMixedPrecision pass"""
from typing import Any, Dict, List
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.testing import lstm
from tvm.relay.transform import InferType, ToMixedPrecision, mixed_precision
# Pytest fixture parametrizing each test over both reduced-precision dtypes.
target_precision = tvm.testing.parameter(
    pytest.param("float16"),
    pytest.param("bfloat16"),
    ids=["float16", "bfloat16"],
)
def run_module(mod: tvm.runtime.Module, mod_params: Dict[str, Any]) -> List:
    """Run *mod* on CPU with the debug executor and always return a list of
    numpy arrays (multi-output ADT results are unpacked)."""
    dev = tvm.device("llvm", 0)
    executor = relay.create_executor("debug", mod, device=dev, target="llvm")
    result = executor.evaluate()(**mod_params)
    if isinstance(result, tvm.runtime.container.ADT):
        return [r.numpy() for r in result]
    return [result.numpy()]
def verify_mixed_precision_output_close(
    mod: tvm.runtime.Module,
    mod_params: Dict[str, Any],
    mixed_precision_dtype="float16",
    rtol: float = 1e-3,
    atol: float = 0,
    keep_orig_output_dtype=False,
) -> tvm.runtime.Module:
    """Run *mod* in fp32 and after ToMixedPrecision; check the results match
    within *rtol*/*atol* and return the converted module.

    For bfloat16 the numerical comparison is skipped — only the conversion
    itself is exercised.
    """
    mod = InferType()(mod)
    result_fp32 = run_module(mod, mod_params)
    if not keep_orig_output_dtype:
        amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)
        result_amp = run_module(amp_mod, mod_params)
    else:
        # Ask the pass to cast outputs back to their original dtype.
        with tvm.transform.PassContext(
            config={"relay.ToMixedPrecision.keep_orig_output_dtype": True}
        ):
            amp_mod = ToMixedPrecision(mixed_precision_dtype)(mod)
            result_amp = run_module(amp_mod, mod_params)
    # Ensure the results are close
    if mixed_precision_dtype != "bfloat16":
        for fp32, amp in zip(result_fp32, result_amp):
            np.testing.assert_allclose(fp32, amp, rtol=rtol, atol=atol)
    if keep_orig_output_dtype:
        assert (
            np.array(result_amp).dtype == np.array(result_fp32).dtype
        ), "output type and original type mismatch"
    return amp_mod
def test_lstm(target_precision):
    """A small stress test on a single unrolled lstm unit.

    Has internal functions and let statements the pass must work on.
    """
    # TODO(AndrewZhaoLuo): investigate why non-even units cause failure in codegen for CUDA
    # See discussion here: https://github.com/apache/tvm/issues/8294#issuecomment-866190408
    units = 4
    iterations = 5
    mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)
    # This is an unrolled lstm so each data should be the previous results but
    # we don't care, we just want to stress test things.
    for i in range(iterations):
        mod_params["data" if i == 0 else f"data{i}"] = np.random.uniform(
            -10, 10, (1, units)
        ).astype("float32")
    verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, rtol=0.01, atol=0.01
    )
def test_lstm_float64():
    """Tests if can handle other mixed precision types.

    As a toy example show can convert graph to float64 and have it run.
    It doesn't really make sense to do it, this just shows we can change
    the target mixed_precision_dtype.
    """
    units = 3
    iterations = 5
    mod, mod_params = lstm.get_workload(iterations=iterations, num_hidden=units)
    # This is an unrolled lstm so each data should be the previous results but
    # we don't care, we just want to stress test things.
    for i in range(iterations):
        mod_params["data" if i == 0 else f"data{i}"] = np.random.uniform(
            -10, 10, (1, units)
        ).astype("float32")
    verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype="float64", rtol=0.01, atol=0.01
    )
def test_convert_single_conv(target_precision):
    """Conv is a green listed operation meaning it will always use fp16 workload.

    By default it accumulates to fp32 and outputs fp16.
    """
    data_shape = (1, 3, 32, 32)
    weight_shape = (5, 3, 3, 3)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=weight_shape, dtype="float32")
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
    mod = tvm.IRModule.from_expr(conv)
    mod = tvm.relay.transform.InferType()(mod)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
    }
    amp_mod = verify_mixed_precision_output_close(
        mod,
        mod_params,
        mixed_precision_dtype=target_precision,
        atol=0.01,
        rtol=1e-3,
        keep_orig_output_dtype=True,
    )
    # Expected: inputs cast down to the target precision, conv performed
    # there, and the result cast back to float32 (keep_orig_output_dtype).
    expected_mod = tvm.IRModule.from_expr(
        relay.cast(
            relay.nn.conv2d(
                relay.cast(data, target_precision),
                relay.cast(weight, target_precision),
                strides=(1, 1),
                padding=(1, 1),
                out_dtype=target_precision,
            ),
            "float32",
        )
    )
    expected_mod = tvm.relay.transform.InferType()(expected_mod)
    assert not tvm.ir.structural_equal(amp_mod, mod)
    assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_convert_single_conv_fp64():
    """As above but checks choosing a mixed_precision_type other than FP16 works"""
    data_shape = (1, 3, 32, 32)
    weight_shape = (5, 3, 3, 3)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=weight_shape, dtype="float32")
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
    mod = tvm.IRModule.from_expr(conv)
    mod = tvm.relay.transform.InferType()(mod)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
    }
    amp_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype="float64", atol=0.01, rtol=1e-3
    )
    # Note we still accumulate to FP32 by default, a user would need to overwrite default
    # behavior to make this make more sense.
    expected_mod = tvm.IRModule.from_expr(
        relay.nn.conv2d(
            relay.cast(data, "float64"),
            relay.cast(weight, "float64"),
            strides=(1, 1),
            padding=(1, 1),
            out_dtype="float64",
        ),
    )
    expected_mod = tvm.relay.transform.InferType()(expected_mod)
    assert not tvm.ir.structural_equal(amp_mod, mod)
    assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_convert_conv_bn(target_precision):
    """Conv is green and batch norm is gray. As Conv should output fp16 batch_norm should be green."""
    data_shape = (1, 3, 32, 32)
    weight_shape = (5, 3, 3, 3)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=weight_shape, dtype="float32")
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
    bn_shape = [5]
    gamma = relay.var("gamma", shape=bn_shape)
    beta = relay.var("beta", shape=bn_shape)
    moving_mean = relay.var("moving_mean", shape=bn_shape)
    moving_var = relay.var("moving_var", shape=bn_shape)
    bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)
    mod = tvm.IRModule.from_expr(bn[0])
    mod = tvm.relay.transform.InferType()(mod)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
        "gamma": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
        "beta": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
        "moving_mean": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
        "moving_var": np.random.uniform(-1, 1, size=bn_shape).astype("float32"),
    }
    amp_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.025, rtol=0.01
    )
    # Creating expected module
    # Everything — conv inputs and all batch-norm params — is cast to the
    # target precision and the whole chain runs there.
    data = relay.cast(relay.var("data", shape=data_shape), target_precision)
    weight = relay.cast(relay.var("weight", shape=weight_shape), target_precision)
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype=target_precision)
    bn_shape = [5]
    gamma = relay.cast(relay.var("gamma", shape=bn_shape), target_precision)
    beta = relay.cast(relay.var("beta", shape=bn_shape), target_precision)
    moving_mean = relay.cast(relay.var("moving_mean", shape=bn_shape), target_precision)
    moving_var = relay.cast(relay.var("moving_var", shape=bn_shape), target_precision)
    bn = relay.nn.batch_norm(conv, gamma, beta, moving_mean, moving_var)
    expected_mod = tvm.IRModule.from_expr(bn[0])
    expected_mod = tvm.relay.transform.InferType()(expected_mod)
    assert not tvm.ir.structural_equal(amp_mod, mod)
    assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_do_not_convert_softmax(target_precision):
    """Softmax is red-listed: the pass must leave the graph entirely alone."""
    inp = relay.var("a", shape=[1, 2, 3])
    graph = relay.nn.softmax(inp)
    mod = tvm.IRModule.from_expr(graph)
    mod = tvm.relay.transform.InferType()(mod)
    converted = ToMixedPrecision(target_precision)(mod)
    baseline = tvm.relay.transform.InferType()(mod)
    assert tvm.ir.structural_equal(baseline, converted)
def test_do_not_convert_arange(target_precision):
    """Arange is red-listed: the pass must leave the graph entirely alone."""
    start = relay.const(1, "float32")
    stop = relay.const(128, "float32")
    mod = tvm.IRModule.from_expr(relay.arange(start, stop))
    converted = ToMixedPrecision(target_precision)(mod)
    baseline = tvm.relay.transform.InferType()(mod)
    assert tvm.ir.structural_equal(baseline, converted)
def test_do_not_convert_summation(target_precision):
    """Ops involving a large summation (sum, mean, global/adaptive avg-pool)
    must not be converted to reduced precision."""
    data = relay.var("a", shape=[1, 3, 16, 16])
    summation_ops = (
        relay.sum,
        relay.mean,
        relay.nn.global_avg_pool2d,
        lambda inp: relay.nn.adaptive_avg_pool2d(inp, (1, 1)),
    )
    for make_graph in summation_ops:
        mod = tvm.IRModule.from_expr(make_graph(data))
        converted = ToMixedPrecision(target_precision)(mod)
        baseline = tvm.relay.transform.InferType()(mod)
        assert tvm.ir.structural_equal(baseline, converted)
def test_green_gray_propagates_simple(target_precision):
    """Conv is a green listed operation, while addition is gray.

    As Conv outputs fp16 the add should be done in fp16.
    """
    data_shape = (1, 3, 32, 32)
    weight_shape = (5, 3, 3, 3)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=weight_shape, dtype="float32")
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
    conv = conv + conv
    mod = tvm.IRModule.from_expr(conv)
    mod = tvm.relay.transform.InferType()(mod)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
    }
    amp_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
    )
    # Expected: the conv runs in the target precision and the add reuses its
    # reduced-precision output directly (no intermediate casts).
    conv_expr = relay.nn.conv2d(
        relay.cast(data, target_precision),
        relay.cast(weight, target_precision),
        strides=(1, 1),
        padding=(1, 1),
        out_dtype=target_precision,
    )
    expected_mod = tvm.IRModule.from_expr(conv_expr + conv_expr)
    expected_mod = tvm.relay.transform.InferType()(expected_mod)
    assert not tvm.ir.structural_equal(amp_mod, mod)
    assert tvm.ir.structural_equal(amp_mod, expected_mod)
def test_green_red_not_use_extraneous_cast(target_precision):
    """Conv. is a green listed operation, while softmax is red.
    Conv. also by default accumulates to fp32 but outputs fp16.
    We want to avoid a situation where we have extraneous casts.
    E.g. because softmax wants to operate on FP32 we might have
    conv (FP32) -> cast (FP16) -> cast (FP32) -> softmax (FP32)
    To get around this internally when we cast in the pass we cache
    the output nodes and the reverse of the cast back to the original
    node. For example casting the `conv (FP32)` to FP16 would produce:
    `conv (FP32) -> cast (FP16)`
    As the outputs. Now anytime we try to cast the `conv (FP32)` node
    to FP16 it would return the cached result instead of a new cast node:
    `conv (FP32) -> cast (FP16)`
    Furthermore, if we try to cast the `cast (FP16)` node back to FP32 it
    would just return
    `conv (FP32)`.
    This test makes sure this behavior occurs.
    """
    data_shape = (1, 3, 32, 32)
    weight_shape = (5, 3, 3, 3)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=weight_shape, dtype="float32")
    conv = relay.nn.conv2d(data, weight, strides=(1, 1), padding=(1, 1), out_dtype="float32")
    result = relay.nn.softmax(conv)
    mod = tvm.IRModule.from_expr(result)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=data_shape).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=weight_shape).astype("float32"),
    }
    # Run the AMP pass and check numerical closeness against the fp32 module.
    amp_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=1e-3
    )
    # Construct expected structure: a single cast back to fp32 between the
    # mixed-precision conv and the fp32 softmax (no back-to-back cast pair).
    conv = relay.cast(
        relay.nn.conv2d(
            relay.cast(data, target_precision),
            relay.cast(weight, target_precision),
            strides=(1, 1),
            padding=(1, 1),
            out_dtype=target_precision,
        ),
        "float32",
    )
    result = relay.nn.softmax(conv)
    expected_mod = tvm.IRModule.from_expr(result)
    expected_mod = InferType()(expected_mod)
    assert tvm.ir.structural_equal(expected_mod, amp_mod)
def test_red_gray_propagates_simple(target_precision):
    """Everything after a softmax should be in FP32 (exception green colored ops)"""
    in_shape = [1, 2, 3]
    inp = relay.var("a", shape=in_shape)
    softmaxed = relay.nn.softmax(inp)
    doubled = softmaxed + softmaxed
    module = tvm.IRModule.from_expr(doubled)
    module = tvm.relay.transform.InferType()(module)
    params = {
        "a": np.random.uniform(-1, 1, size=in_shape).astype("float32"),
    }
    converted = verify_mixed_precision_output_close(
        module, params, mixed_precision_dtype=target_precision, atol=0.0, rtol=0.0
    )
    # Nothing downstream of the red-listed softmax may be converted,
    # so the module must come back structurally unchanged.
    assert tvm.ir.structural_equal(module, converted)
def test_let_statement_simple(target_precision):
    """A 'simple' let statement example.
    Noticeable is the mutation of the bound variable types.
    """
    var1 = relay.var("var1", shape=[1, 20])
    var2 = relay.var("var2", shape=[1, 20])
    data = relay.var("data", shape=[1, 20])
    weight = relay.var("weight", shape=[20, 20])
    r1 = var1 + var1
    r2 = var2 + var2
    # let var1 = dense(data, weight) in (let var2 = dense(r1, weight) in r2)
    let2 = relay.Let(var2, relay.nn.dense(r1, weight, units=20), r2)
    let1 = relay.Let(var1, relay.nn.dense(data, weight, units=20), let2)
    mod = tvm.IRModule.from_expr(let1)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=[1, 20]).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=[20, 20]).astype("float32"),
    }
    output_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.05, rtol=0.15
    )
    # Construct expected structure: bound vars carry the target precision and
    # the free inputs are cast before feeding the dense ops.
    var1 = relay.var("var1", shape=[1, 20], dtype=target_precision)
    var2 = relay.var("var2", shape=[1, 20], dtype=target_precision)
    data = relay.cast(relay.var("data", shape=[1, 20]), target_precision)
    weight = relay.cast(relay.var("weight", shape=[20, 20]), target_precision)
    r1 = var1 + var1
    r2 = var2 + var2
    let2 = relay.Let(
        var2,
        relay.nn.dense(r1, weight, units=20, out_dtype=target_precision),
        r2,
    )
    let1 = relay.Let(
        var1,
        relay.nn.dense(data, weight, units=20, out_dtype=target_precision),
        let2,
    )
    expected_mod = tvm.IRModule.from_expr(let1)
    expected_mod = InferType()(expected_mod)
    assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_where_simple(target_precision):
    """where() consumes the converted dense output; its operands should be fp16."""
    data = relay.var("data", shape=[1, 20])
    weight = relay.var("weight", shape=[20, 20])
    a = relay.nn.dense(data, weight, units=20)
    b = relay.where(data, a, a)
    mod = tvm.IRModule.from_expr(b)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=[1, 20]).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=[20, 20]).astype("float32"),
    }
    output_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
    )
    # Create expected module
    data = relay.cast(relay.var("data", shape=[1, 20]), target_precision)
    weight = relay.cast(relay.var("weight", shape=[20, 20]), target_precision)
    a = relay.nn.dense(data, weight, units=20, out_dtype=target_precision)
    b = relay.where(data, a, a)
    expected_mod = tvm.IRModule.from_expr(b)
    expected_mod = InferType()(expected_mod)
    assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_batch_matmul_simple(target_precision):
    """Batch matmul is a special case where we try to accumulate to fp16.
    This is due to the fact heterogeneous accumulation dtypes does not work
    on all platforms at the moment.
    """
    data = relay.var("data", shape=[1, 1, 20])
    weight = relay.var("weight", shape=[1, 20, 20])
    a = relay.nn.batch_matmul(data, weight)
    mod = tvm.IRModule.from_expr(a)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=[1, 1, 20]).astype("float32"),
        "weight": np.random.uniform(-1, 1, size=[1, 20, 20]).astype("float32"),
    }
    output_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
    )
    # Create expected module: inputs cast to the target precision and the
    # batch_matmul accumulating in that same precision (not fp32).
    data = relay.cast(relay.var("data", shape=[1, 1, 20]), target_precision)
    weight = relay.cast(relay.var("weight", shape=[1, 20, 20]), target_precision)
    a = relay.nn.batch_matmul(data, weight, out_dtype=target_precision)
    expected_mod = tvm.IRModule.from_expr(a)
    expected_mod = InferType()(expected_mod)
    assert tvm.ir.structural_equal(expected_mod, output_mod)
def test_convert_follow_node_with_integer_arguments(target_precision):
    """Tests the conversion of a follow op with integer arguments + constant float args.
    The follow op should convert the floating point argument into fp16 as constants/vars
    will always be converted if safe to do so.
    """
    data = relay.var("data", shape=[1, 10], dtype="float32")
    # We use an addition to make sure the input indices are not a var
    # (which are always casted if safe)
    indices = relay.var("indices", shape=[1, 1], dtype="int32") + relay.const(0, dtype="int32")
    take = relay.take(data, indices, axis=0)
    mod = tvm.IRModule.from_expr(take)
    mod_params = {
        "data": np.random.uniform(-1, 1, size=[1, 10]).astype("float32"),
        "indices": np.array([[0]]).astype("int32"),
    }
    output_mod = verify_mixed_precision_output_close(
        mod, mod_params, mixed_precision_dtype=target_precision, atol=0.01, rtol=0.01
    )
    # Create expected module: only the float input is cast; the integer
    # `indices` expression is left in int32.
    data = relay.cast(relay.var("data", shape=[1, 10]), target_precision)
    take = relay.take(data, indices, axis=0)
    expected_mod = tvm.IRModule.from_expr(take)
    expected_mod = InferType()(expected_mod)
    assert tvm.ir.structural_equal(expected_mod, output_mod)
if __name__ == "__main__":
    # Discover and run every test in this file via TVM's pytest wrapper.
    tvm.testing.main()
| 20,545 | 36.907749 | 102 | py |
tvm | tvm-main/tests/python/relay/test_json_runtime.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for JSON codegen and runtime."""
import os
import sys
import numpy as np
import tvm
import tvm.relay.op as reg
import tvm.relay.testing
from tvm import relay, runtime
from tvm.contrib import utils
from tvm.relay import transform
from tvm.relay.backend import te_compiler
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.contrib.register import get_pattern_table
def set_func_attr(func, compile_name, symbol_name):
    """Attach the attributes that mark ``func`` as an external (BYOC) function."""
    one = tvm.tir.IntImm("int32", 1)
    for key, value in (
        ("Primitive", one),
        ("Inline", one),
        ("Compiler", compile_name),
        ("global_symbol", symbol_name),
    ):
        func = func.with_attr(key, value)
    return func
def check_result(
    mod, ref_mod, map_inputs, out_shape, tol=1e-5, target="llvm", device=tvm.cpu(), params=None
):
    """Build/run ``mod`` on both the VM and graph executor and compare each
    result against a reference run of ``ref_mod`` to within ``tol``."""
    if sys.platform == "win32":
        print("Skip test on Windows for now")
        return
    # Run the reference result
    te_compiler.get().clear()
    with tvm.transform.PassContext(opt_level=3):
        json, lib, param = relay.build(ref_mod, target=target, params=params)
    rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
    for name, data in map_inputs.items():
        rt_mod.set_input(name, data)
    rt_mod.set_input(**param)
    rt_mod.run()
    out = tvm.nd.empty(out_shape, device=device)
    out = rt_mod.get_output(0, out)
    ref_result = out.numpy()
    def check_vm_result():
        # Compile, serialize, reload and run `mod` through the Relay VM.
        te_compiler.get().clear()
        with relay.build_config(opt_level=3):
            exe = relay.vm.compile(mod, target=target, params=params)
        code, lib = exe.save()
        exe = runtime.vm.Executable.load_exec(code, lib)
        vm = runtime.vm.VirtualMachine(exe, device)
        out = vm.run(**map_inputs)
        tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
    def check_graph_executor_result():
        # Build and run `mod` with the graph executor.
        te_compiler.get().clear()
        with relay.build_config(opt_level=3):
            json, lib, param = relay.build(mod, target=target, params=params)
        rt_mod = tvm.contrib.graph_executor.create(json, lib, device)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.set_input(**param)
        rt_mod.run()
        out = tvm.nd.empty(out_shape, device=device)
        out = rt_mod.get_output(0, out)
        tvm.testing.assert_allclose(out.numpy(), ref_result, rtol=tol, atol=tol)
    check_vm_result()
    check_graph_executor_result()
def test_conv2d():
    """Test a subgraph with a single conv2d operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    def conv2d_direct():
        # Direct conv2d offloaded to a DNNL external function, plus an
        # equivalent plain-Relay reference module and random inputs.
        dtype = "float32"
        ishape = (1, 1, 99, 12)
        w1shape = (54, 1, 3, 3)
        data0 = relay.var("data", shape=ishape, dtype=dtype)
        weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
        out = relay.nn.conv2d(
            data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
        )
        func = relay.Function([data0, weight0], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight = relay.var("weight", shape=(w1shape), dtype=dtype)
        main_f = relay.Function([data, weight], glb_var(data, weight))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data0 = relay.var("data", shape=ishape, dtype=dtype)
        weight0 = relay.var("weight", shape=w1shape, dtype=dtype)
        out = relay.nn.conv2d(
            data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1)
        )
        main_f = relay.Function([data0, weight0], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        i_data = np.random.uniform(0, 1, ishape).astype(dtype)
        w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
        return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 54, 50, 6)
    def group_conv2d():
        # Depthwise/grouped conv2d (groups=32) variant of the same setup.
        dtype = "float32"
        ishape = (1, 32, 14, 14)
        w2shape = (32, 1, 3, 3)
        data0 = relay.var("data", shape=(ishape), dtype=dtype)
        weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
        out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
        func = relay.Function([data0, weight0], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data = relay.var("data", shape=(ishape), dtype=dtype)
        weight = relay.var("weight", shape=(w2shape), dtype=dtype)
        main_f = relay.Function([data, weight], glb_var(data, weight))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data0 = relay.var("data", shape=(ishape), dtype=dtype)
        weight0 = relay.var("weight", shape=(w2shape), dtype=dtype)
        out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=32)
        main_f = relay.Function([data0, weight0], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        i_data = np.random.uniform(0, 1, ishape).astype(dtype)
        w_data = np.random.uniform(0, 1, w2shape).astype(dtype)
        return mod, ref_mod, {"data": i_data, "weight": w_data}, (1, 32, 14, 14)
    for mod, ref_mod, map_inputs, out_shape in [conv2d_direct(), group_conv2d()]:
        check_result(mod, ref_mod, map_inputs, out_shape, tol=1e-5)
def test_add():
    """Test a subgraph with a single add operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    shape = (10, 10)
    def gen_add():
        # Build the DNNL-offloaded module and the plain-Relay reference.
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        out = relay.add(data0, data1)
        func = relay.Function([data0, data1], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        main_f = relay.Function([data0, data1], glb_var(data0, data1))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        out = relay.add(data0, data1)
        main_f = relay.Function([data0, data1], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        return mod, ref_mod
    mod, ref_mod = gen_add()
    data0 = np.random.uniform(0, 1, shape).astype(dtype)
    data1 = np.random.uniform(0, 1, shape).astype(dtype)
    check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_multiply():
    """Test a subgraph with a single multiply operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    shape = (10, 10)
    def gen_multiply():
        # Build the DNNL-offloaded module and the plain-Relay reference.
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        out = relay.multiply(data0, data1)
        func = relay.Function([data0, data1], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        main_f = relay.Function([data0, data1], glb_var(data0, data1))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        data1 = relay.var("data1", shape=shape, dtype=dtype)
        out = relay.multiply(data0, data1)
        main_f = relay.Function([data0, data1], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        return mod, ref_mod
    mod, ref_mod = gen_multiply()
    data0 = np.random.uniform(0, 1, shape).astype(dtype)
    data1 = np.random.uniform(0, 1, shape).astype(dtype)
    check_result(mod, ref_mod, {"data0": data0, "data1": data1}, shape, tol=1e-5)
def test_relu():
    """Test a subgraph with a single ReLU operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    shape = (1, 32, 14, 14)
    def gen_relu(shape):
        # Build the DNNL-offloaded module and the plain-Relay reference
        # for the given input shape.
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        out = relay.nn.relu(data0)
        func = relay.Function([data0], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        main_f = relay.Function([data0], glb_var(data0))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data0 = relay.var("data0", shape=shape, dtype=dtype)
        out = relay.nn.relu(data0)
        main_f = relay.Function([data0], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        return mod, ref_mod
    def check(shape):
        # Input includes negative values so ReLU has something to clip.
        mod, ref_mod = gen_relu(shape)
        data0 = np.random.uniform(-1, 1, shape).astype(dtype)
        check_result(
            mod,
            ref_mod,
            {
                "data0": data0,
            },
            shape,
            tol=1e-5,
        )
    check(shape=(1, 32, 14, 14))
    check(shape=(1, 32))
def test_dense():
    """Test a subgraph with a single dense operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    a_shape = (1, 512)
    b_shape = (1024, 512)
    def gen_dense():
        # Build the DNNL-offloaded module and the plain-Relay reference.
        a = relay.var("A", shape=a_shape, dtype=dtype)
        b = relay.var("B", shape=b_shape, dtype=dtype)
        out = relay.nn.dense(a, b)
        func = relay.Function([a, b], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        a = relay.var("A", shape=a_shape, dtype=dtype)
        b = relay.var("B", shape=b_shape, dtype=dtype)
        main_f = relay.Function([a, b], glb_var(a, b))
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        a = relay.var("A", shape=a_shape, dtype=dtype)
        b = relay.var("B", shape=b_shape, dtype=dtype)
        out = relay.nn.dense(a, b)
        main_f = relay.Function([a, b], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        return mod, ref_mod
    mod, ref_mod = gen_dense()
    data_a = np.random.uniform(0, 1, a_shape).astype(dtype)
    data_b = np.random.uniform(0, 1, b_shape).astype(dtype)
    check_result(mod, ref_mod, {"A": data_a, "B": data_b}, (1, 1024), tol=1e-5)
def test_bn():
    """Test a subgraph with a single batch_norm operator."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    d_shape = (1, 8)
    c_shape = (8,)
    def gen_bn():
        # Build the DNNL-offloaded module and the plain-Relay reference.
        # Only the normalized output (bn[0]) is returned, not the moments.
        data = relay.var("data", shape=d_shape)
        gamma = relay.var("gamma", shape=c_shape)
        beta = relay.var("beta", shape=c_shape)
        moving_mean = relay.var("moving_mean", shape=c_shape)
        moving_var = relay.var("moving_var", shape=c_shape)
        bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
        out = bn[0]
        func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
        func = set_func_attr(func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = func
        mod = transform.InferType()(mod)
        data = relay.var("data", shape=d_shape)
        gamma = relay.var("gamma", shape=c_shape)
        beta = relay.var("beta", shape=c_shape)
        moving_mean = relay.var("moving_mean", shape=c_shape)
        moving_var = relay.var("moving_var", shape=c_shape)
        main_f = relay.Function(
            [data, gamma, beta, moving_mean, moving_var],
            glb_var(data, gamma, beta, moving_mean, moving_var),
        )
        mod["main"] = main_f
        mod = transform.InferType()(mod)
        data = relay.var("data", shape=d_shape)
        gamma = relay.var("gamma", shape=c_shape)
        beta = relay.var("beta", shape=c_shape)
        moving_mean = relay.var("moving_mean", shape=c_shape)
        moving_var = relay.var("moving_var", shape=c_shape)
        bn = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var)
        out = bn[0]
        main_f = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_f
        ref_mod = transform.InferType()(ref_mod)
        return mod, ref_mod
    mod, ref_mod = gen_bn()
    data = np.random.uniform(-1, 1, d_shape).astype(dtype)
    gamma = np.random.uniform(-1, 1, c_shape).astype(dtype)
    beta = np.random.uniform(-1, 1, c_shape).astype(dtype)
    moving_mean = np.random.uniform(-1, 1, c_shape).astype(dtype)
    moving_var = np.random.uniform(-1, 1, c_shape).astype(dtype)
    check_result(
        mod,
        ref_mod,
        {
            "data": data,
            "gamma": gamma,
            "beta": beta,
            "moving_mean": moving_mean,
            "moving_var": moving_var,
        },
        d_shape,
        tol=1e-5,
    )
def test_multiple_ops():
    """Test a subgraph with multiple operators."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 32, 3, 3)
    w2shape = (64, 32, 5, 5)
    def get_net():
        # conv2d -> relu -> conv2d -> relu chain as a single main function.
        data = relay.var("data", relay.TensorType(ishape, dtype))
        w1 = relay.var("w1", relay.TensorType(w1shape, dtype))
        w2 = relay.var("w2", relay.TensorType(w2shape, dtype))
        layer = relay.nn.conv2d(data=data, weight=w1, kernel_size=(3, 3), padding=(1, 1))
        layer = relay.nn.relu(layer)
        layer = relay.nn.conv2d(data=layer, weight=w2, kernel_size=(5, 5), padding=(2, 2))
        layer = relay.nn.relu(layer)
        main_f = relay.Function([data, w1, w2], layer)
        mod = tvm.IRModule()
        mod["main"] = main_f
        return mod
    def get_partitoned_mod(mod):
        # Simplify/fold first, then annotate, merge and partition the
        # whole graph for the "dnnl" external codegen.
        remove_bn_pass = tvm.transform.Sequential(
            [
                transform.InferType(),
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
            ]
        )
        byoc_pass = tvm.transform.Sequential(
            [
                remove_bn_pass,
                transform.AnnotateTarget("dnnl"),
                transform.MergeCompilerRegions(),
                transform.PartitionGraph(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            return byoc_pass(mod)
    ref_mod = get_net()
    mod = get_partitoned_mod(ref_mod)
    data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1 = np.random.uniform(0, 1, w1shape).astype(dtype)
    w2 = np.random.uniform(0, 1, w2shape).astype(dtype)
    check_result(
        mod,
        ref_mod,
        {
            "data": data,
            "w1": w1,
            "w2": w2,
        },
        (1, 64, 14, 14),
        tol=1e-5,
    )
def test_composite():
    """Test DNNL patterns and their composite functions."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    def conv2d_relu():
        # conv2d+relu fused into one composite, wrapped in a DNNL partition.
        ishape = (1, 32, 14, 14)
        w1shape = (32, 32, 3, 3)
        # Composite function
        in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
        in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
        conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
        relu = relay.nn.relu(conv2d)
        func = relay.Function([in_1, in_2], relu)
        func = func.with_attr("Composite", "dnnl.conv2d_relu")
        func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.relu_")
        # Partition function
        arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
        arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
        call = relay.Call(func, [arg_1, arg_2])
        p_func = relay.Function([arg_1, arg_2], call)
        p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = p_func
        mod = transform.InferType()(mod)
        # Main function
        data = relay.var("data", shape=ishape, dtype=dtype)
        weight = relay.var("weight", shape=w1shape, dtype=dtype)
        main_func = relay.Function([data, weight], glb_var(data, weight))
        mod["main"] = main_func
        mod = transform.InferType()(mod)
        # Reference module
        data = relay.var("data", shape=ishape, dtype=dtype)
        weight = relay.var("weight", shape=w1shape, dtype=dtype)
        conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
        relu = relay.nn.relu(conv2d)
        main_func = relay.Function([data, weight], relu)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_func
        ref_mod = transform.InferType()(ref_mod)
        i_data = np.random.uniform(0, 1, ishape).astype(dtype)
        w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
        return mod, ref_mod, {"data": i_data, "weight": w1_data}, (1, 32, 14, 14)
    def conv2d_bias_relu():
        # conv2d+bias-add+relu fused into one composite, DNNL-partitioned.
        ishape = (1, 32, 14, 14)
        w1shape = (32, 32, 3, 3)
        bshape = (32, 1, 1)
        # Composite function
        in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
        in_2 = relay.var("in_2", shape=w1shape, dtype=dtype)
        in_3 = relay.var("in_3", shape=bshape, dtype=dtype)
        conv2d = relay.nn.conv2d(in_1, in_2, kernel_size=(3, 3), padding=(1, 1))
        add = relay.add(conv2d, in_3)
        relu = relay.nn.relu(add)
        func = relay.Function([in_1, in_2, in_3], relu)
        func = func.with_attr("Composite", "dnnl.conv2d_bias_relu")
        func = func.with_attr("PartitionedFromPattern", "nn.conv2d_add_nn.relu_")
        # Partition function
        arg_1 = relay.var("arg_1", shape=ishape, dtype=dtype)
        arg_2 = relay.var("arg_2", shape=w1shape, dtype=dtype)
        arg_3 = relay.var("arg_3", shape=bshape, dtype=dtype)
        call = relay.Call(func, [arg_1, arg_2, arg_3])
        p_func = relay.Function([arg_1, arg_2, arg_3], call)
        p_func = set_func_attr(p_func, "dnnl", "tvmgen_default_dnnl_0")
        glb_var = relay.GlobalVar("tvmgen_default_dnnl_0")
        mod = tvm.IRModule()
        mod[glb_var] = p_func
        mod = transform.InferType()(mod)
        # Main function
        data = relay.var("data", shape=ishape, dtype=dtype)
        weight = relay.var("weight", shape=w1shape, dtype=dtype)
        bias = relay.var("bias", shape=bshape, dtype=dtype)
        main_func = relay.Function([data, weight, bias], glb_var(data, weight, bias))
        mod["main"] = main_func
        mod = transform.InferType()(mod)
        # Reference module
        data = relay.var("data", shape=ishape, dtype=dtype)
        weight = relay.var("weight", shape=w1shape, dtype=dtype)
        bias = relay.var("bias", shape=bshape, dtype=dtype)
        conv2d = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1))
        add = relay.add(conv2d, bias)
        relu = relay.nn.relu(add)
        main_func = relay.Function([data, weight, bias], relu)
        ref_mod = tvm.IRModule()
        ref_mod["main"] = main_func
        ref_mod = transform.InferType()(ref_mod)
        i_data = np.random.uniform(0, 1, ishape).astype(dtype)
        w1_data = np.random.uniform(0, 1, w1shape).astype(dtype)
        b_data = np.random.uniform(0, 1, bshape).astype(dtype)
        return mod, ref_mod, {"data": i_data, "weight": w1_data, "bias": b_data}, (1, 32, 14, 14)
    for mod, ref_mod, input_maps, out_shape in [conv2d_relu(), conv2d_bias_relu()]:
        check_result(mod, ref_mod, input_maps, out_shape, tol=1e-5)
def test_constant():
    """Test the subgraph with (var, const, ...) arguments."""
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, 32, 3, 3)
    data = relay.var("data", shape=ishape, dtype=dtype)
    weight = relay.var("weight", shape=wshape, dtype=dtype)
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")
    layer = relay.nn.conv2d(data=data, weight=weight, kernel_size=(3, 3), padding=(1, 1))
    bn_output = relay.nn.batch_norm(layer, bn_gamma, bn_beta, bn_mmean, bn_mvar)
    out = bn_output[0]
    out = relay.nn.relu(out)
    func = relay.Function(relay.analysis.free_vars(out), out)
    # Bind generated weights/BN params as constants so the partitioned
    # function receives a mix of vars and constants.
    ref_mod, params = tvm.relay.testing.create_workload(func)
    ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
    remove_bn_pass = tvm.transform.Sequential(
        [
            transform.InferType(),
            transform.SimplifyInference(),
            transform.FoldConstant(),
            transform.FoldScaleAxis(),
        ]
    )
    dnnl_patterns = get_pattern_table("dnnl")
    composite_partition = tvm.transform.Sequential(
        [
            transform.MergeComposite(dnnl_patterns),
            transform.AnnotateTarget("dnnl"),
            transform.PartitionGraph(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        ref_mod = remove_bn_pass(ref_mod)
        mod = composite_partition(ref_mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    check_result(mod, ref_mod, {"data": i_data}, (1, 32, 14, 14), tol=1e-5)
def test_partial_constant():
    """Test the subgraph with (const, var, const, var) arguments.

    ``in_1`` and ``in_3`` are bound as constants; ``in_2`` and ``in_4`` stay
    as runtime inputs, so the partitioned DNNL function sees interleaved
    constant/variable arguments.
    """
    if not tvm.get_global_func("runtime.DNNLJSONRuntimeCreate", True):
        print("skip because DNNL codegen is not available")
        return
    dtype = "float32"
    ishape = (10, 10)
    in_1 = relay.var("in_1", shape=ishape, dtype=dtype)
    in_2 = relay.var("in_2", shape=ishape, dtype=dtype)
    in_3 = relay.var("in_3", shape=ishape, dtype=dtype)
    in_4 = relay.var("in_4", shape=ishape, dtype=dtype)
    add1 = relay.add(in_1, in_2)
    add2 = relay.add(add1, in_3)
    add3 = relay.add(add2, in_3)
    # Fix: use in_4 so every declared argument participates in the
    # computation. Previously `add4 = relay.add(add3, in_3)` left in_4 as a
    # dead parameter, so the (const, var, const, var) pattern named in the
    # docstring was never actually exercised.
    add4 = relay.add(add3, in_4)
    func = relay.Function([in_1, in_2, in_3, in_4], add4)
    ref_mod = tvm.IRModule.from_expr(func)
    ref_mod = relay.transform.InferType()(ref_mod)
    data1 = np.random.uniform(0, 1, ishape).astype(dtype)
    data3 = np.random.uniform(0, 1, ishape).astype(dtype)
    # Bind in_1 and in_3 as constants.
    params = {
        "in_1": tvm.nd.array(data1, device=tvm.cpu(0)),
        "in_3": tvm.nd.array(data3, device=tvm.cpu(0)),
    }
    ref_mod["main"] = bind_params_by_name(ref_mod["main"], params)
    opt_pass = tvm.transform.Sequential(
        [
            transform.InferType(),
            transform.SimplifyInference(),
            transform.FoldConstant(),
            transform.FoldScaleAxis(),
            transform.AnnotateTarget("dnnl"),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        mod = opt_pass(ref_mod)
    data2 = np.random.uniform(0, 1, ishape).astype(dtype)
    data4 = np.random.uniform(0, 1, ishape).astype(dtype)
    check_result(mod, ref_mod, {"in_2": data2, "in_4": data4}, (10, 10), tol=1e-5)
if __name__ == "__main__":
    # Run each DNNL JSON-runtime test directly when executed as a script.
    test_conv2d()
    test_add()
    test_multiply()
    test_relu()
    test_dense()
    test_bn()
    test_multiple_ops()
    test_composite()
    test_constant()
    test_partial_constant()
| 26,272 | 35.138927 | 97 | py |
tvm | tvm-main/tests/python/relay/test_const.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
from tvm.relay.frontend.common import infer_type
from tvm.relay import op as _op
def test_const_dtype():
    """Constants must honour the explicitly requested dtype."""
    # strides needs to be autoconverted to int64 on Windows
    stride_values = np.array((1, 1)).astype("int32")
    stride_const = _op.const(stride_values, dtype="int64")
    assert infer_type(stride_const).checked_type.dtype == np.dtype(np.int64)
    # A tvm.nd.array input keeps its uint8 dtype.
    nd_input = tvm.nd.array(np.random.randint(0, high=255, size=(2, 3), dtype="uint8"))
    uint8_const = _op.const(nd_input, dtype="uint8")
    assert uint8_const.data.numpy().dtype == np.dtype(np.uint8)
    # A Python scalar becomes the requested int8.
    scalar_const = _op.const(1, dtype="int8")
    assert scalar_const.data.numpy().dtype == np.dtype(np.int8)
    # Without an explicit dtype, the numpy dtype (float32) is preserved.
    weight_const = relay.const(np.zeros((3, 10, 3, 3), dtype="float32"))
    assert weight_const.data.numpy().dtype == np.dtype(np.float32)
| 1,621 | 35.044444 | 80 | py |
tvm | tvm-main/tests/python/relay/test_pipeline_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import os
import time
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform, build_module
from tvm.relay.testing import run_opt_pass
from tvm.contrib import graph_executor, pipeline_executor, pipeline_executor_build
from tvm._ffi import get_global_func
from tvm.contrib import cc as _cc
def graph_split(expr, split_conf, params=None):
    """Split a relay function into a list of pipeline subgraph IRModules.

    expr       : the relay Function (typically mod["main"]) to split.
    split_conf : list of {"op_name": str, "op_index": int}; a cut is made at
                 the op whose repeat count reaches "op_index" (consumed in order).
    params     : optional parameter dict bound into *expr* before splitting.

    Returns the subgraph modules ordered first-to-last in the pipeline.
    """

    def get_dep_var(sub_var_dep):
        # Variables of the most recent subgraph that later subgraphs reference.
        return [var for var in sub_var_dep[len(sub_var_dep) - 1]["ref_nodes"]]

    def parse_dependency(value, snode_dep, new_input_idx):
        # Rewrite args of *value* that were defined in an earlier subgraph into
        # fresh "data_n_*" inputs, and record them as cross-subgraph references.
        new_args = []
        need_update = False
        for var in value.args:
            is_free_var = False
            for dep in snode_dep[:-1]:
                if var in dep["nodes"]:
                    # Mark the previous subgraph node as a dependency.
                    dep["nodes"][var] += 1
                    dep["ref_nodes"][var] = dep["nodes"][var]
                    # The var of this call is a free_var
                    is_free_var = True
            # if the var of this call is a free_var, recreate it and give it a fixed input name.
            if is_free_var:
                need_update = True
                new_args.append(relay.var(f"data_n_{new_input_idx}", var.checked_type))
                new_input_idx += 1
            else:
                new_args.append(var)
        # if the 'tvm.relay.expr.Call' has a free_var, recreate it with new name as 'data_n_*'.
        if need_update:
            value = tvm.relay.expr.Call(
                value.op, new_args, value.attrs, value.type_args, value.span
            )
        return value, snode_dep, new_input_idx

    def merge_constant_expr(constant_expr, expr):
        # Merge the accumulated constant Let-chain with an expression.
        if not isinstance(constant_expr.body, tvm.relay.expr.Let):
            return tvm.relay.expr.Let(constant_expr.var, constant_expr.value, expr)

        return tvm.relay.expr.Let(
            constant_expr.var, constant_expr.value, merge_constant_expr(constant_expr.body, expr)
        )

    def _recursion(anf, pipeline_mods, split_conf, constant_expr):
        # Enumerate all operators of the compute graph, then split the compute graph into a
        # group of subgraphs.
        nonlocal operator_index_map
        nonlocal new_input_idx
        nonlocal snode_dep
        cur_node_dep = snode_dep[len(snode_dep) - 1]
        if isinstance(anf, tvm.relay.Function):
            return tvm.relay.Function(
                anf.params,
                _recursion(anf.body, pipeline_mods, split_conf, constant_expr),
                anf.ret_type,
                anf.type_params,
                anf.attrs,
            )
        if isinstance(anf, tvm.relay.expr.Let):
            value = anf.value
            # record the constant expr to make sure all subgraphs can find the correct constant.
            if isinstance(value, tvm.relay.expr.Constant):
                if not constant_expr:
                    constant_expr = tvm.relay.expr.Let(anf.var, value, anf.var)
                else:
                    constant_expr = tvm.relay.expr.Let(anf.var, value, constant_expr)
            if isinstance(value, tvm.relay.expr.Call):
                new_args = []
                # build current var list
                cur_node_dep["nodes"][anf.var] = 0
                # Get the dependency information of the nodes.
                value, snode_dep, new_input_idx = parse_dependency(value, snode_dep, new_input_idx)
                if isinstance(value.op, tvm.ir.Op):
                    if value.op.name in operator_index_map:
                        operator_index_map[value.op.name] += 1
                    else:
                        operator_index_map[value.op.name] = 0
                    split_operator_name = split_conf[0]["op_name"] if split_conf else ""
                    split_operator_index = split_conf[0]["op_index"] if split_conf else ""
                    # if an operator name and repeating count in the network match with the values
                    # of the 'split configuration', then this place is where we should do the
                    # graph splitting.
                    if (
                        split_conf
                        and split_operator_name in operator_index_map
                        and operator_index_map[split_operator_name] >= split_operator_index
                    ):
                        # Do graph splitting.
                        split_conf.pop(0)
                        snode_dep.append({"nodes": {}, "ref_nodes": {}})
                        ann = _recursion(
                            anf.body,
                            pipeline_mods,
                            split_conf,
                            constant_expr,
                        )
                        snode_dep.pop()
                        dep_vars = get_dep_var(snode_dep)
                        # When the nodes of the current subgraph are the dependency node of another
                        # subgraph, we need to set them as the output of current subgraph.
                        body = relay.Tuple(dep_vars) if len(dep_vars) > 1 else anf.var
                        # when the operator of current subgraph uses previous subgraph constant
                        # as the argument of a "relay.expr.call", such constant may become a free
                        # variable if the constant does not exist in the current subgraph.
                        # merge the previous constant with current subgraph to avoid such issue.
                        if constant_expr:
                            ann = merge_constant_expr(constant_expr, ann)
                        ann = run_opt_pass(ann, transform.ToGraphNormalForm())
                        mod = tvm.IRModule.from_expr(ann)
                        pipeline_mods.insert(0, mod)
                        # Return the last node of the current subgraph.
                        return tvm.relay.expr.Let(anf.var, value, body)
            return tvm.relay.expr.Let(
                anf.var,
                value,
                _recursion(anf.body, pipeline_mods, split_conf, constant_expr),
            )
        else:
            return anf

    snode_dep = [{"nodes": {}, "ref_nodes": {}}]
    pipeline_mods = []
    operator_index_map = {}
    # Used to track new inputs created by graph splitting.
    new_input_idx = 0
    constant_expr = None
    subgraph_split_conf = split_conf.copy()
    # Binding the parameters.
    if params:
        expr = build_module.bind_params_by_name(expr, params)
    anf = run_opt_pass(expr, transform.ToANormalForm())
    anf = run_opt_pass(anf, transform.InferType())
    ann = _recursion(
        anf,
        pipeline_mods,
        subgraph_split_conf,
        constant_expr,
    )
    ann = run_opt_pass(ann.body, transform.ToGraphNormalForm())
    mod = tvm.IRModule.from_expr(ann)
    pipeline_mods.insert(0, mod)
    return pipeline_mods
def get_network():
    """Construct the whole test network.

    Returns (IRModule, dshape) where the module's "main" takes two (3, 3)
    float32 inputs ("data_0", "data_1") and yields one tuple output.  The
    graph contains enough "add" operators for `get_split_mod` to cut it
    into three pipeline subgraphs.
    """
    # NOTE(review): the previous version declared an unused `mods` list and
    # three unused `data_net*` vars (leftovers from `get_mannual_mod`); removed.
    dshape = (3, 3)
    data = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    mvalue1 = np.full((1), 1).astype("float32")
    mvalue2 = np.full((1), 2).astype("float32")
    mvalue3 = np.full((1), 3).astype("float32")
    mv1 = relay.Constant(tvm.nd.array(mvalue1))
    mv2 = relay.Constant(tvm.nd.array(mvalue2))
    mv3 = relay.Constant(tvm.nd.array(mvalue3))
    # There are three outputs in the first model.
    net1_output1 = relay.add(data, mv1)
    net1_output2 = relay.subtract(data, mv2)
    net1_output3 = relay.concatenate((net1_output1, net1_output2), axis=0)
    (net1_output3, _) = relay.split(net1_output3, indices_or_sections=2, axis=0)
    net1_output3 = relay.add(net1_output3, mv2)
    # The second model uses the output named net1_output3 of the first model as the first input,
    # the second input of the second model is data21.
    net2 = relay.add(net1_output3, mv2)
    net2 = relay.add(net2, data21)
    net2_output = relay.add(net2, mv3)
    # The third model uses the output named net2_output of the second model as the first input
    # and uses the output named net1_output2 of the first model as the second input.
    net3 = relay.multiply(net2_output, mv3)
    net3 = relay.add(net3, net1_output2)
    return tvm.IRModule.from_expr(relay.Function([data, data21], relay.Tuple([net3]))), dshape
def get_split_mod():
    """Split the test network into three pipeline subgraphs."""
    whole_mod, input_shape = get_network()
    # Cut after the 2nd and 5th "add" operators (0-based repeat counts).
    cut_points = [
        {"op_name": "add", "op_index": 1},
        {"op_name": "add", "op_index": 4},
    ]
    return graph_split(whole_mod["main"], cut_points), input_shape
def get_mannual_mod():
    """Manually build the three pipeline subgraphs (no automatic splitting)."""
    dshape = (3, 3)
    # Inputs of the first subgraph plus the extra inputs of the later ones.
    data = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data21 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    data_net1_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
    data_net1_output_2 = relay.var("data_1", relay.TensorType(dshape, "float32"))
    data_net2_output_1 = relay.var("data_0", relay.TensorType(dshape, "float32"))
    # Constant operands 1.0, 2.0 and 3.0, each of shape (1,).
    mv1, mv2, mv3 = (
        relay.Constant(tvm.nd.array(np.full((1), v).astype("float32"))) for v in (1, 2, 3)
    )
    # Subgraph 1 produces three outputs.
    net1_output1 = relay.add(data, mv1)
    net1_output2 = relay.subtract(data, mv2)
    net1_output3 = relay.multiply(data, mv3)
    # Subgraph 2 consumes subgraph 1's first output plus the global input data_1.
    net2 = relay.add(relay.add(data_net1_output_1, mv2), data21)
    net2_output = relay.add(net2, mv3)
    # Subgraph 3 consumes subgraph 2's output plus subgraph 1's second output.
    net3 = relay.add(relay.multiply(data_net2_output_1, mv3), data_net1_output_2)
    mods = [
        tvm.IRModule.from_expr(
            relay.Function([data], relay.Tuple([net1_output1, net1_output2, net1_output3]))
        ),
        tvm.IRModule.from_expr(relay.Function([data_net1_output_1, data21], net2_output)),
        tvm.IRModule.from_expr(relay.Function([data_net1_output_2, data_net2_output_1], net3)),
    ]
    return mods, dshape
def get_manual_conf(mods, target):
    """Build by hand the pipeline configuration expected for the three modules."""

    def entry(pipe, tgt, fcomp, dev):
        # One per-module config record; key order matches the generated config.
        return {
            "pipeline": pipe,
            "target_host": None,
            "mod_name": "default",
            "build": None,
            "params": None,
            "target": tgt,
            "fcompile": fcomp,
            "dev": dev,
        }

    # mod0 output 0 feeds mod1:data_n_0 and output 1 feeds mod2:data_n_2;
    # mod1's single output feeds mod2:data_n_1; mod2's output is global output 0.
    pipe_config1 = {
        "mod_idx": 0,
        "cpu_affinity": "0",
        "output": [
            {"output_idx": 0, "dependencies": [{"mod_idx": 1, "input_name": "data_n_0"}]},
            {"output_idx": 1, "dependencies": [{"mod_idx": 2, "input_name": "data_n_2"}]},
        ],
    }
    pipe_config2 = {
        "mod_idx": 1,
        "cpu_affinity": "0",
        "output": [
            {"output_idx": 0, "dependencies": [{"mod_idx": 2, "input_name": "data_n_1"}]},
        ],
    }
    pipe_config3 = {
        "mod_idx": 2,
        "cpu_affinity": "0",
        "output": [{"output_idx": 0, "dependencies": [{"global_output_index": 0}]}],
    }
    # Only the first module is built for the requested target/device; the
    # remaining two always run on the local CPU with the default compiler.
    return {
        mods[0]: entry(pipe_config1, target[0], _cc.create_shared, target[1]),
        mods[1]: entry(pipe_config2, "llvm", None, tvm.cpu(0)),
        mods[2]: entry(pipe_config3, "llvm", None, tvm.cpu(0)),
    }
def recreate_parameters(mod):
    """Return (params, mod) with every bound parameter of *mod* shifted by +10.

    Used to exercise the pipeline executor's "parameter" connection with
    values that differ from the ones baked in at build time.
    """
    with tvm.transform.PassContext(opt_level=3):
        built = relay.build(mod, "llvm")

    customized = {
        name: tvm.nd.array(arr.numpy() + np.full(arr.shape, 10).astype(arr.dtype))
        for name, arr in built.params.items()
    }
    return customized, mod
def run_modules(
    mod_configs,
    dev,
    target,
    global_input_name,
    global_input_data,
    mod_set_input,
    input_name,
    input_data,
    params_mod=None,
    params=None,
):
    """Run the subgraph modules serially and return the final outputs.

    Each module in *mod_configs* is built and executed on (*target*, *dev*);
    its outputs are forwarded to downstream modules according to the pipeline
    configuration.  The first module (or any module with no recorded inputs)
    receives *global_input_data*; *mod_set_input* additionally receives
    *input_data* on *input_name*; *params_mod* (if given) has *params* set.
    The returned dict maps global output index -> numpy array and is used to
    verify the pipeline executor result.
    """
    # Fixes vs. previous version: removed the unused `n = m.get_num_outputs()`,
    # stopped shadowing the builtin `input`, and replaced the manual `idx`
    # counter with enumerate.  Behavior is unchanged.
    mod_input = {}
    final_output = {}
    for idx, mod in enumerate(mod_configs):
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target)
        m = graph_executor.GraphModule(lib["default"](dev))
        # Feed either the outputs recorded from earlier modules or the global input.
        if idx in mod_input:
            for entry in mod_input[idx].values():
                m.set_input(entry["index"], entry["data"])
        else:
            m.set_input(global_input_name, global_input_data)
        # Setting the "input_data" into the designated module.
        if mod == mod_set_input:
            m.set_input(input_name, input_data)
        # If the module is "params_mod" then set the parameters on it.
        if params_mod == mod:
            m.set_input(None, None, **params)
        m.run()
        # Route each output either to the final result or to a later module's input.
        mconfig = mod_configs[mod]
        for output in mconfig["pipeline"]["output"]:
            output_data = m.get_output(output["output_idx"]).numpy()
            for dep in output["dependencies"]:
                if "global_output_index" in dep:
                    final_output[dep["global_output_index"]] = output_data
                else:
                    mod_idx = dep["mod_idx"]
                    name = dep["input_name"]
                    mod_input.setdefault(mod_idx, {})[name] = {
                        "index": name,
                        "data": output_data,
                    }
    return final_output
def reset_cpu_affinity(affinity):
    """Undo per-test CPU pinning: reset TVM's thread pool and the process mask."""
    # The (-2, 0) arguments ask the runtime thread pool to drop its affinity
    # configuration (matches the original call; semantics live in the runtime).
    config_threadpool = get_global_func("runtime.config_threadpool")
    config_threadpool(-2, 0)
    os.sched_setaffinity(0, affinity)
def test_pipe_runtime_error_check():
    """Exercise every configuration/executor misuse that must raise RuntimeError."""
    # This function is used to trigger runtime errors by applying wrong logic.
    if pipeline_executor_build.pipeline_executor_build_enabled():
        # Get three pipeline modules here.
        (mod1, mod2, mod3), dshape = get_split_mod()

        # The input or output name is illegal and expects a runtime error.
        pipe_error = pipeline_executor_build.PipelineConfig()
        with pytest.raises(RuntimeError):
            pipe_error[mod1]["output"][9]

        with pytest.raises(RuntimeError):
            pipe_error[mod1]["input"]["data_9"]

        # The module connection would create a cycle in the DAG: mod1 -> mod2 -> mod1.
        with pytest.raises(RuntimeError):
            pipe_error[mod1]["output"][0].connect(pipe_error[mod2]["input"]["data_0"])
            pipe_error[mod2]["output"][0].connect(pipe_error[mod1]["input"]["data_0"])

        # The module connections below are all illegal (self-connection,
        # input-to-input, and wrong-direction global hookups).
        with pytest.raises(RuntimeError):
            pipe_error[mod1]["output"][0].connect(pipe_error[mod1]["input"]["data_0"])

        with pytest.raises(RuntimeError):
            pipe_error[mod1]["input"]["data_0"].connect(pipe_error[mod1]["input"]["data_0"])

        with pytest.raises(RuntimeError):
            pipe_error[mod1]["input"]["data_0"].connect(pipe_error[mod2]["input"]["data_0"])

        with pytest.raises(RuntimeError):
            pipe_error[mod1]["output"][0].connect(pipe_error["input"]["data_0"])

        with pytest.raises(RuntimeError):
            pipe_error["input"]["data_0"].connect(pipe_error[mod1]["output"][0])

        with pytest.raises(RuntimeError):
            pipe_error["output"]["0"].connect(pipe_error[mod1]["output"][0])

        # Create a (valid) pipeline executor to check the executor's own runtime errors.
        pipe_config = pipeline_executor_build.PipelineConfig()
        pipe_config[mod1].target = "llvm"
        pipe_config[mod1].dev = tvm.cpu(0)
        pipe_config["param_group"]["param_0"].connect(pipe_config[mod1]["param"])
        pipe_config[mod1]["output"][0].connect(pipe_config["output"]["0"])
        # Build and create a pipeline module.
        with tvm.transform.PassContext(opt_level=3):
            pipeline_mod_factory = pipeline_executor_build.build(pipe_config)
        pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
        customized_parameters, _ = recreate_parameters(mod1)

        # set_params must reject empty parameters and an unknown group name.
        with pytest.raises(RuntimeError):
            pipeline_module.set_params("param_0", None)

        with pytest.raises(RuntimeError):
            pipeline_module.set_params("param_1", customized_parameters)
def test_pipeline():
    """End-to-end pipeline executor test.

    Splits the network into three modules, wires them up via PipelineConfig,
    builds/exports/reloads the pipeline, then runs 5 batches and compares the
    pipeline outputs against serial execution (`run_modules`) with the same
    customized parameters.
    """
    if pipeline_executor_build.pipeline_executor_build_enabled():
        target_list = tvm.testing.enabled_targets()
        for target in target_list:
            affinity = os.sched_getaffinity(0)
            # Get the three pipeline modules here.
            (mod1, mod2, mod3), dshape = get_split_mod()

            # Prepare batch data for pipeline computation.
            datas = []
            for i in range(5):
                datas.append(np.full(dshape, 3 + i).astype("float32"))

            pipe_config = pipeline_executor_build.PipelineConfig()

            customized_parameters, customized_parameters_mod = recreate_parameters(mod1)
            assert customized_parameters_mod == mod1
            # The global parameters group named "param_0" will be connected to "mod1" as parameters.
            pipe_config["param_group"]["param_0"].connect(pipe_config[mod1]["param"])
            # The pipeline input named "data_a" will be connected to an input named "data_0"
            # of mod1.
            pipe_config["input"]["data_a"].connect(pipe_config[mod1]["input"]["data_0"])

            # The pipeline input named "data_b" will be connected to an input named "data_1"
            # of mod2.
            pipe_config["input"]["data_b"].connect(pipe_config[mod2]["input"]["data_1"])

            # The mod1 output[0] will be connected to an input named "data_n_0" of mod2.
            pipe_config[mod1]["output"][0].connect(pipe_config[mod2]["input"]["data_n_0"])

            # The mod1 output[1] will be connected to an input named "data_n_2" of mod3.
            pipe_config[mod1]["output"][1].connect(pipe_config[mod3]["input"]["data_n_2"])

            # The mod2 output[0] will be connected to an input named "data_n_1" of mod3.
            pipe_config[mod2]["output"][0].connect(pipe_config[mod3]["input"]["data_n_1"])

            # The mod3 output[0] will be connected to pipeline output[0].
            pipe_config[mod3]["output"][0].connect(pipe_config["output"]["0"])
            # Print configuration (print(pipe_config)), the result looks like following.
            #
            # Params
            #   |param_0: mod0:param
            #
            # Inputs
            #   |data_a: mod0:data_0
            #   |data_b: mod1:data_1
            #
            # output
            #   |output(0) : mod2.output(0)
            #
            # connections
            #   |mod0.output(0)-> mod1.data_n_0
            #   |mod0.output(1)-> mod2.data_n_2
            #   |mod1.output(0)-> mod2.data_n_1

            # Set other parameters.
            pipe_config[mod1].target = target[0]
            pipe_config[mod1].dev = target[1]
            pipe_config[mod1].cpu_affinity = "0"
            pipe_config[mod1].fcompile = _cc.create_shared

            pipe_config[mod2].target = "llvm"
            pipe_config[mod2].dev = tvm.cpu(0)
            pipe_config[mod2].cpu_affinity = "0"

            pipe_config[mod3].target = "llvm"
            pipe_config[mod3].dev = tvm.cpu(0)
            pipe_config[mod3].cpu_affinity = "0"
            # Checking the configuration of modules dependency.
            mconfig = pipe_config.get_config()
            assert mconfig["module_connection"] == get_manual_conf([mod1, mod2, mod3], target)

            # Build and create a pipeline module.
            with tvm.transform.PassContext(opt_level=3):
                pipeline_mod_factory = pipeline_executor_build.build(pipe_config)

            # Export the parameter configuration to a file.
            directory_path = tvm.contrib.utils.tempdir().temp_dir
            # If the directory does not exist, create it.
            if not os.path.exists(directory_path):
                os.makedirs(directory_path)
            config_file_name = pipeline_mod_factory.export_library(directory_path)

            # Use the output of build to create and initialize PipelineModule.
            pipeline_module = pipeline_executor.PipelineModule(pipeline_mod_factory)
            assert pipeline_module

            # Use the import function to create and initialize PipelineModule.
            pipeline_module_test = pipeline_executor.PipelineModule.load_library(config_file_name)
            assert pipeline_module_test.num_outputs == 1

            input_map = pipeline_module_test.get_input_pipeline_map("data_b")
            assert input_map[0] == "1" and input_map[1] == "data_1"
            input_map = pipeline_module_test.get_input_pipeline_map("data_a")
            assert input_map[0] == "0" and input_map[1] == "data_0"
            module_index = pipeline_module_test.get_params_group_pipeline_map("param_0")
            assert module_index == 0
            # Using the parameters group name to set parameters.
            pipeline_module_test.set_params("param_0", customized_parameters)
            normal_outputs = []
            for round in range(0, len(datas)):
                data = datas[round]
                # Getting the result without setting customized parameters.
                wrong_output = run_modules(
                    mconfig["module_connection"],
                    tvm.cpu(),
                    "llvm",
                    "data_0",
                    data,
                    mod2,
                    "data_1",
                    data,
                )
                # Getting the result with setting customized parameters.
                normal_output = run_modules(
                    mconfig["module_connection"],
                    tvm.cpu(),
                    "llvm",
                    "data_0",
                    data,
                    mod2,
                    "data_1",
                    data,
                    customized_parameters_mod,
                    customized_parameters,
                )
                # Appending the normal output into the list in order to do future correctness
                # checking.
                normal_outputs.append(normal_output)
                # Setting the input data into the pipeline executor.
                pipeline_module_test.set_input("data_a", tvm.nd.array(data))
                pipeline_module_test.set_input("data_b", tvm.nd.array(data))
                input_map = pipeline_module_test.get_input_pipeline_map("data_a")
                # Checking whether the input setting of the first runtime is successful.
                # The input of the rest of runtime will go into a queue and we can not check
                # these input data here.
                if input_map[0] == "0":
                    input_data = pipeline_module_test.get_input("data_a")
                    tvm.testing.assert_allclose(data, input_data.numpy())
                assert pipeline_module_test.num_inputs == 2
                # Running the pipeline executor in the pipeline mode.
                pipeline_module_test.run()

            for k in range(0, len(datas)):
                statistic_time = 0
                outputs = pipeline_module_test.get_output()
                while len(outputs) == 0:
                    outputs = pipeline_module_test.get_output()
                    statistic_time = statistic_time + 1
                    # Poll once a second, giving up after about 5 seconds.
                    assert statistic_time < 5
                    time.sleep(1)

                for i in range(len(outputs)):
                    tvm.testing.assert_allclose(normal_outputs[k][i], outputs[i].numpy())
                    # NOTE(review): `normal_output`/`wrong_output` here are the values from
                    # the LAST round of the loop above, not round k — this only checks that
                    # customized parameters change the result at all.
                    assert not (normal_output[i] == wrong_output[i]).all()

                # `round` still holds len(datas) - 1 after the loop above, so this
                # asserts all submitted rounds have been executed.
                assert pipeline_module_test.num_executing_pipeline == round + 1

            # Reset the cpu affinity after a test.
            reset_cpu_affinity(affinity)
if __name__ == "__main__":
    # Delegate to tvm.testing so the standard test flags and filters apply.
    tvm.testing.main()
| 26,791 | 41.66242 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_concatenate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
import tvm.topi.testing
def test_same_io_qnn_params():
    """With identical qnn params on inputs and output, concat is a plain concat."""
    dtype = "int32"
    axis = 0
    x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(dtype)
    y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(dtype)

    scale_val = (62 + 64) / (np.power(2, 32) - 1.0)
    x_scale = relay.const(scale_val, "float32")
    y_scale = relay.const(scale_val, "float32")
    zero = relay.const(0, "int32")

    x = relay.var("x", shape=(1, 64), dtype=dtype)
    y = relay.var("y", shape=(1, 64), dtype=dtype)
    z = relay.qnn.op.concatenate(
        (x, y),
        input_scales=(x_scale, y_scale),
        input_zero_points=(zero, zero),
        output_scale=y_scale,
        output_zero_point=zero,
        axis=axis,
    )

    mod = tvm.IRModule.from_expr(relay.Function([x, y], z))
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)

    result = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(
        mod["main"]
    )(x_data, y_data)
    np.testing.assert_equal(result.numpy(), np.concatenate((x_data, y_data), axis=axis))
def test_different_io_qnn_params():
    """Inputs with distinct zero points are shifted onto the output qnn params."""
    dtype = "int32"
    axis = 0
    x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(dtype)
    y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(dtype)

    scale_val = (62 + 64) / (np.power(2, 32) - 1.0)
    x_scale = relay.const(scale_val, "float32")
    y_scale = relay.const(scale_val, "float32")

    x = relay.var("x", shape=(1, 64), dtype=dtype)
    y = relay.var("y", shape=(1, 64), dtype=dtype)
    z = relay.qnn.op.concatenate(
        (x, y),
        input_scales=(x_scale, y_scale),
        input_zero_points=(relay.const(3, "int32"), relay.const(4, "int32")),
        output_scale=y_scale,
        output_zero_point=relay.const(1, "int32"),
        axis=axis,
    )

    mod = tvm.IRModule.from_expr(relay.Function([x, y], z))
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)

    result = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(
        mod["main"]
    )(x_data, y_data)
    # Each input is shifted by (input_zp - output_zp): 3-1=2 and 4-1=3.
    np.testing.assert_equal(result.numpy(), np.concatenate((x_data - 2, y_data - 3), axis=axis))
def test_few_same_io_qnn_params():
    """Only the second input shares the output zero point; the first is shifted."""
    dtype = "int32"
    axis = 0
    x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(dtype)
    y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(dtype)

    scale_val = (62 + 64) / (np.power(2, 32) - 1.0)
    x_scale = relay.const(scale_val, "float32")
    y_scale = relay.const(scale_val, "float32")

    x = relay.var("x", shape=(1, 64), dtype=dtype)
    y = relay.var("y", shape=(1, 64), dtype=dtype)
    z = relay.qnn.op.concatenate(
        (x, y),
        input_scales=(x_scale, y_scale),
        input_zero_points=(relay.const(0, "int32"), relay.const(1, "int32")),
        output_scale=y_scale,
        output_zero_point=relay.const(1, "int32"),
        axis=axis,
    )

    mod = tvm.IRModule.from_expr(relay.Function([x, y], z))
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)

    result = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(
        mod["main"]
    )(x_data, y_data)
    # x is shifted by (output_zp - input_zp) = 1; y already matches the output zp.
    np.testing.assert_equal(result.numpy(), np.concatenate((x_data + 1, y_data), axis=axis))
def test_same_i_qnn_params():
    """Both inputs share a zero point that differs from the output zero point."""
    dtype = "int32"
    axis = 0
    x_data = np.arange(-32, 32, 1).reshape(1, 64).astype(dtype)
    y_data = np.arange(-64, 64, 2).reshape(1, 64).astype(dtype)

    scale_val = (62 + 64) / (np.power(2, 32) - 1.0)
    x_scale = relay.const(scale_val, "float32")
    y_scale = relay.const(scale_val, "float32")

    x = relay.var("x", shape=(1, 64), dtype=dtype)
    y = relay.var("y", shape=(1, 64), dtype=dtype)
    z = relay.qnn.op.concatenate(
        (x, y),
        input_scales=(x_scale, y_scale),
        input_zero_points=(relay.const(0, "int32"), relay.const(0, "int32")),
        output_scale=y_scale,
        output_zero_point=relay.const(1, "int32"),
        axis=axis,
    )

    mod = tvm.IRModule.from_expr(relay.Function([x, y], z))
    mod = relay.transform.InferType()(mod)
    mod = relay.qnn.transform.CanonicalizeOps()(mod)

    result = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(
        mod["main"]
    )(x_data, y_data)
    # Both inputs are shifted by (output_zp - input_zp) = 1.
    np.testing.assert_equal(result.numpy(), np.concatenate((x_data + 1, y_data + 1), axis=axis))
def test_call_input():
    """concatenate accepts a call node (the result of relay.split) as its input,
    not only an explicit tuple node."""
    x = relay.var("x", shape=(64,), dtype="uint8")
    x_scale = relay.const(1, "float32")
    y_scale = relay.const(1, "float32")
    x_zero_point = relay.const(0, "int32")
    y_zero_point = relay.const(0, "int32")

    # relay.split yields a TupleWrapper around a call node.
    halves = relay.split(x, 2, axis=0)
    z = relay.qnn.op.concatenate(
        halves,
        input_scales=(x_scale, y_scale),
        input_zero_points=(x_zero_point, y_zero_point),
        output_scale=y_scale,
        output_zero_point=relay.const(0, "int32"),
        axis=0,
    )
    func = relay.Function([x], z)

    # Splitting then re-concatenating with unit scale / zero zp is the identity.
    x_data = np.ones(shape=(64,)).astype("uint8")
    result = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)(
        x_data
    )
    np.testing.assert_equal(result.numpy(), x_data)
if __name__ == "__main__":
    # Run each test directly (kept for manual invocation without pytest).
    test_call_input()
    test_same_io_qnn_params()
    test_different_io_qnn_params()
    test_few_same_io_qnn_params()
    test_same_i_qnn_params()
| 6,943 | 33.72 | 100 | py |
tvm | tvm-main/tests/python/relay/test_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm import TVMError
from tvm.relay.backend import Executor
def test_create_executor():
    """An Executor can be created from its registered name alone."""
    aot = Executor("aot")
    assert aot.name == "aot"
def test_create_executor_with_options():
    """Options passed at creation are retrievable by key."""
    aot = Executor("aot", {"interface-api": "c"})
    assert aot.name == "aot"
    assert aot["interface-api"] == "c"
def test_create_executor_with_default():
    """An option not passed at creation falls back to its registered default."""
    graph = Executor("graph")
    assert not graph["link-params"]
def test_attr_check():
    """`in` reports whether an attribute exists on the executor."""
    aot = Executor("aot", {"interface-api": "c"})
    assert "interface-api" in aot
    assert "woof" not in aot
def test_create_executor_not_found():
    """Creating an executor under an unregistered name raises TVMError."""
    with pytest.raises(TVMError, match='Executor "woof" is not defined'):
        Executor("woof", {})
def test_create_executor_attr_not_found():
    """Passing an attribute the executor does not declare raises TVMError."""
    with pytest.raises(TVMError, match='Attribute "woof" is not available on this Executor'):
        Executor("aot", {"woof": "bark"})
def test_create_executor_attr_type_incorrect():
    """Passing an attribute value of the wrong type raises TVMError."""
    expected = (
        'Attribute "interface-api" should have type "runtime.String"'
        ' but instead found "IntImm"'
    )
    with pytest.raises(TVMError, match=expected):
        Executor("aot", {"interface-api": True})
def test_list_executors():
    """The registry of executor names includes "aot"."""
    registered = Executor.list_registered()
    assert "aot" in registered
@pytest.mark.parametrize("executor", [Executor("aot").name, "aot"])
def test_list_executor_options(executor):
    """Registered options can be listed via the executor's name."""
    options = Executor.list_registered_options(executor)
    assert "interface-api" in options
    assert options["interface-api"] == "runtime.String"
def test_list_executor_options_not_found():
    """Listing options of an unregistered executor raises TVMError."""
    with pytest.raises(TVMError, match='Executor "woof" is not defined'):
        Executor.list_registered_options("woof")
| 2,511 | 30.797468 | 93 | py |
tvm | tvm-main/tests/python/relay/test_op_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level10 operator test cases.
"""
import sys
import pytest
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te, topi
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
# Parametrize tests over both relay executors ("graph" and "vm").
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_checkpoint(executor_kind):
    """checkpoint is an identity annotation: type and value match the wrapped expr."""
    dtype = "float32"
    params = [relay.var("x{}".format(i), dtype) for i in range(4)]
    body = relay.multiply(relay.add(params[0], params[1]), relay.add(params[2], params[3]))

    plain_fn = run_infer_type(relay.Function(params, body))
    ckpt_fn = run_infer_type(relay.Function(params, relay.annotation.checkpoint(body)))
    assert plain_fn.checked_type == ckpt_fn.checked_type

    args = [np.random.uniform() for _ in params]
    for target, dev in tvm.testing.enabled_targets():
        executor = relay.create_executor(executor_kind, device=dev, target=target)
        plain_res = executor.evaluate(plain_fn)(*args)
        ckpt_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
            ckpt_fn
        )(*args)
        tvm.testing.assert_allclose(plain_res.numpy(), ckpt_res.numpy(), 0, 0)
def test_checkpoint_alpha_equal():
    """After PartialEvaluate + DCE, the gradient of a checkpointed function is
    structurally equal to the hand-written reference program below."""
    xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
    f = relay.Function(
        xs,
        relay.annotation.checkpoint(
            relay.multiply(relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3]))
        ),
    )
    df = transform.gradient(run_infer_type(f))

    # run PE and DCE
    with tvm.transform.PassContext(opt_level=3):
        # The expected output assumes DCE can elide 'dead writes' to references. At the time this unit test was
        # written DCE would elide all writes, which though unsound in general happens to work for this case. Preserve
        # that legacy behaviour here using 'ignore_impurity=True'.
        # TODO(mbs): Revisit once DCE supports dead reference writes.
        passes = [
            transform.PartialEvaluate(),
            transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
        ]
        mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
        df = mod["main"]

    # Reference gradient program in relay text format (checkpointing recomputes
    # the two adds in the backward pass instead of reusing %0/%1).
    df_parsed = tvm.relay.parse_expr(
        """
        #[version = "0.0.5"]
        fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
            -> (Tensor[(1), float32],
                (Tensor[(1), float32], Tensor[(1), float32],
                 Tensor[(1), float32], Tensor[(1), float32])) {
            %0 = add(%x, %y);
            %1 = add(%z, %w);
            let %x1: Tensor[(1), float32] = multiply(%0, %1);
            let %x2: Tensor[(1), float32] = ones_like(%x1);
            let %x3: Tensor[(1), float32] = add(%x, %y);
            let %x4: Tensor[(1), float32] = add(%z, %w);
            %2 = zeros_like(%x3);
            %3 = multiply(%x2, %x4);
            %4 = collapse_sum_like(%3, %x3);
            let %x5: Tensor[(1), float32] = add(%2, %4);
            %5 = zeros_like(%x4);
            %6 = multiply(%x2, %x3);
            %7 = collapse_sum_like(%6, %x4);
            let %x6: Tensor[(1), float32] = add(%5, %7);
            %8 = zeros_like(%x);
            %9 = collapse_sum_like(%x5, %x);
            %10 = add(%8, %9);
            %11 = zeros_like(%y);
            %12 = collapse_sum_like(%x5, %y);
            %13 = add(%11, %12);
            %14 = zeros_like(%z);
            %15 = collapse_sum_like(%x6, %z);
            %16 = add(%14, %15);
            %17 = zeros_like(%w);
            %18 = collapse_sum_like(%x6, %w);
            %19 = add(%17, %18);
            %20 = (%10, %13, %16, %19);
            (%x1, %20)
        }
        """
    )

    tvm.ir.assert_structural_equal(df, df_parsed)
def test_checkpoint_alpha_equal_tuple():
    """Same as test_checkpoint_alpha_equal but the checkpointed body returns
    a tuple of the two sums; the gradient must match the reference below."""
    xs = [relay.var("x{}".format(i), relay.TensorType((1,), "float32")) for i in range(4)]
    f = relay.Function(
        xs,
        relay.annotation.checkpoint(
            relay.Tuple([relay.add(xs[0], xs[1]), relay.add(xs[2], xs[3])])
        ),
    )
    df = transform.gradient(run_infer_type(f))
    # run PE and DCE
    with tvm.transform.PassContext(opt_level=3):
        # See comment in test_checkpoint_alpha_equal above.
        # TODO(mbs): Revisit once DCE supports dead reference writes.
        passes = [
            transform.PartialEvaluate(),
            transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
        ]
        mod = tvm.transform.Sequential(passes)(tvm.IRModule.from_expr(df))
        df = mod["main"]
    # Reference: ((sum1, sum2), gradients w.r.t. %x, %y, %z, %w).
    df_parsed = tvm.relay.parse_expr(
        """
        #[version = "0.0.5"]
        fn (%x: Tensor[(1), float32], %y: Tensor[(1), float32],
            %z: Tensor[(1), float32], %w: Tensor[(1), float32])
            -> ((Tensor[(1), float32], Tensor[(1), float32]),
                (Tensor[(1), float32], Tensor[(1), float32],
                 Tensor[(1), float32], Tensor[(1), float32])) {
        let %x1: Tensor[(1), float32] = add(%x, %y) /* ty=Tensor[(1), float32] */;
        let %x2: Tensor[(1), float32] = add(%z, %w) /* ty=Tensor[(1), float32] */;
        let %x3: Tensor[(1), float32] = zeros_like(%x2) /* ty=Tensor[(1), float32] */;
        let %x4: Tensor[(1), float32] = ones_like(%x1) /* ty=Tensor[(1), float32] */;
        %0 = (%x1, %x2);
        %1 = zeros_like(%x) /* ty=Tensor[(1), float32] */;
        %2 = collapse_sum_like(%x4, %x) /* ty=Tensor[(1), float32] */;
        %3 = add(%1, %2) /* ty=Tensor[(1), float32] */;
        %4 = zeros_like(%y) /* ty=Tensor[(1), float32] */;
        %5 = collapse_sum_like(%x4, %y) /* ty=Tensor[(1), float32] */;
        %6 = add(%4, %5) /* ty=Tensor[(1), float32] */;
        %7 = zeros_like(%z) /* ty=Tensor[(1), float32] */;
        %8 = collapse_sum_like(%x3, %z) /* ty=Tensor[(1), float32] */;
        %9 = add(%7, %8) /* ty=Tensor[(1), float32] */;
        %10 = zeros_like(%w) /* ty=Tensor[(1), float32] */;
        %11 = collapse_sum_like(%x3, %w) /* ty=Tensor[(1), float32] */;
        %12 = add(%10, %11) /* ty=Tensor[(1), float32] */;
        %13 = (%3, %6, %9, %12);
        (%0, %13)
        }
        """
    )
    tvm.ir.assert_structural_equal(df, df_parsed)
@tvm.testing.uses_gpu
def test_collapse_sum_like(executor_kind):
    """collapse_sum_like reduces the input down to the reference tensor's shape."""
    in_shape, like_shape = (3, 4, 5, 6), (4, 5, 6)
    dtype = "float32"
    lhs = relay.Var("x", relay.ty.TensorType(in_shape, dtype))
    rhs = relay.Var("y", relay.ty.TensorType(like_shape, dtype))
    out = relay.collapse_sum_like(lhs, rhs)
    assert run_infer_type(out).checked_type == relay.ty.TensorType(like_shape, dtype)
    fn = relay.Function([lhs, rhs], out)
    lhs_np = np.random.uniform(size=in_shape).astype(dtype)
    rhs_np = np.random.uniform(size=like_shape).astype(dtype)
    # Collapsing (3,4,5,6) -> (4,5,6) sums over the leading axis.
    expected = np.sum(lhs_np, 0)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
            lhs_np, rhs_np
        )
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_collapse_sum_to(executor_kind):
    """collapse_sum_to reduces the input to an explicitly supplied shape."""
    in_shape, to_shape = (3, 4, 5, 6), (4, 5, 6)
    dtype = "float32"
    inp = relay.Var("x", relay.ty.TensorType(in_shape, dtype))
    out = relay.collapse_sum_to(inp, to_shape)
    assert run_infer_type(out).checked_type == relay.ty.TensorType(to_shape, dtype)
    fn = relay.Function([inp], out)
    inp_np = np.random.uniform(size=in_shape).astype(dtype)
    # Collapsing (3,4,5,6) -> (4,5,6) sums over the leading axis.
    expected = np.sum(inp_np, 0)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(inp_np)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
    """broadcast_to with a static target shape behaves like np.broadcast_to."""
    src_shape, dst_shape = (4, 1, 6), (3, 4, 5, 6)
    dtype = "float32"
    inp = relay.Var("x", relay.ty.TensorType(src_shape, dtype))
    out = relay.broadcast_to(inp, shape=dst_shape)
    assert run_infer_type(out).checked_type == relay.ty.TensorType(dst_shape, dtype)
    fn = relay.Function([inp], out)
    inp_np = np.random.uniform(size=src_shape).astype(dtype)
    expected = np.broadcast_to(inp_np, dst_shape)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(inp_np)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_broadcast_to_const_shape_int64(executor_kind):
    """broadcast_to accepts the target shape as a constant int64 tensor."""
    shape_const = relay.const(np.array([1, 5]), dtype="int64")
    inp = relay.var("x", shape=(1,), dtype="int64")
    # Sum over axis 0 of the broadcast (1, 5) result, yielding shape (5,).
    body = relay.sum(relay.broadcast_to(inp, shape=shape_const), axis=0)
    fn = relay.Function([inp], body)
    inp_np = np.random.randint(10, size=(1,), dtype="int64")
    expected = np.broadcast_to(inp_np, (5,))
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(inp_np)
        tvm.testing.assert_allclose(got.numpy(), expected)
def test_broadcast_concat_shape_int64(executor_kind):
    """Concatenate the result of a broadcast whose shape is an int64 constant."""
    in_shape = (1, 2, 1, 1)
    bcast_shape = [1, 2, 2, 1]
    inp = relay.var("data", relay.TensorType(in_shape, "float32"))
    bcast = relay.op.broadcast_to(inp, relay.const(bcast_shape, dtype="int64"))
    fn = relay.Function([inp], relay.op.concatenate((bcast,), axis=0))
    inp_np = np.zeros(in_shape).astype("float32")
    expected = np.concatenate((np.broadcast_to(inp_np, bcast_shape),), axis=0)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(inp_np)
        tvm.testing.assert_allclose(got.numpy(), expected)
def test_broadcast_pool2d_shape_int64(executor_kind):
    """Feed a broadcast result (int64 constant shape) through max_pool2d."""
    in_shape = (1, 3, 32, 32)
    out_shape = (2, 3, 32, 32)
    inp = relay.var("data", shape=in_shape, dtype="float32")
    bcast = relay.broadcast_to(inp, shape=relay.const([2, 3, 32, 32], dtype="int64"))
    pooled = relay.nn.max_pool2d(bcast, pool_size=(3, 3), padding=(1, 1, 1, 1))
    fn = relay.Function([inp], relay.subtract(bcast, pooled))
    inp_np = np.ones(in_shape).astype("float32")
    # All-ones input: max pooling is the identity, so the difference is zero.
    expected = np.zeros(out_shape).astype("float32")
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(inp_np)
        tvm.testing.assert_allclose(got.numpy(), expected)
@tvm.testing.uses_gpu
def test_broadcast_to_like(executor_kind):
    """broadcast_to_like takes its target shape from a second tensor."""
    src_shape, like_shape = (4, 1, 6), (3, 4, 5, 6)
    dtype = "float32"
    lhs = relay.Var("x", relay.ty.TensorType(src_shape, dtype))
    rhs = relay.Var("y", relay.ty.TensorType(like_shape, dtype))
    out = relay.broadcast_to_like(lhs, rhs)
    assert run_infer_type(out).checked_type == relay.ty.TensorType(like_shape, dtype)
    fn = relay.Function([lhs, rhs], out)
    lhs_np = np.random.uniform(size=src_shape).astype(dtype)
    rhs_np = np.random.uniform(size=like_shape).astype(dtype)
    expected = np.broadcast_to(lhs_np, like_shape)
    for target, dev in tvm.testing.enabled_targets():
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
            lhs_np, rhs_np
        )
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)
def np_slice_like(np_data, np_shape_like, axis=None):
    """NumPy reference for relay.slice_like.

    Slices ``np_data`` so that, along the requested axes, its extent matches
    the corresponding extent of ``np_shape_like``.  Slices always start at 0.

    Parameters
    ----------
    np_data : np.ndarray
        The array to slice.
    np_shape_like : np.ndarray
        The array whose shape supplies the target extents.
    axis : sequence of int, optional
        Axes to slice; negative values count from the end.  When falsy, every
        leading axis the two arrays have in common is sliced.

    Returns
    -------
    np.ndarray
        The sliced view of ``np_data``.
    """
    end_idx = list(np_data.shape)
    if axis:
        for i in axis:
            if i < 0:
                i += len(np_data.shape)
            end_idx[i] = np_shape_like.shape[i]
    else:
        # No axes requested: match every axis shared by both arrays.
        for i in range(min(len(np_data.shape), len(np_shape_like.shape))):
            end_idx[i] = np_shape_like.shape[i]
    return np_data[tuple(slice(0, end) for end in end_idx)]
def verify_slice_like(executor_kind, data, slice_like, axes, output, dtype="float32"):
    """Check relay.slice_like type inference and, for static shapes, execution.

    Parameters
    ----------
    data, slice_like : tuple
        Input shapes; entries may be symbolic ``te.var`` dimensions.
    axes : tuple of int or None
        Axes forwarded to relay.slice_like.
    output : tuple
        Expected inferred output shape.
    dtype : str
        Element type of both inputs.
    """
    x = relay.var("data", relay.TensorType(data, dtype))
    y = relay.var("slice_like", relay.TensorType(slice_like, dtype))
    z = relay.slice_like(x, y, axes)
    zz = run_infer_type(z)
    if axes:
        assert "axes" in z.astext()
    assert zz.checked_type == relay.ty.TensorType(output, dtype)
    # Fully symbolic shapes cannot be materialized as numpy arrays, so only
    # the type check above applies.  (Rewritten from `isinstance(v, int) == 0`.)
    if all(not isinstance(v, int) for v in data) or all(
        not isinstance(v, int) for v in slice_like
    ):
        return
    func = relay.Function([x, y], z)
    x_data = np.random.uniform(size=data).astype(dtype)
    y_data = np.random.uniform(size=slice_like).astype(dtype)
    ref_res = np_slice_like(x_data, y_data, axes)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data, y_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_slice_like(executor_kind):
    """Exercise slice_like over static, symbolic, and axis-restricted cases."""
    d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
    cases = [
        # (data shape, slice_like shape, axes, expected output shape)
        ((d1, d2, d3), (1, 2, 3), None, (1, 2, 3)),
        ((1, 2, 3), (d1, d2, d3), None, (d1, d2, d3)),
        ((d2, d3, d4), (d1, d2, d3), (1, 2), (d2, d2, d3)),
        ((3, 4, 5), (1, 2, 3), None, (1, 2, 3)),
        ((3, 4, 5), (1, 2), None, (1, 2, 5)),
        ((3, 4, 5), (1, 2, 3), (1, 2), (3, 2, 3)),
        ((3, 4, 5), (1, 2, 3), (-1, -3), (1, 4, 3)),
        ((1, 3, 224, 224), (1, 3, 112, 112), (2, 3), (1, 3, 112, 112)),
    ]
    for data, like, axes, output in cases:
        verify_slice_like(executor_kind, data=data, slice_like=like, axes=axes, output=output)
@tvm.testing.uses_gpu
def test_reverse_reshape(executor_kind):
    def verify_reverse_reshape(executor_kind, shape, newshape, oshape):
        """reverse_reshape must infer `oshape` and agree with numpy's reshape."""
        inp = relay.var("x", relay.TensorType(shape, "float32"))
        out = relay.reverse_reshape(inp, newshape=newshape)
        assert "newshape=" in out.astext()
        assert run_infer_type(out).checked_type == relay.ty.TensorType(oshape, "float32")
        fn = relay.Function([inp], out)
        inp_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        expected = np.reshape(inp_np, oshape)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
                inp_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)

    # (shape, newshape, expected oshape); special codes 0/-1/-3 are applied
    # right-to-left, as the expected shapes below demonstrate.
    cases = [
        ((2, 3, 4), (4, 0, 2), (4, 3, 2)),
        ((2, 3, 4), (2, 0, 0), (2, 3, 4)),
        ((2, 3, 4), (0, -1), (3, 8)),
        ((2, 3, 4), (-1, 0), (6, 4)),
        ((2, 3, 4), (0, -3), (2, 12)),
    ]
    for shape, newshape, oshape in cases:
        verify_reverse_reshape(executor_kind, shape, newshape, oshape)
def verify_batch_matmul_with_inputs(
    executor_kind, x, y, x_np, y_np, out_shape, dtype="float32", trans_x=False, trans_y=True
):
    """Run nn.batch_matmul on prebuilt relay vars and compare with topi's reference.

    When `x` and `y` are the same relay.Var the built function has a single
    free variable and is invoked with `x_np` only.
    """
    prod = relay.nn.batch_matmul(x, y, transpose_a=trans_x, transpose_b=trans_y)
    assert run_infer_type(prod).checked_type == relay.ty.TensorType(out_shape, dtype)
    params = relay.analysis.free_vars(prod)
    fn = relay.Function(params, prod)
    expected = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=trans_x, trans_y=trans_y)
    for target, dev in tvm.testing.enabled_targets():
        run = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)
        got = run(x_np, y_np) if len(params) == 2 else run(x_np)
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
def verify_batch_matmul(
    executor_kind, x_shape, y_shape, out_shape, dtype="float32", trans_x=False, trans_y=True
):
    """Build fresh relay vars plus random inputs, then defer to
    verify_batch_matmul_with_inputs."""
    lhs = relay.var("x", relay.TensorType(x_shape, dtype))
    rhs = relay.var("y", relay.TensorType(y_shape, dtype))
    lhs_np = np.random.uniform(size=x_shape).astype(dtype)
    rhs_np = np.random.uniform(size=y_shape).astype(dtype)
    verify_batch_matmul_with_inputs(
        executor_kind, lhs, rhs, lhs_np, rhs_np, out_shape, dtype, trans_x, trans_y
    )
@tvm.testing.uses_gpu
def test_batch_matmul(executor_kind):
    """Type inference with symbolic shapes plus numeric checks for every
    transpose combination, including the shared-operand case."""
    b, m, n, k = te.size_var("b"), te.size_var("m"), te.size_var("n"), te.size_var("k")
    lhs = relay.var("x", relay.TensorType((b, m, k), "float32"))
    rhs = relay.var("y", relay.TensorType((b, n, k), "float32"))
    prod = relay.nn.batch_matmul(lhs, rhs)
    assert run_infer_type(prod).checked_type == relay.TensorType((b, m, n), "float32")

    # (x_shape, y_shape, out_shape, trans_x, trans_y)
    cases = [
        ((1, 16, 32), (1, 16, 32), (1, 16, 16), False, True),
        ((5, 16, 32), (5, 16, 32), (5, 16, 16), False, True),
        ((5, 16, 32), (5, 20, 32), (5, 16, 20), False, True),
        ((30, 16, 32), (30, 20, 32), (30, 16, 20), False, True),
        ((1, 32, 16), (1, 16, 32), (1, 16, 16), True, True),
        ((5, 16, 32), (5, 32, 16), (5, 16, 16), False, False),
        ((5, 32, 16), (5, 32, 20), (5, 16, 20), True, False),
    ]
    for x_shape, y_shape, out_shape, trans_x, trans_y in cases:
        verify_batch_matmul(
            executor_kind, x_shape, y_shape, out_shape, trans_x=trans_x, trans_y=trans_y
        )

    # Same relay.Var on both sides: the function has a single free variable.
    x_np = np.random.randn(10, 27, 64).astype("float32")
    shared = relay.var("x", shape=x_np.shape)
    verify_batch_matmul_with_inputs(executor_kind, shared, shared, x_np, x_np, (10, 27, 27))
def batch_matmul_x86_test(b, m, n, k, target="llvm -mcpu=cascadelake", intrins=("vpdpbusd",)):
    """Compile int8 batch_matmul + bias for an x86 target and verify the output.

    Parameters
    ----------
    b, m, n, k : int
        Batch, M, N and K extents of the (b, m, k) x (b, n, k) matmul.
    target : str
        LLVM target string selecting the CPU features under test.
    intrins : sequence of str
        Mnemonics expected in the generated assembly for vectorization-friendly
        shapes.  (A tuple default replaces the previous mutable list default.)
    """
    x_shape = (b, m, k)
    y_shape = (b, n, k)
    z_shape = (b, m, n)
    # Cover both signed and unsigned 8-bit LHS operands.
    for lhs_dtype in ["uint8", "int8"]:
        x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
        y = relay.var("y", shape=y_shape, dtype="int8")
        z = relay.var("z", shape=z_shape, dtype="int32")
        bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
        out = bmm + z
        mod = tvm.IRModule.from_expr(out)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target)
        # TODO(vvchernov): needs for avx512 arch, can be extended
        # Only shapes aligned to the vector width are expected to use the
        # special intrinsics.
        if n % 16 == 0 and k % 4 == 0:
            asm = lib.lib.get_source("asm")
            for intrin in intrins:
                assert intrin in asm
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
        y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
        z_np = np.random.uniform(1, 10, size=z_shape).astype("int32")
        runtime.set_input("x", x_np)
        runtime.set_input("y", y_np)
        runtime.set_input("z", z_np)
        runtime.run()
        out = runtime.get_output(0).numpy()
        # Integer arithmetic must match the reference exactly.
        ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32") + z_np
        np.testing.assert_equal(out, ref)
@pytest.mark.skip("skip due to AMX feature not avaliable yet")
@pytest.mark.parametrize(
    "b,m,n,k",
    [
        (16, 32, 32, 128),
        (16, 32, 32, 127),
        (16, 32, 31, 128),
    ],
)
def test_batch_matmul_amx(b, m, n, k):
    """int8 batch_matmul + bias built for Sapphire Rapids; checks that the
    generated assembly contains the AMX tile instruction sequence and that the
    integer results match topi's reference exactly."""
    amx_init = tvm.get_global_func("runtime.amx_init")
    amx_tileconfig = tvm.get_global_func("runtime.amx_tileconfig")
    assert amx_init()
    assert amx_tileconfig(16, 64)  # config tile size to 16 rows by 64 columns.
    x_shape = (b, m, k)
    y_shape = (b, n, k)
    z_shape = (b, m, n)
    # Cover both signed and unsigned 8-bit LHS operands.
    for lhs_dtype in ["uint8", "int8"]:
        x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
        y = relay.var("y", shape=y_shape, dtype="int8")
        z = relay.var("z", shape=z_shape, dtype="int32")
        bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
        out = bmm + z
        mod = tvm.IRModule.from_expr(out)
        target = "llvm -mcpu=sapphirerapids"
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target)
        asm = lib.lib.get_source("asm")
        # Full AMX tile lifecycle: zero, load, compute, store.
        assert "tilezero" in asm
        assert "tileloaddt1" in asm
        assert "tdpbusd" in asm
        assert "tilestored" in asm
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
        y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
        z_np = np.random.uniform(1, 10, size=z_shape).astype("int32")
        runtime.set_input("x", x_np)
        runtime.set_input("y", y_np)
        runtime.set_input("z", z_np)
        runtime.run()
        out = runtime.get_output(0).numpy()
        ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32") + z_np
        np.testing.assert_equal(out, ref)
@tvm.testing.requires_cascadelake
@pytest.mark.parametrize(
    "b,m,n,k",
    [
        (16, 32, 128, 96),
        (16, 32, 128, 97),
        (16, 32, 129, 96),
    ],
)
def test_batch_matmul_vnni(b, m, n, k):
    """int8 batch_matmul on Cascade Lake; batch_matmul_x86_test's defaults
    check for the VNNI `vpdpbusd` intrinsic on aligned shapes."""
    batch_matmul_x86_test(b, m, n, k)
@tvm.testing.requires_skylake_avx512
@pytest.mark.parametrize(
    "b,m,n,k",
    [
        (16, 32, 128, 96),
        (16, 32, 128, 97),
        (16, 32, 129, 96),
    ],
)
def test_batch_matmul_skylake_avx512(b, m, n, k):
    """int8 batch_matmul on skylake-avx512; expects the pmaddubs/pmaddw/vpaddd
    multiply-accumulate sequence instead of VNNI on aligned shapes."""
    batch_matmul_x86_test(b, m, n, k, "llvm -mcpu=skylake-avx512", ["pmaddubs", "pmaddw", "vpaddd"])
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_batch_matmul_rocm_sdot4():
    """int8 batch_matmul on ROCm with the dot-product ISA; checks that the
    `v_dot4_i32_i8` instruction is emitted and the results match topi."""
    x_shape = (16, 32, 96)
    y_shape = (16, 128, 96)
    lhs_dtype = "int8"
    x = relay.var("x", shape=x_shape, dtype=lhs_dtype)
    y = relay.var("y", shape=y_shape, dtype="int8")
    bmm = relay.nn.batch_matmul(x, y, out_dtype="int32")
    mod = tvm.IRModule.from_expr(bmm)
    target = "rocm -mattr=+dotprod"
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target)
    # The device code lives in the imported (GPU) module, not the host module.
    asm = lib.lib.imported_modules[0].get_source("asm")
    assert "v_dot4_i32_i8" in asm
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    x_np = np.random.uniform(1, 10, size=x_shape).astype(lhs_dtype)
    y_np = np.random.uniform(1, 10, size=y_shape).astype("int8")
    runtime.set_input("x", x_np)
    runtime.set_input("y", y_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    ref = tvm.topi.testing.batch_matmul(x_np, y_np, out_dtype="int32")
    np.testing.assert_equal(out, ref)
@tvm.testing.uses_gpu
def test_shape_of():
    """shape_of returns the tensor's static shape as an int32 vector."""
    shape = (10, 5, 12)
    inp = relay.var("x", shape=shape)
    fn = run_infer_type(relay.Function([inp], relay.op.shape_of(inp)))
    inp_np = np.random.rand(*shape).astype("float32")
    for target, dev in tvm.testing.enabled_targets():
        # The graph executor constant-folds shape_of away during optimization,
        # so only the VM backend is exercised here.
        for kind in ["vm"]:
            got = relay.create_executor(kind, device=dev, target=target).evaluate(fn)(inp_np)
            tvm.testing.assert_allclose(got.numpy(), np.array(shape).astype("int32"))
@tvm.testing.uses_gpu
def test_ndarray_size(executor_kind):
    def verify_ndarray_size(shape):
        """ndarray_size must equal np.size for the given static shape."""
        inp = relay.var("x", shape=shape)
        fn = run_infer_type(relay.Function([inp], relay.op.ndarray_size(inp)))
        inp_np = np.random.uniform(size=shape).astype("float32")
        expected = np.size(inp_np)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
                inp_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected)

    for shape in [(2, 3, 5), (2, 3, 5, 7)]:
        verify_ndarray_size(shape)
def verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc):
    """Compare a relay adaptive pooling op against topi's reference, with the
    input shape expressed as both int32 and int64 dimensions."""
    for shape_dtype in ["int32", "int64"]:
        dims = [tvm.tir.IntImm(shape_dtype, dim) for dim in dshape]
        inp = relay.var("x", shape=dims, dtype=dtype)
        fn = relay.Function([inp], opfunc(inp, out_size, layout))
        inp_np = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
        expected = tvm.topi.testing.adaptive_pool(inp_np, out_size, pool_type, layout)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor("graph", device=dev, target=target).evaluate(fn)(inp_np)
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)
def verify_adaptive_pool1d(dshape, out_size, pool_type, layout="NCW", dtype="float32"):
    """1-D adaptive pooling: pick the avg or max op, then defer to verify_adaptive_pool."""
    if pool_type == "avg":
        opfunc = relay.nn.adaptive_avg_pool1d
    else:
        opfunc = relay.nn.adaptive_max_pool1d
    verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool2d(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
    """2-D adaptive pooling: pick the avg or max op, then defer to verify_adaptive_pool."""
    if pool_type == "avg":
        opfunc = relay.nn.adaptive_avg_pool2d
    else:
        opfunc = relay.nn.adaptive_max_pool2d
    verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
def verify_adaptive_pool3d(dshape, out_size, pool_type, layout="NCDHW", dtype="float32"):
    """3-D adaptive pooling: pick the avg or max op, then defer to verify_adaptive_pool."""
    if pool_type == "avg":
        opfunc = relay.nn.adaptive_avg_pool3d
    else:
        opfunc = relay.nn.adaptive_max_pool3d
    verify_adaptive_pool(dshape, out_size, pool_type, layout, dtype, opfunc)
@tvm.testing.uses_gpu
def test_adaptive_pool():
    """Smoke-test adaptive avg/max pooling over 1-D, 2-D and 3-D inputs,
    channel-first and channel-last layouts, and float32/int32 element types."""
    verify_adaptive_pool1d((1, 9, 224), (1), "max")
    verify_adaptive_pool1d((1, 3, 224), (3), "avg")
    verify_adaptive_pool1d((1, 3, 224), (3), "avg", dtype="int32")
    verify_adaptive_pool1d((1, 14, 78), (13), "max")
    verify_adaptive_pool1d((1, 5, 97), (96), "avg")
    verify_adaptive_pool1d((1, 224, 3), (1), "max", layout="NWC")
    verify_adaptive_pool1d((1, 3, 224), (3), "avg", layout="NWC")
    verify_adaptive_pool2d((1, 9, 224, 224), (1, 1), "max")
    verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg")
    verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", dtype="int32")
    verify_adaptive_pool2d((1, 14, 56, 78), (34, 13), "max")
    verify_adaptive_pool2d((1, 5, 46, 97), (4, 96), "avg")
    verify_adaptive_pool2d((1, 224, 224, 3), (1, 1), "max", layout="NHWC")
    verify_adaptive_pool2d((1, 3, 224, 224), (2, 3), "avg", layout="NHWC")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "max", layout="NCDHW")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NCDHW", dtype="int32")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (1, 1, 1), "avg", layout="NDHWC", dtype="int32")
    verify_adaptive_pool3d((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC")
@tvm.testing.uses_gpu
def test_sequence_mask(executor_kind):
    def _verify(data_shape, mask_value, axis, dtype, itype):
        """Mask positions past each batch's valid length; compare with topi."""
        max_length = data_shape[axis]
        nbatch = data_shape[1 - axis]
        data = relay.var("data", relay.TensorType(data_shape, dtype))
        valid_length = relay.var("valid_length", relay.TensorType((nbatch,), itype))
        masked = relay.sequence_mask(data, valid_length, mask_value, axis)
        assert run_infer_type(masked).checked_type == relay.ty.TensorType(data_shape, dtype)
        fn = relay.Function([data, valid_length], masked)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        lengths_np = np.random.randint(0, max_length, size=nbatch).astype(itype)
        expected = tvm.topi.testing.sequence_mask(data_np, lengths_np, mask_value, axis)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
                data_np, lengths_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected)

    _verify((5, 10), 0.0, 1, "float32", "int32")
    _verify((2, 3, 5, 3), 0.0, 0, "float32", "int64")
    _verify((5, 8, 3), 0.1, 1, "float64", "float32")
@tvm.testing.uses_gpu
def test_one_hot(executor_kind):
    def _get_oshape(indices_shape, depth, axis):
        # Output shape is the indices shape with `depth` inserted at `axis`
        # (appended when axis == -1).
        true_axis = len(indices_shape) if axis == -1 else axis
        oshape = list(indices_shape)
        oshape.insert(true_axis, depth)
        return oshape

    def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
        """Build one_hot, check its inferred type, and compare with topi."""
        indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
        out = relay.one_hot(
            indices, relay.const(on_value), relay.const(off_value), depth, axis, dtype
        )
        assert run_infer_type(out).checked_type == relay.ty.TensorType(
            _get_oshape(indices_shape, depth, axis), dtype
        )
        fn = relay.Function([indices], out)
        indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
        expected = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
                indices_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected)

    _verify((3,), 3, 1, 0, -1, "int32")
    _verify((3,), 3, 1.0, 0.0, -1, "float32")
    _verify((2, 2), 5, 2, -2, 0, "int32")
    _verify((2, 2), 5, 0.5, -0.5, 1, "float32")
    _verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
    _verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_matrix_set_diag(executor_kind):
    def _verify(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
        """matrix_set_diag preserves the input type and matches topi's reference."""
        matrix = relay.var("input", relay.TensorType(input_shape, dtype))
        diag = relay.var("diagonal", relay.TensorType(diagonal_shape, dtype))
        out = relay.matrix_set_diag(matrix, diag, k, align)
        assert run_infer_type(matrix).checked_type == run_infer_type(out).checked_type
        fn = relay.Function([matrix, diag], out)
        matrix_np = np.random.randint(-100, 100, size=input_shape).astype(dtype)
        diag_np = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
        expected = tvm.topi.testing.matrix_set_diag(matrix_np, diag_np, k, align)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
                matrix_np, diag_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected)

    _verify((2, 2), (2,), "float32")
    _verify((4, 3, 3), (4, 3), "int32")
    _verify((2, 3, 4), (2, 3), "float32", 1)
    _verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_RIGHT")
    _verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "LEFT_LEFT")
    _verify((2, 3, 4), (2, 4, 3), "int32", (-1, 2), "RIGHT_RIGHT")
@tvm.testing.parametrize_targets
def test_nll_loss(executor_kind, dev, target):
    def _get_oshape(target_shape, reduction):
        # "none" keeps per-element losses; "mean"/"sum" reduce to a scalar.
        return target_shape if reduction == "none" else []

    def _verify(prediction_shape, reduction="mean", ignore_index=-100, dtype="float32"):
        """Check nll_loss typing and numerics against topi's reference."""
        num_classes = prediction_shape[1]
        target_shape = prediction_shape[:1] + prediction_shape[2:]
        predictions = relay.var("predictions", relay.TensorType(prediction_shape, dtype))
        targets = relay.var("targets", relay.TensorType(target_shape, "int32"))
        weights = relay.var("weights", relay.TensorType((num_classes,), dtype))
        out = relay.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
        assert run_infer_type(out).checked_type == relay.ty.TensorType(
            _get_oshape(target_shape, reduction), dtype
        )
        fn = relay.Function([predictions, targets, weights], out)
        predictions_np = np.random.uniform(size=prediction_shape).astype(dtype)
        targets_np = np.random.randint(0, num_classes, target_shape).astype("int32")
        weights_np = np.random.uniform(size=(num_classes,)).astype(dtype)
        expected = tvm.topi.testing.nll_loss(
            predictions_np, targets_np, weights_np, reduction, ignore_index
        )
        got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(fn)(
            predictions_np, targets_np, weights_np
        )
        tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-6, atol=1e-6)

    _verify((10, 5))
    _verify((10, 5, 2, 2))
    _verify((10, 5), reduction="sum")
    _verify((10, 5), reduction="none")
    _verify((10, 5), ignore_index=3)
    _verify((10, 5), dtype="float64")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 34,465 | 38.937428 | 117 | py |
tvm | tvm-main/tests/python/relay/test_layer_count.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay.testing import resnet
from tvm.relay.analysis import count_layers
def test_layer_count():
    """count_layers must report the advertised layer count of resnet workloads."""

    def verify(num_layers):
        # Build a resnet whose conv/dense layer count is known up front.
        mod, _ = resnet.get_workload(num_layers=num_layers)
        # Only conv2d and dense ops count toward the layer total.
        assert count_layers(mod, valid_ops=["nn.conv2d", "nn.dense"]) == num_layers

    for depth in (18, 50):
        verify(depth)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_layer_count()
| 1,277 | 35.514286 | 70 | py |
tvm | tvm-main/tests/python/relay/test_op_level6.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level6 operator test cases.
"""
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.topi.testing import searchsorted_ref
import tvm.testing
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_sort():
    def verify_sort(shape, axis, is_ascend, is_dyn=False, in_dtype="float32"):
        """Compare relay.sort with np.sort; dynamic shapes run on the VM."""
        if is_dyn:
            tensor_type = relay.TensorType([relay.Any()] * len(shape), in_dtype)
        else:
            tensor_type = relay.TensorType(shape, in_dtype)
        inp = relay.var("x", tensor_type)
        fn = relay.Function([inp], relay.sort(inp, axis=axis, is_ascend=is_ascend))
        inp_np = np.random.uniform(size=shape).astype(in_dtype)
        # A descending sort equals the negated ascending sort of the negated data.
        if is_ascend:
            expected = np.sort(inp_np, axis=axis)
        else:
            expected = -np.sort(-inp_np, axis=axis)
        backend = "vm" if is_dyn else "graph"
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(fn)
            got = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
                inp_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)

    for is_dyn in [False, True]:
        verify_sort((2, 3, 4), axis=0, is_ascend=False, is_dyn=is_dyn)
        verify_sort((1, 4, 6), axis=1, is_ascend=True, is_dyn=is_dyn)
        verify_sort((3, 5, 6), axis=-1, is_ascend=False, is_dyn=is_dyn)
        verify_sort((3, 2000, 6), axis=1, is_ascend=False, is_dyn=is_dyn)
        verify_sort((1, 122640), axis=1, is_ascend=False, is_dyn=is_dyn)
        verify_sort((1, 122640), axis=1, is_ascend=False, is_dyn=is_dyn, in_dtype="float16")
@tvm.testing.uses_gpu
def test_argsort():
    def verify_argsort(shape, axis, is_ascend, dtype, is_dyn=False, in_dtype="float32"):
        """Compare relay.argsort with a stable np.argsort in either direction."""
        if is_dyn:
            tensor_type = relay.TensorType([relay.Any()] * len(shape), in_dtype)
        else:
            tensor_type = relay.TensorType(shape, in_dtype)
        inp = relay.var("x", tensor_type)
        fn = relay.Function(
            [inp], relay.argsort(inp, axis=axis, is_ascend=is_ascend, dtype=dtype)
        )
        inp_np = np.random.uniform(size=shape).astype(in_dtype)
        # Descending argsort == stable ascending argsort of the negated data.
        source = inp_np if is_ascend else -inp_np
        expected = np.argsort(source, axis=axis, kind="stable")
        backend = "vm" if is_dyn else "graph"
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(fn)
            got = relay.create_executor(backend, mod=mod, device=dev, target=target).evaluate()(
                inp_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected.astype(dtype), rtol=1e-5)

    for is_dyn in [False, True]:
        for dtype in ["int32", "int64", "float32", "float64"]:
            verify_argsort((2, 3, 4), axis=0, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
            verify_argsort((1, 4, 6), axis=1, is_ascend=True, dtype=dtype, is_dyn=is_dyn)
        dtype = "int32"
        verify_argsort((3, 5, 6), axis=-1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
        verify_argsort((3, 6000, 6), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
        verify_argsort((1000, 1, 1), axis=0, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
        verify_argsort((1, 122640), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn)
        verify_argsort(
            (1, 122640), axis=1, is_ascend=False, dtype=dtype, is_dyn=is_dyn, in_dtype="float16"
        )
@tvm.testing.uses_gpu
def test_topk(executor_kind):
    """End-to-end check of relay.topk against a stable numpy argsort reference."""

    def verify_topk(k, axis, ret_type, is_ascend, dtype, in_dtype="float32"):
        # k, axis, ret_type, is_ascend and dtype mirror the relay.topk
        # attributes; in_dtype is the element type of the input tensor.
        shape = (20, 100)
        x = relay.var("x", relay.TensorType(shape, in_dtype))
        out = relay.topk(x, k, axis, ret_type, is_ascend, dtype)
        if isinstance(out, relay.expr.TupleWrapper):
            out = out.astuple()
        func = relay.Function([x], out)
        np_data = np.random.uniform(size=shape).astype(in_dtype)
        # Stable argsort of the (negated, for descending) data gives the
        # reference index order with deterministic tie-breaking.
        if is_ascend:
            np_indices = np.argsort(np_data, axis=axis, kind="stable")
        else:
            np_indices = np.argsort(-np_data, axis=axis, kind="stable")
        # k < 1 means "keep everything along the axis".
        kk = k if k >= 1 else shape[axis]
        if axis == 0:
            np_indices = np_indices[:kk, :]
            np_values = np.zeros(np_indices.shape).astype(in_dtype)
            for i in range(shape[1]):
                np_values[:, i] = np_data[np_indices[:, i], i]
        else:
            # axis is 1 or -1; both select along the last dim of the 2-D input.
            np_indices = np_indices[:, :kk]
            np_values = np.zeros(np_indices.shape).astype(in_dtype)
            for i in range(shape[0]):
                np_values[i, :] = np_data[i, np_indices[i, :]]
        np_indices = np_indices.astype(dtype)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_data
            )
            if ret_type == "both":
                tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
                tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
            elif ret_type == "values":
                tvm.testing.assert_allclose(op_res.numpy(), np_values)
            else:
                tvm.testing.assert_allclose(op_res.numpy(), np_indices)

    # Seed so the reference tie-breaking stays reproducible across runs.
    np.random.seed(0)
    for k in [0, 1, 5]:
        for axis in [0, -1, 1]:
            for ret_type in ["both", "values", "indices"]:
                verify_topk(k, axis, ret_type, True, "int64")
                verify_topk(k, axis, ret_type, False, "float32")
                verify_topk(k, axis, ret_type, False, "int64", "float16")
@tvm.testing.uses_gpu
def test_searchsorted():
    """Check relay.searchsorted against the numpy-based searchsorted_ref helper."""

    def verify_searchsorted(right, dtype):
        # `right` selects the right/left bisection side; `dtype` is the index
        # dtype of the result.
        shape = (8, 9, 10)
        values_shape = shape[:-1] + (10,)
        sorted_sequence = relay.var("sorted_sequence", relay.TensorType(shape, "float32"))
        # Fix: this var was previously also named "sorted_sequence" (copy-paste),
        # so two distinct vars shared one name hint. Vars are identified by
        # object, so behavior is unchanged, but printed IR is now unambiguous.
        values = relay.var("values", relay.TensorType(values_shape, "float32"))
        out = relay.searchsorted(sorted_sequence, values, right, dtype)
        func = relay.Function([sorted_sequence, values], out)
        # The sequence must be sorted along the innermost axis for the op.
        sorted_sequence_np = np.sort(np.random.randn(*shape).astype("float32"), axis=-1)
        values_np = np.random.randn(*values_shape).astype("float32")
        np_indices = searchsorted_ref(sorted_sequence_np, values_np, right, dtype)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                sorted_sequence_np, values_np
            )
            np.testing.assert_equal(op_res.numpy(), np_indices)

    verify_searchsorted(False, "int32")
    verify_searchsorted(True, "int64")
if __name__ == "__main__":
    # Delegate to the tvm.testing pytest wrapper when run as a script.
    tvm.testing.main()
| 7,648 | 42.214689 | 100 | py |
tvm | tvm-main/tests/python/relay/test_simplify_fc_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.relay.data_dep_optimization import simplify_fc_transpose
def run_func(func, params, x):
    """Build *func* for llvm, run it on CPU with input *x*, and return the output array."""
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(func, "llvm", params=params)

    from tvm.contrib import graph_executor

    module = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
    # Feed the single "data" input, execute once, and pull the first output.
    module.set_input("data", tvm.nd.array(x.astype("float32")))
    module.run()
    return module.get_output(0).numpy()
def test_simplify_fc_transpose():
    """simplify_fc_transpose must not change the numerical result of the network."""
    # Two dense layers whose weights are fed through an explicit transpose —
    # exactly the pattern the optimization folds away.
    data = relay.var("data", shape=(1, 32), dtype="float32")
    act = relay.nn.relu(data)
    w1 = relay.var("w1", shape=(32, 64), dtype="float32")
    hidden = relay.nn.relu(relay.nn.dense(act, relay.transpose(w1, axes=[1, 0])))
    w2 = relay.var("w2", shape=(64, 16), dtype="float32")
    out = relay.nn.dense(hidden, relay.transpose(w2, axes=[1, 0]))
    func = relay.Function(relay.analysis.free_vars(out), out)
    params = {
        name: tvm.nd.array(np.random.uniform(-1, 1, shape).astype("float32"))
        for name, shape in [("w1", (32, 64)), ("w2", (64, 16))]
    }
    x_np = np.random.randn(1, 32).astype("float32")
    # Result before and after the rewrite must agree numerically.
    old_result = run_func(func, params, x_np)
    new_func, new_params = simplify_fc_transpose.convert(func, params)
    new_result = run_func(new_func, new_params, x_np)
    np.testing.assert_allclose(old_result, new_result, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run the single test directly when invoked as a script.
    test_simplify_fc_transpose()
| 2,433 | 33.28169 | 81 | py |
tvm | tvm-main/tests/python/relay/test_sparse_conv2d_convert.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
from tvm.topi.sparse.utils import random_bsr_matrix
from tvm.relay.build_module import bind_params_by_name
def run_func(func, params, x):
    """Compile *func* for llvm, execute it on CPU with input *x*, and return the result."""
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, new_params = relay.build(func, "llvm", params=params)

    from tvm.contrib import graph_executor

    module = graph_executor.create(graph, lib, tvm.cpu(0))
    # Bind the "data" input plus every compiled parameter, then run once.
    module.set_input("data", tvm.nd.array(x.astype("float32")))
    module.set_input(**new_params)
    module.run()
    return module.get_output(0).numpy()
def test_bsr_sparse_conv2d_nchw():
    """BSR-sparse 1x1 conv2d (NCHW/OIHW) must match the dense conv2d numerically."""
    data = relay.var("data", shape=(1, 64, 32, 32), dtype="float32")
    x = relay.nn.relu(data)
    w = relay.var("weight", shape=(128, 64, 1, 1), dtype="float32")
    y = relay.nn.conv2d(x, w, channels=128, kernel_size=1, data_layout="NCHW", kernel_layout="OIHW")
    z = relay.nn.relu(y)
    func = relay.Function(relay.analysis.free_vars(z), z)
    # Random block-sparse weight ((8, 1) blocks, density 0.1) reshaped to OIHW.
    params = {
        "weight": tvm.nd.array(
            np.array(random_bsr_matrix(128, 64, 8, 1, 0.1, "float32").todense()).reshape(
                128, 64, 1, 1
            )
        )
    }
    x_np = np.random.randn(1, 64, 32, 32).astype("float32")
    # dense output
    dense_output = run_func(func, params, x_np)
    # sparse conversion with (8, 1) blocks; 0.2 is the sparsity-threshold argument
    sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert(
        func, params, (8, 1), 0.2, "NCHW"
    )
    sparse_output = run_func(sparse_func, params, x_np)
    np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_nhwc():
    """BSR-sparse 1x1 conv2d (NHWC/HWIO) must match the dense conv2d numerically."""
    data = relay.var("data", shape=(1, 32, 32, 64), dtype="float32")
    x = relay.nn.relu(data)
    w = relay.var("weight", shape=(1, 1, 64, 128), dtype="float32")
    y = relay.nn.conv2d(x, w, channels=128, kernel_size=1, data_layout="NHWC", kernel_layout="HWIO")
    z = relay.nn.relu(y)
    func = relay.Function(relay.analysis.free_vars(z), z)
    # Random block-sparse weight; the transpose (.T) converts to HWIO layout.
    params = {
        "weight": tvm.nd.array(
            np.array(random_bsr_matrix(128, 64, 8, 1, 0.1, "float32").todense()).T.reshape(
                1, 1, 64, 128
            )
        )
    }
    x_np = np.random.randn(1, 32, 32, 64).astype("float32")
    # dense output
    dense_output = run_func(func, params, x_np)
    # sparse conversion with (8, 1) blocks; 0.2 is the sparsity-threshold argument
    sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert(
        func, params, (8, 1), 0.2, "NHWC"
    )
    sparse_output = run_func(sparse_func, params, x_np)
    np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_3x3_nchw():
    """BSR-sparse 3x3 conv2d (NCHW) via convert2 must match the dense result."""
    data = relay.var("data", shape=(1, 64, 32, 32), dtype="float32")
    x = relay.nn.relu(data)
    w = relay.var("weight", shape=(128, 64, 3, 3), dtype="float32")
    y = relay.nn.conv2d(
        x, w, channels=128, kernel_size=3, padding=1, data_layout="NCHW", kernel_layout="OIHW"
    )
    z = relay.nn.relu(y)
    func = relay.Function(relay.analysis.free_vars(z), z)
    # The 3x3 kernel is generated as a (128, 64*9) block-sparse matrix and
    # reshaped back to OIHW.
    params = {
        "weight": tvm.nd.array(
            np.array(random_bsr_matrix(128, 64 * 9, 16, 1, 0.1, "float32").todense()).reshape(
                128, 64, 3, 3
            )
        )
    }
    x_np = np.random.randn(1, 64, 32, 32).astype("float32")
    # dense output
    dense_output = run_func(func, params, x_np)
    # sparse: convert2 operates on a module with the weights bound as constants
    func = bind_params_by_name(func, params)
    sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert2(
        func, {}, (16, 1), 0.2, "NCHW", 3
    )
    sparse_output = run_func(sparse_func, params, x_np)
    np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
def test_bsr_sparse_conv2d_3x3_nhwc():
    """BSR-sparse 3x3 conv2d (NHWC) via convert2 must match the dense result."""
    data = relay.var("data", shape=(1, 32, 32, 64), dtype="float32")
    x = relay.nn.relu(data)
    w = relay.var("weight", shape=(3, 3, 64, 128), dtype="float32")
    y = relay.nn.conv2d(
        x, w, channels=128, kernel_size=3, padding=1, data_layout="NHWC", kernel_layout="HWIO"
    )
    z = relay.nn.relu(y)
    func = relay.Function(relay.analysis.free_vars(z), z)
    # (128, 64*9) block-sparse matrix, transposed and reshaped to HWIO.
    params = {
        "weight": tvm.nd.array(
            np.array(random_bsr_matrix(128, 64 * 9, 16, 1, 0.1, "float32").todense()).T.reshape(
                3, 3, 64, 128
            )
        )
    }
    x_np = np.random.randn(1, 32, 32, 64).astype("float32")
    # dense output
    dense_output = run_func(func, params, x_np)
    # sparse: convert2 operates on a module with the weights bound as constants
    func = bind_params_by_name(func, params)
    sparse_func, params = relay.data_dep_optimization.bsr_conv2d.convert2(
        func, {}, (16, 1), 0.2, "NHWC", 3
    )
    sparse_output = run_func(sparse_func, params, x_np)
    np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run every case when invoked directly (outside pytest).
    test_bsr_sparse_conv2d_nhwc()
    test_bsr_sparse_conv2d_nchw()
    test_bsr_sparse_conv2d_3x3_nhwc()
    test_bsr_sparse_conv2d_3x3_nchw()
| 5,796 | 33.301775 | 100 | py |
tvm | tvm-main/tests/python/relay/test_pass_inline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-docstring, too-many-statements
import tvm
from tvm import relay
def get_recursive_count_loop():
    """Build a module containing a self-recursive ``sum_up`` marked Inline.

    Returns the module and the ``sum_up`` GlobalVar. ``sum_up(i)`` computes
    i + (i-1) + ... + 0 by recursion; because it calls itself, the Inline
    pass is expected to leave it in place despite the Inline attribute
    (see test_recursive_called below).
    """
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = relay.ScopeBuilder()
    # if i == 0: return i  else: return sum_up(i - 1) + i
    with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
        sb.ret(i)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype="int32"))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
    mod[sum_up] = func
    iarg = relay.var("i", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg], sum_up(iarg))
    return mod, sum_up
def test_call_chain_inline_leaf():
    """Test when only the leaf call is inlined.

    The call graph is::

         main
        /    \\
       g1     g2
       |
      g11 (inline)
    """

    def get_mod():
        # g11 (the identity function) is the only function carrying Inline.
        mod = tvm.IRModule({})
        x11 = relay.var("x11", shape=(3, 5))
        g11 = relay.GlobalVar("g11")
        fn11 = relay.Function([x11], x11)
        fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        mod[g11] = fn11
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1 + g11(x1))
        fn1 = relay.Function([x1, y1], sb.get())
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # Same module, except g11(x1) has been replaced by x1 inside g1 and
        # the g11 definition has been removed from the module.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1 + x1)
        fn1 = relay.Function([x1, y1], sb.get())
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_call_chain_inline_multiple_levels():
    """Inline through multiple levels of the call chain.

    The call graph is::

              main
             /    \\
       g1 (inline) g2
       |
      g11 (inline)
    """

    def get_mod():
        # Both g11 and g1 carry the Inline attribute; g2 does not.
        mod = tvm.IRModule({})
        x11 = relay.var("x11", shape=(3, 5))
        g11 = relay.GlobalVar("g11")
        fn11 = relay.Function([x11], x11)
        fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        mod[g11] = fn11
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1 + g11(x1))
        fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # g1 (with g11 already folded into it) is inlined into main, so only
        # g2 and main remain in the module.
        mod = tvm.IRModule({})
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = p0 + p1 + p0
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_call_chain_inline_multiple_levels_extern_compiler():
    """Inline through multiple levels where the leaf targets an external compiler.

    The call graph is::

              main
             /    \\
       g1 (inline) g2
       |
      g11 (inline, external compiler)
    """

    def get_mod():
        # g11 carries both Inline and a Compiler attribute ("a").
        mod = tvm.IRModule({})
        x11 = relay.var("x11", shape=(3, 5))
        g11 = relay.GlobalVar("g11")
        fn11 = relay.Function([x11], x11)
        fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn11 = fn11.with_attr("Compiler", "a")
        mod[g11] = fn11
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1 + g11(x1))
        fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # Because g11 targets an external compiler, its body is not folded
        # away: it becomes a local function expression (fn11) that is called
        # inline from main after g1 is inlined.
        mod = tvm.IRModule({})
        x11 = relay.var("x11", shape=(3, 5))
        fn11 = relay.Function([x11], x11)
        fn11 = fn11.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn11 = fn11.with_attr("Compiler", "a")
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = p0 + p1 + fn11(p0)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_recursive_call_with_global():
    """An inlinable global called from inside a recursive function is inlined.

    ``gx`` (identity, Inline) is called inside the recursive ``sum_up``;
    after the pass ``gx(i)`` becomes ``i`` while ``sum_up`` itself remains
    in the module because it is self-recursive.
    """

    def get_mod():
        mod = tvm.IRModule({})
        x = relay.var("x", shape=[], dtype="int32")
        fn0 = relay.Function([x], x)
        fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        gx = relay.GlobalVar("gx")
        mod[gx] = fn0
        sum_up = relay.GlobalVar("sum_up")
        i = relay.var("i", shape=[], dtype="int32")
        sb = relay.ScopeBuilder()
        with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
            sb.ret(i)
        with sb.else_scope():
            one_less = relay.subtract(i, relay.const(1, dtype="int32"))
            global_call = gx(i)
            rec_call = relay.Call(sum_up, [one_less]) + global_call
            sb.ret(relay.add(rec_call, i))
        func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        mod[sum_up] = func
        iarg = relay.var("i", shape=[], dtype="int32")
        mod["main"] = relay.Function([iarg], sum_up(iarg))
        return mod

    def expected():
        # Identical module except gx is gone and gx(i) is replaced by i.
        mod = tvm.IRModule({})
        sum_up = relay.GlobalVar("sum_up")
        i = relay.var("i", shape=[], dtype="int32")
        sb = relay.ScopeBuilder()
        with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
            sb.ret(i)
        with sb.else_scope():
            one_less = relay.subtract(i, relay.const(1, dtype="int32"))
            rec_call = relay.Call(sum_up, [one_less]) + i
            sb.ret(relay.add(rec_call, i))
        func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
        func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        mod[sum_up] = func
        iarg = relay.var("i", shape=[], dtype="int32")
        mod["main"] = relay.Function([iarg], sum_up(iarg))
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_recursive_called():
    """A self-recursive function marked Inline must be left untouched by the pass."""
    module, sum_up = get_recursive_count_loop()
    arg = relay.var("i", shape=[], dtype="int32")
    module["main"] = relay.Function([arg], sum_up(arg))
    before = module
    after = relay.transform.Inline()(module)
    assert tvm.ir.structural_equal(after, before, map_free_vars=True)
def test_recursive_not_called():
    """Inline g1 into main while the unrelated recursive sum_up stays in the module."""

    def get_mod():
        mod, sum_up = get_recursive_count_loop()
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(2, 2))
        x1 = relay.var("x1", shape=(2, 2))
        fn1 = relay.Function([x1], x1)
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        # main no longer calls sum_up; it calls the inlinable identity g1.
        mod["main"] = relay.Function([x, y], x + y + g1(x))
        return mod

    def expected():
        # g1 is inlined (g1(x) -> x) and removed; sum_up is kept untouched.
        mod, sum_up = get_recursive_count_loop()
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(2, 2))
        mod["main"] = relay.Function([x, y], x + y + x)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    ref_mod = expected()
    assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
def test_recursive_not_called_extern_compiler():
    """Same as test_recursive_not_called but g1 targets an external compiler."""

    def get_mod():
        mod, sum_up = get_recursive_count_loop()
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(2, 2))
        x1 = relay.var("x1", shape=(2, 2))
        fn1 = relay.Function([x1], x1)
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        mod["main"] = relay.Function([x, y], x + y + g1(x))
        return mod

    def expected():
        # Because g1 targets an external compiler its body is preserved as a
        # local function expression called inline, not folded to x.
        mod, sum_up = get_recursive_count_loop()
        x = relay.var("x", shape=(2, 2))
        y = relay.var("y", shape=(2, 2))
        x1 = relay.var("x1", shape=(2, 2))
        fn1 = relay.Function([x1], x1)
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        mod["main"] = relay.Function([x, y], x + y + fn1(x))
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    ref_mod = expected()
    assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
def test_globalvar_as_call_arg():
    """Inline two global functions (g1, g2) that are both called from main.

    Both carry the Inline attribute, so after the pass main should contain
    their bodies directly (p0 + p1 and p2 - p3) and the module should no
    longer hold the global definitions.
    """

    def get_mod():
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1)
        fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # Bug fix: this function previously did not create its own module; it
        # mutated (and returned) the already-transformed `mod` closed over
        # from the enclosing scope, so the final structural_equal compared the
        # module with itself and the test could never fail. Build a fresh
        # module, matching every sibling test's expected() pattern.
        mod = tvm.IRModule({})
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = p0 + p1
        call_fn2 = p2 - p3
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_globalvar_as_call_arg_extern_compiler():
    """Inline g1 and g2 into main when both target external compilers."""

    def get_mod():
        # g1 -> compiler "a", g2 -> compiler "b"; both marked Inline.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1)
        fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = fn2.with_attr("Compiler", "b")
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # The external-compiler functions are kept as local function
        # expressions called from main, not folded into plain arithmetic.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1)
        fn1 = relay.Function([x1, y1], sb.get())
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = fn2.with_attr("Compiler", "b")
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = relay.Call(fn1, [p0, p1])
        call_fn2 = relay.Call(fn2, [p2, p3])
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_inline_globalvar_without_args():
    """Inline zero-argument globals that are selected via an If expression."""

    def get_mod():
        mod = tvm.IRModule({})
        fn1 = relay.Function([], relay.const(1))
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = relay.Function([], relay.const(2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        g2 = relay.GlobalVar("g2")
        mod[g1] = fn1
        # NOTE(review): the module is re-typed after adding g1 only; order is
        # kept as-is since InferType placement appears deliberate here.
        mod = relay.transform.InferType()(mod)
        mod[g2] = fn2
        p = relay.var("p", "bool")
        # main picks one of the two globals at runtime and calls it.
        mod["main"] = relay.Function([p], relay.Call(relay.If(p, g1, g2), []))
        return relay.transform.InferType()(mod)

    def expected():
        # The chosen global vars are replaced by the function expressions
        # themselves; the module keeps only main.
        mod = tvm.IRModule({})
        fn1 = relay.Function([], relay.const(1))
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = relay.Function([], relay.const(2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        p = relay.var("p", "bool")
        mod["main"] = relay.Function([p], relay.Call(relay.If(p, fn1, fn2), []))
        return relay.transform.InferType()(mod)

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_inline_globalvar_without_args_extern_compiler():
    """Inline zero-argument external-compiler globals selected via If."""

    def get_mod():
        mod = tvm.IRModule({})
        fn1 = relay.Function([], relay.const(1))
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        fn2 = relay.Function([], relay.const(2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = fn2.with_attr("Compiler", "b")
        g1 = relay.GlobalVar("g1")
        g2 = relay.GlobalVar("g2")
        mod[g1] = fn1
        mod[g2] = fn2
        p = relay.var("p", "bool")
        mod["main"] = relay.Function([p], relay.Call(relay.If(p, g1, g2), []))
        return mod

    def expected():
        # Same as the non-extern variant: the global vars are replaced by the
        # (attribute-preserving) function expressions inside the If.
        mod = tvm.IRModule({})
        fn1 = relay.Function([], relay.const(1))
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn1 = fn1.with_attr("Compiler", "a")
        fn2 = relay.Function([], relay.const(2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn2 = fn2.with_attr("Compiler", "b")
        p = relay.var("p", "bool")
        mod["main"] = relay.Function([p], relay.Call(relay.If(p, fn1, fn2), []))
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_globalvar_called_by_multiple_functions():
    """Inline a global (g2) that has more than one caller.

    The call graph is::

        main   g0
        /  \\   /
       g1   g2 (inline)
    """

    def get_mod():
        # g2 carries Inline and is called from both main and g0.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1)
        fn1 = relay.Function([x1, y1], sb.get())
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        sb1 = relay.ScopeBuilder()
        sb1.ret(x2 - y2)
        fn2 = relay.Function([x2, y2], sb1.get())
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        x0 = relay.var("x0", shape=(3, 5))
        y0 = relay.var("y0", shape=(3, 5))
        z0 = relay.var("z0", shape=(3, 5))
        fn0 = relay.Function([x0, y0, z0], g2(x0, y0) + z0)
        g0 = relay.GlobalVar("g0")
        mod[g0] = fn0
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn1 = g1(p0, p1)
        call_fn2 = g2(p2, p3)
        mod["main"] = relay.Function([p0, p1, p2, p3], call_fn1 * call_fn2)
        return mod

    def expected():
        # g2's body is substituted at both call sites (main and g0) and g2 is
        # removed; g1 stays because it is not marked Inline.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        sb = relay.ScopeBuilder()
        sb.ret(x1 + y1)
        fn1 = relay.Function([x1, y1], sb.get())
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        p0 = relay.var("p0", shape=(3, 5))
        p1 = relay.var("p1", shape=(3, 5))
        p2 = relay.var("p2", shape=(3, 5))
        p3 = relay.var("p3", shape=(3, 5))
        call_fn2 = p2 - p3
        mod["main"] = relay.Function([p0, p1, p2, p3], g1(p0, p1) * call_fn2)
        x0 = relay.var("x0", shape=(3, 5))
        y0 = relay.var("y0", shape=(3, 5))
        z0 = relay.var("z0", shape=(3, 5))
        fn0 = relay.Function([x0, y0, z0], x0 - y0 + z0)
        g0 = relay.GlobalVar("g0")
        mod[g0] = fn0
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_entry_with_inline():
    """Entry-level functions marked Inline, with no callers, are left unchanged.

    The module contains only::

        g1 (inline)   g2 (inline)
    """

    def get_mod():
        # No main and no cross-calls: both functions are module entry points.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + y1)
        fn1 = fn1.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - y2)
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    # The pass must be a no-op: the module equals a freshly built copy.
    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, get_mod(), map_free_vars=True)
def test_callee_not_inline():
    """An inlinable entry function whose callee is not inlinable stays as-is.

    The call graph is::

        main
         |
        g2 (inline)
         |
        g1
    """

    def get_mod():
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + y1)
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    # The pass must leave the module structurally unchanged.
    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, get_mod(), map_free_vars=True)
def test_callee_not_inline_leaf_inline():
    """Only the inlinable leaf is folded; the non-inlinable middle stays.

    The call graph is::

        main
         |
        g2 (inline)
         |
        g1
         |
        g0 (inline)
    """

    def get_mod():
        mod = tvm.IRModule({})
        x0 = relay.var("x0", shape=(3, 5))
        y0 = relay.var("y0", shape=(3, 5))
        fn0 = relay.Function([x0, y0], x0 * y0)
        fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g0 = relay.GlobalVar("g0")
        mod[g0] = fn0
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + g0(x1, y1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    def expected():
        # g0 is substituted into g1 (g0(x1, y1) -> x1 * y1) and removed;
        # g1 and g2 remain because g1 is not inlinable and g2 has no caller.
        mod = tvm.IRModule({})
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + x1 * y1)
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
def test_callee_not_inline_leaf_inline_extern_compiler():
    """Leaf inlining where the leaf targets an external compiler.

    The call graph is::

        main
         |
        g2 (inline)
         |
        g1
         |
        g0 (inline, external compiler)
    """

    def get_mod():
        mod = tvm.IRModule({})
        x0 = relay.var("x0", shape=(3, 5))
        y0 = relay.var("y0", shape=(3, 5))
        fn0 = relay.Function([x0, y0], x0 * y0)
        fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn0 = fn0.with_attr("Compiler", "aa")
        g0 = relay.GlobalVar("g0")
        mod[g0] = fn0
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + g0(x1, y1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    def expected():
        # g0 is kept as a local function expression (attributes preserved)
        # called from g1 rather than being folded into plain arithmetic.
        mod = tvm.IRModule({})
        x0 = relay.var("x0", shape=(3, 5))
        y0 = relay.var("y0", shape=(3, 5))
        fn0 = relay.Function([x0, y0], x0 * y0)
        fn0 = fn0.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        fn0 = fn0.with_attr("Compiler", "aa")
        x1 = relay.var("x1", shape=(3, 5))
        y1 = relay.var("y1", shape=(3, 5))
        fn1 = relay.Function([x1, y1], x1 + fn0(x1, y1))
        g1 = relay.GlobalVar("g1")
        mod[g1] = fn1
        x2 = relay.var("x2", shape=(3, 5))
        y2 = relay.var("y2", shape=(3, 5))
        fn2 = relay.Function([x2, y2], x2 - g1(x2, y2))
        fn2 = fn2.with_attr("Inline", tvm.tir.IntImm("int32", 1))
        g2 = relay.GlobalVar("g2")
        mod[g2] = fn2
        return mod

    mod = get_mod()
    mod = relay.transform.Inline()(mod)
    assert tvm.ir.structural_equal(mod, expected(), map_free_vars=True)
if __name__ == "__main__":
tvm.testing.main()
| 27,484 | 32.074609 | 84 | py |
tvm | tvm-main/tests/python/relay/test_pass_collage_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
import pytest
from tvm.relay.transform import CollagePartition, InferType, CapturePostDfsIndexInSpans
from tvm.target import make_compilation_config
from tvm.relay.collage import MockCostEstimator
from unittest.mock import patch
from tvm.relay.dataflow_pattern import is_op, wildcard
# We'll reuse the target kind "example_target_hook" (registered in
# src/relay/backend/contrib/example_target_hooks/target.cc) as our
# example external codegen target.
def test_pattern_table():
    """Pattern table describing which operators the mock BYOC target supports."""

    def predicate(expr):
        # Every matched sub-expression is accepted.
        return True

    relu = is_op("nn.relu")(wildcard())
    add = is_op("add")(wildcard(), wildcard())
    concat = is_op("concatenate")(wildcard())
    return [
        ("relu", relu, predicate),
        ("add", add, predicate),
        ("concatenate", concat, predicate),
    ]
def _mock_get_pattern_table(target):
    """Stand-in for tvm.relay.op.contrib.get_pattern_table used via @patch below.

    Returns the mock pattern table for "example_target_hook" and None otherwise.
    """
    return test_pattern_table() if target == "example_target_hook" else None
def run_collage(
    input_mod, targets, cost_estimator, expected_mod, tvm_max_depth=8, byoc_max_depth=8
):
    """Run CollagePartition over input_mod and check the result matches expected_mod."""
    config_dict = {
        "relay.collage.tvm_max_depth": tvm_max_depth,
        "relay.collage.byoc_max_depth": byoc_max_depth,
    }
    expected_mod = InferType()(expected_mod)
    pass_ctxt = tvm.transform.PassContext(config=config_dict)
    with pass_ctxt:
        config = make_compilation_config(pass_ctxt, targets)
        actual_mod = InferType()(input_mod)
        # Post-dfs indexes are captured purely as a debugging aid for failures.
        actual_mod = CapturePostDfsIndexInSpans()(actual_mod)
        actual_mod = CollagePartition(config, cost_estimator)(actual_mod)
    if not tvm.ir.structural_equal(actual_mod, expected_mod, map_free_vars=True):
        # Dump all three modules in full so a mismatch is easy to inspect.
        for label, module in (
            ("Input module:", input_mod),
            ("Actual module:", actual_mod),
            ("Expected module:", expected_mod),
        ):
            print(label)
            print(module)
        # Re-check with the asserting variant to surface the disagreeing
        # sub-expressions in the failure message.
        tvm.ir.assert_structural_equal(actual_mod, expected_mod, map_free_vars=True)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_single_op_llvm(mock_get_pattern_table):
    """When llvm is cheaper than the BYOC target the relu stays unpartitioned."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
nn.relu(%x)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
nn.relu(%x)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    # llvm (cost 1) beats the BYOC backend (cost 2), so no partition is created.
    cost_estimator = MockCostEstimator(
        {
            "llvm": 1,
            "example_target_hook": 2,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_single_op_byoc(mock_get_pattern_table):
    """When the BYOC target is cheaper the relu is offloaded as a partition."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
nn.relu(%x)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu(%x)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    # The BYOC backend (cost 1) beats llvm (cost 2), so the op is partitioned.
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod)
@pytest.mark.parametrize("byoc_max_depth", [1, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_diamond_valid_topology(mock_get_pattern_table, byoc_max_depth):
    """A diamond dataflow graph is partitioned differently per byoc_max_depth."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = abs(%0);
%2 = nn.relu(%1);
add(%1, %2)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    # With depth 3 the relu and add are offloaded as two named partitions.
    expected_3_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @collage_example_target_hook_nn_relu_add(%FunctionVar_02: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_add") -> Tensor[(10, 10), float32] {
%1 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%2 = %1(%FunctionVar_02);
%3 = fn (%FunctionVar_03: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_03, %FunctionVar_1)
};
%3(%FunctionVar_02, %2)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook_nn_relu_add(%5)
}
"""
    # With depth 1 the fused relu+add partition gets the generic name.
    expected_1_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_1)
};
%2(%FunctionVar_0, %1)
}
def @collage_example_target_hook_nn_relu(%FunctionVar_03: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%3 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%3(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook(%5)
}
"""
    expected_mod = tvm.relay.fromtext(expected_1_txt if byoc_max_depth == 1 else expected_3_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        }
    )
    run_collage(
        mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=byoc_max_depth
    )
@pytest.mark.parametrize("tvm_max_depth", [1, 2, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_tvm_max_depth(mock_get_pattern_table, tvm_max_depth):
    """tvm_max_depth bounds how many ops a TVM candidate may cover; the deeper
    the TVM candidates, the fewer relus are offloaded to the BYOC target."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    # Expected partitionings keyed by tvm_max_depth.
    expected_txts = {
        1: """
#[version = "0.0.5"]
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%4(%3)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook(%x)
}
""",
        2: """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%0(%FunctionVar_0)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%1 = @collage_example_target_hook_nn_relu(%x);
%2 = nn.relu(%1);
nn.relu(%2)
}
""",
        3: """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
""",
    }
    expected_mod = tvm.relay.fromtext(expected_txts[tvm_max_depth])
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 100,
            "example_target_hook": 99,
        }
    )
    run_collage(
        mod, targets, cost_estimator, expected_mod, tvm_max_depth=tvm_max_depth, byoc_max_depth=1
    )
@pytest.mark.parametrize("byoc_max_depth", [1, 2, 3])
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_byoc_max_depth(mock_get_pattern_table, byoc_max_depth):
    """byoc_max_depth bounds how many ops a BYOC candidate may cover; the deeper
    the BYOC candidates, the more of the relu chain is offloaded."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    # Expected partitionings keyed by byoc_max_depth.
    expected_txts = {
        1: """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
nn.relu(%1)
}
""",
        2: """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%2(%1)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%3 = nn.relu(%x);
@collage_example_target_hook_nn_relu_nn_relu(%3)
}
""",
        3: """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu_nn_relu_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%4(%3)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu_nn_relu_nn_relu(%x)
}
""",
    }
    expected_mod = tvm.relay.fromtext(expected_txts[byoc_max_depth])
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 99,
            "example_target_hook": 100,
        }
    )
    run_collage(
        mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=byoc_max_depth
    )
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_output_tuple(mock_get_pattern_table):
    """A partition can yield several intermediate results via an output tuple."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = abs(%1);
(%0, %1, %2)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
(%1, %3)
}
def @main(%x: Tensor[(10, 10), float32]) -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%4 = @collage_example_target_hook(%x);
%5 = %4.1;
%6 = %4.0;
%7 = abs(%5);
(%6, %5, %7)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=2, byoc_max_depth=2)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_partition_intermediate_tuple(mock_get_pattern_table):
    """A tuple flowing between partitions is rebuilt at the partition boundary."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = (%0, %1);
concatenate(%2)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook(%FunctionVar_0: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook") -> (Tensor[(10, 10), float32], Tensor[(10, 10), float32]) {
%0 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
(%1, %3)
}
def @collage_example_target_hook_concatenate(%FunctionVar_03: (Tensor[(10, 10), float32], Tensor[(10, 10), float32]), Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_concatenate") -> Tensor[(20, 10), float32] {
%4 = fn (%FunctionVar_04: (Tensor[(10, 10), float32], Tensor[(10, 10), float32]), Composite="concatenate") -> Tensor[(20, 10), float32] {
concatenate(%FunctionVar_04)
};
%4(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(20, 10), float32] {
%5 = @collage_example_target_hook(%x);
%6 = %5.0;
%7 = %5.1;
%8 = (%6, %7);
@collage_example_target_hook_concatenate(%8)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=3, byoc_max_depth=5)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_fusion_benefit(mock_get_pattern_table):
    """Offloading wins when the fused BYOC partition beats the sum of per-op
    llvm costs, even though the BYOC per-op cost (6) exceeds llvm's (5)."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = abs(%x);
%3 = nn.relu(%2);
%4 = add(%1, %3);
%5 = nn.relu(%4);
abs(%5)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu(%FunctionVar_0: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%1 = %0(%FunctionVar_0);
%2 = fn (%FunctionVar_03: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%3 = fn (%FunctionVar_05: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_05)
};
%4 = %2(%1);
%5 = %3(%FunctionVar_1);
%6 = fn (%FunctionVar_02: Tensor[(10, 10), float32], %FunctionVar_11: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_02, %FunctionVar_11)
};
%7 = %6(%4, %5);
%8 = fn (%FunctionVar_01: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_01)
};
%8(%7)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%9 = abs(%x);
%10 = @collage_example_target_hook_nn_relu_nn_relu_nn_relu_add_nn_relu(%x, %9);
abs(%10)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 5,
            "example_target_hook": 6,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=1, byoc_max_depth=5)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_double_residual(mock_get_pattern_table):
    """Two residual (skip) connections feed the add partition via two inputs."""
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = abs(%0);
%2 = add(%0, %1);
add(%1, %2)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook_add_add(%FunctionVar_0: Tensor[(10, 10), float32], %FunctionVar_1: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_add_add") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_02: Tensor[(10, 10), float32], %FunctionVar_12: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_02, %FunctionVar_12)
};
%1 = %0(%FunctionVar_1, %FunctionVar_0);
%2 = fn (%FunctionVar_01: Tensor[(10, 10), float32], %FunctionVar_11: Tensor[(10, 10), float32], Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_11)
};
%2(%FunctionVar_0, %1)
}
def @collage_example_target_hook_nn_relu(%FunctionVar_03: Tensor[(10, 10), float32], Primitive=1, Compiler="example_target_hook", global_symbol="collage_example_target_hook_nn_relu") -> Tensor[(10, 10), float32] {
%3 = fn (%FunctionVar_04: Tensor[(10, 10), float32], Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_04)
};
%3(%FunctionVar_03)
}
def @main(%x: Tensor[(10, 10), float32]) -> Tensor[(10, 10), float32] {
%4 = @collage_example_target_hook_nn_relu(%x);
%5 = abs(%4);
@collage_example_target_hook_add_add(%5, %4)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        }
    )
    run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=4, byoc_max_depth=4)
@patch("tvm.relay.op.contrib.get_pattern_table", wraps=_mock_get_pattern_table)
def test_pruning_heuristic(mock_get_pattern_table):
    """Pruning heuristics reduce the candidate set before any cost estimation."""
    # In this example both the default TVM partition spec and the 'example_target_hook' partition
    # spec will yield the same set of candidates, and those candidates will include all 7
    # partitions of the four operators (ie 14 in total).
    #
    # However, the pruning heuristics will reduce those back to just two 'maximal' candidates
    # which have all four operators fused. We'll then just estimate those for the two targets.
    mod_txt = """
#[version = "0.0.5"]
def @main(%x: Tensor[(10, 10), float32]) {
%0 = nn.relu(%x);
%1 = nn.relu(%0);
%2 = add(%0, %1);
add(%1, %2)
}
"""
    mod = tvm.relay.fromtext(mod_txt)
    expected_txt = """
#[version = "0.0.5"]
def @collage_example_target_hook_nn_relu_nn_relu_add_add(
%FunctionVar_0: Tensor[(10, 10), float32],
Primitive=1,
Compiler="example_target_hook",
global_symbol="collage_example_target_hook_nn_relu_nn_relu_add_add") -> Tensor[(10, 10), float32] {
%0 = fn (%FunctionVar_03: Tensor[(10, 10), float32] , Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_03)
};
%1 = %0(%FunctionVar_0) ;
%2 = fn (%FunctionVar_02: Tensor[(10, 10), float32] , Composite="relu") -> Tensor[(10, 10), float32] {
nn.relu(%FunctionVar_02)
};
%3 = %2(%1);
%4 = fn (%FunctionVar_04: Tensor[(10, 10), float32] , %FunctionVar_11: Tensor[(10, 10), float32] , Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_04, %FunctionVar_11)
};
%5 = %4(%1, %3);
%6 = fn (%FunctionVar_01: Tensor[(10, 10), float32] , %FunctionVar_1: Tensor[(10, 10), float32] , Composite="add") -> Tensor[(10, 10), float32] {
add(%FunctionVar_01, %FunctionVar_1)
};
%6(%3, %5)
}
def @main(%x: Tensor[(10, 10), float32] ) -> Tensor[(10, 10), float32] {
@collage_example_target_hook_nn_relu_nn_relu_add_add(%x)
}
"""
    expected_mod = tvm.relay.fromtext(expected_txt)
    targets = [
        tvm.target.Target("llvm"),
        tvm.target.Target("example_target_hook"),
    ]
    cost_estimator = MockCostEstimator(
        {
            "llvm": 2,
            "example_target_hook": 1,
        },
        # Limit the number of cost estimations to 2 to assert pruning did its job.
        max_estimates=2,
    )
    run_collage(mod, targets, cost_estimator, expected_mod, tvm_max_depth=4, byoc_max_depth=4)
if __name__ == "__main__":
tvm.testing.main()
| 25,845 | 36.78655 | 317 | py |
tvm | tvm-main/tests/python/relay/test_analysis_feature.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature, Feature
from tvm.relay.transform import gradient
from tvm.relay.prelude import Prelude
from tvm.relay.testing import run_infer_type
def test_prelude():
    """All Relay IR features used by the Prelude are detected."""
    prelude = Prelude()
    expected = {
        Feature.fVar,
        Feature.fGlobalVar,
        Feature.fConstant,
        Feature.fTuple,
        Feature.fTupleGetItem,
        Feature.fFunction,
        Feature.fOp,
        Feature.fCall,
        Feature.fLet,
        Feature.fIf,
        Feature.fConstructor,
        Feature.fMatch,
    }
    assert detect_feature(prelude.mod) == expected
def test_ad():
    """The gradient transform introduces lets and reference create/read/write."""
    tensor_type = relay.TensorType((10, 10), "float32")
    x = relay.var("x", tensor_type)
    func = run_infer_type(relay.Function([x], x + x))
    mod = tvm.IRModule.from_expr(gradient(func))
    mod = relay.transform.InferType()(mod)
    expected = {
        Feature.fVar,
        Feature.fTuple,
        Feature.fTupleGetItem,
        Feature.fFunction,
        Feature.fOp,
        Feature.fCall,
        Feature.fLet,
        Feature.fRefCreate,
        Feature.fRefRead,
        Feature.fRefWrite,
    }
    assert detect_feature(mod["main"]) == expected
if __name__ == "__main__":
test_prelude()
test_ad()
| 2,247 | 27.820513 | 62 | py |
tvm | tvm-main/tests/python/relay/test_pass_dead_code_elimination.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm.relay import Function, transform
from tvm.relay.testing import inception_v3
import pytest
cpu_scope = tvm.target.VirtualDevice(tvm.cpu(), tvm.target.Target("llvm"))
metatable = {"VirtualDevice": [cpu_scope]}
core = tvm.IRModule()
core.import_from_std("core.rly")
def optimize_and_check(before_program, after_program, passes):
    """Apply `passes` to `before_program` and assert the result is structurally
    equal to `after_program`.

    Either program may be given as Relay source text, in which case it is
    parsed first. `passes` may be a single pass or a list/tuple of passes.
    """
    if isinstance(before_program, str):
        before_program = tvm.relay.parse(before_program)
    if isinstance(after_program, str):
        after_program = tvm.relay.parse(after_program)
    # Fix: accept any sequence of passes, not just a list — a tuple would
    # previously have been wrapped into a one-element list and crash Sequential.
    if not isinstance(passes, (list, tuple)):
        passes = [passes]
    optimize = tvm.transform.Sequential(list(passes))
    optimized_program = optimize(before_program)
    # Print both modules so a failure is easy to diagnose from the test log.
    print("Actual:")
    print(optimized_program)
    print("Expected:")
    print(after_program)
    assert tvm.ir.structural_equal(optimized_program, after_program, map_free_vars=True)
def test_dead_let():
    """DCE removes a let binding whose variable is never used."""
    before_program = """
#[version = "0.0.5"]
def @main(%z: int) {
let %x = 1;
%z
}
"""
    after_program = """
#[version = "0.0.5"]
def @main(%z: int) {
%z
}
"""
    optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_one_live_let():
    """Only the let binding that is actually used (%x) survives DCE."""
    before_program = """
#[version = "0.0.5"]
def @main(%z: int) {
let %x = 1;
let %y = 2;
%x + %x
}
"""
    after_program = """
#[version = "0.0.5"]
def @main(%z: int) {
let %x = 1;
%x + %x
}
"""
    optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_nested_let():
    """A dead binding (%a) is removed even when followed by a live one (%c)."""
    before_program = """
#[version = "0.0.5"]
def @main(%d: int, %b: int) {
let %a = %b;
let %c = %d;
%c
}
"""
    after_program = """
#[version = "0.0.5"]
def @main(%d: int, %b: int) {
let %c = %d;
%c
}
"""
    optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_live_recursion():
    """A recursive let-bound function that is called is preserved intact."""
    before_program = """
#[version = "0.0.5"]
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
%f(2, 10000)
}
"""
    # The program is unchanged — the binding is live.
    after_program = """
#[version = "0.0.5"]
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
%f(2, 10000)
}
"""
    optimize_and_check(
        before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
    )
def test_dead_recursion():
    """A recursive function that is never called is removed entirely."""
    before_program = """
#[version = "0.0.5"]
def @main() {
let %f = fn (%n: int, %data: int) -> int {
if (%n == 0) {
%data
} else {
%f(%n - 1, log(%data))
}
};
()
}
"""
    after_program = """
#[version = "0.0.5"]
def @main() {
()
}
"""
    optimize_and_check(
        before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
    )
def test_add_with_let():
    """A dead let inside a subexpression is elided, leaving just its body."""
    before_program = """
#[version = "0.0.5"]
def @main() {
(let %a = 1; 3) + 2
}
"""
    after_program = """
#[version = "0.0.5"]
def @main() {
3 + 2
}
"""
    optimize_and_check(
        before_program, after_program, [transform.DeadCodeElimination(), transform.InferType()]
    )
def test_tuple_get_item():
    """A dead binding before a tuple projection is removed."""
    before_program = """
#[version = "0.0.5"]
def @main() {
let %a = 100;
(1, 2, 3, 4).0
}
"""
    after_program = """
#[version = "0.0.5"]
def @main() {
(1, 2, 3, 4).0
}
"""
    optimize_and_check(before_program, after_program, transform.DeadCodeElimination())
def test_inline_into_function():
    """Don't inline across function boundaries.

    %z (used once inside the inner function) is inlined there, but %x (defined
    outside) must stay a let binding since the inner function is called twice.
    """
    before_program = """
#[version = "0.0.5"]
def @main() {
let %x = 1 + 1;
let %f = fn (%y: int) -> int {
let %z = %y + %y;
%x + %z
};
(%f(2), %f(3))
}
"""
    after_program = """
#[version = "0.0.5"]
def @main() {
let %x = 1 + 1;
let %f = fn (%y: int) -> int {
%x + (%y + %y)
};
(%f(2), %f(3))
}
"""
    optimize_and_check(
        before_program, after_program, transform.DeadCodeElimination(inline_once=True)
    )
def test_impure_op():
    """Don't elide calls to side-effecting operators.

    The pure `cast` bindings are inlined, but the alloc_storage/kill calls
    (which have side effects) are kept.
    """
    before_program = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main() {
let %size: int64 = cast(1024, dtype="int64");
let %alignment: int64 = cast(64, dtype="int64");
let %x = memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%x);
0
}
""",
        "from_string",
        core,
        metatable,
    )
    after_program = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main() {
%0 = memory.alloc_storage(cast(1024, dtype="int64"),
cast(64, dtype="int64"),
virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%0);
0
}
""",
        "from_string",
        core,
        metatable,
    )
    optimize_and_check(
        before_program, after_program, transform.DeadCodeElimination(inline_once=True)
    )
def test_impure_func():
    """Don't elide calls to side-effecting functions.

    @f allocates storage, so the (otherwise unused) binding of its call in
    @main must be preserved.
    """
    before_program = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @f() -> int {
let %size: int64 = cast(1024, dtype="int64");
let %alignment: int64 = cast(64, dtype="int64");
let %x = memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%x);
0
}
def @main() -> int {
let %y = @f();
0
}
""",
        "from_string",
        core,
        metatable,
    )
    after_program = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @f() -> int {
%0 = memory.alloc_storage(cast(1024, dtype="int64"),
cast(64, dtype="int64"),
virtual_device=meta[VirtualDevice][0]);
let %_ = memory.kill(%0);
0
}
def @main() -> int {
let %y = @f();
0
}
""",
        "from_string",
        core,
        metatable,
    )
    optimize_and_check(
        before_program, after_program, transform.DeadCodeElimination(inline_once=True)
    )
def test_refs():
    """Don't elide expressions with reference create/read/write side effects."""
    before_program = """
#[version = "0.0.5"]
def @f(%r) -> int {
let %v = ref_read(%r);
let %u = ref_write(%r, %v + 1);
%v
}
def @main() -> int {
let %r = ref(0);
let %y = @f(%r);
let %z = @f(%r);
%z
}
"""
    # Nothing may be removed or inlined: every binding has ref side effects.
    after_program = before_program
    optimize_and_check(
        before_program,
        after_program,
        [transform.InferType(), transform.DeadCodeElimination(inline_once=True)],
    )
def test_complexity():
    """DCE with inlining leaves a large real network (inception_v3) unchanged."""
    net = inception_v3.get_net(1, 1000, (3, 299, 299), "float32")
    mod = transform.InferType()(tvm.IRModule.from_expr(net))
    optimize_and_check(mod, mod, transform.DeadCodeElimination(inline_once=True))
if __name__ == "__main__":
tvm.testing.main()
| 8,615 | 23.477273 | 99 | py |
tvm | tvm-main/tests/python/relay/test_py_converter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import run_as_python
from tvm.relay.prelude import Prelude
from tvm.runtime.container import ADT
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
# helper: uses a dummy let binding to sequence a list
# of expressions: expr1; expr2; expr3, etc.
def seq(*exprs):
    """Sequence *exprs* left-to-right with dummy let bindings.

    Each intermediate expression is bound to a throwaway variable so it is
    evaluated for its side effects; the value of the final expression is
    the value of the whole chain.
    """
    chained = exprs[0]
    idx = 1
    while idx < len(exprs):
        chained = relay.Let(relay.var("_"), chained, exprs[idx])
        idx += 1
    return chained
# creates a dummy ADT for testing
def init_box_adt(mod):
    """Register a one-constructor polymorphic 'box' ADT in *mod*.

    Returns the (global type var, constructor) pair for use in tests.
    """
    type_var = relay.GlobalTypeVar("box")
    type_param = relay.TypeVar("a")
    ctor = relay.Constructor("box", [type_param], type_var)
    mod[type_var] = relay.TypeData(type_var, [type_param], [ctor])
    return (type_var, ctor)
# assert that the candidate is a NDArray with value val
def assert_tensor_value(candidate, val):
    """Assert that *candidate* is an NDArray whose contents equal *val*."""
    assert isinstance(candidate, tvm.nd.NDArray)
    expected = np.array(val)
    assert np.array_equal(candidate.numpy(), expected)
# assert that the candidate is an ADT with the indicated number of fields
def assert_adt_len(candidate, fields):
    """Assert that *candidate* is an ADT holding exactly *fields* members."""
    assert isinstance(candidate, ADT)
    actual = len(candidate)
    assert actual == fields
# assert that the candidate is a ConstructorValue with the approrpaite constructor
# and number of fields
def assert_constructor_value(candidate, constructor, fields):
    """Assert *candidate* was built by *constructor* and carries *fields* args."""
    assert isinstance(candidate, ConstructorValue)
    same_tag = candidate.tag == constructor.tag
    assert same_tag
    assert len(candidate.fields) == fields
def test_create_empty_tuple():
    """An empty relay tuple evaluates to a zero-length ADT."""
    result = run_as_python(relay.Tuple([]))
    assert_adt_len(result, 0)
def test_create_scalar():
    """A scalar constant evaluates to a tensor with that value."""
    result = run_as_python(relay.const(1))
    assert_tensor_value(result, 1)
def test_create_tensor():
    """A 2x2 constant evaluates to an equal 2x2 tensor."""
    result = run_as_python(relay.const([[1, 1], [2, 2]]))
    assert_tensor_value(result, [[1, 1], [2, 2]])
def test_create_nested_tuple():
    """A tuple containing another tuple evaluates to nested ADTs."""
    inner = relay.Tuple([relay.const(3), relay.const(4)])
    outer = relay.Tuple([relay.const(1), relay.const(2), inner])
    result = run_as_python(outer)
    assert_adt_len(result, 3)
    # top-level scalar fields
    assert_tensor_value(result[0], 1)
    assert_tensor_value(result[1], 2)
    # nested tuple and its fields
    assert_adt_len(result[2], 2)
    assert_tensor_value(result[2][0], 3)
    assert_tensor_value(result[2][1], 4)
def test_tuple_get_item():
    """TupleGetItem projects out both top-level and nested fields."""
    inner = relay.Tuple([relay.const(3), relay.const(4)])
    tup = relay.Tuple([relay.const(1), relay.const(2), inner])
    # top-level projections
    for idx, expected in ((0, 1), (1, 2)):
        val = run_as_python(relay.TupleGetItem(tup, idx))
        assert_tensor_value(val, expected)
    # projections through the nested tuple
    nested = relay.TupleGetItem(tup, 2)
    for idx, expected in ((0, 3), (1, 4)):
        val = run_as_python(relay.TupleGetItem(nested, idx))
        assert_tensor_value(val, expected)
def test_create_let():
    """A let-bound value can be referenced (twice) from the let body."""
    bound = relay.Var("v")
    expr = relay.Let(bound, relay.Tuple([]), relay.Tuple([bound, bound]))
    result = run_as_python(expr)
    assert_adt_len(result, 2)
    for member in (result[0], result[1]):
        assert_adt_len(member, 0)
def test_create_ref():
    """Creating a ref yields a RefValue wrapping the initial value."""
    ref_val = run_as_python(relay.RefCreate(relay.Tuple([])))
    assert isinstance(ref_val, RefValue)
    assert_adt_len(ref_val.value, 0)
def test_ref_read():
    """Reading a freshly created ref returns its initial value."""
    cell = relay.Var("v")
    expr = relay.Let(cell, relay.RefCreate(relay.Tuple([])), relay.RefRead(cell))
    result = run_as_python(expr)
    assert_adt_len(result, 0)
def test_ref_write():
    """RefWrite returns unit, and subsequent reads observe the mutation.

    Also checks that a value read *before* a write is unaffected by the
    write (snapshot via a second ref w).
    """
    # check that the result of a ref write is an empty tuple
    v = relay.Var("v")
    initial_write = relay.Let(
        v,
        relay.RefCreate(relay.Tuple([relay.const(1)])),
        relay.RefWrite(v, relay.Tuple([relay.const(2)])),
    )
    write_val = run_as_python(initial_write)
    assert_adt_len(write_val, 0)
    # now ensure that the value, once written, can be read back
    # (we read the value before and after mutation)
    w = relay.Var("w")
    read_after_write = relay.Let(
        v,
        relay.RefCreate(relay.Tuple([relay.const(1)])),
        relay.Let(
            w,
            relay.RefCreate(relay.RefRead(v)),
            seq(
                relay.RefWrite(v, relay.Tuple([relay.const(2)])),
                relay.Tuple([relay.RefRead(w), relay.RefRead(v)]),
            ),
        ),
    )
    read_val = run_as_python(read_after_write)
    assert_adt_len(read_val, 2)
    assert_adt_len(read_val[0], 1)
    assert_adt_len(read_val[1], 1)
    # w holds the pre-write snapshot (1); v holds the written value (2)
    assert_tensor_value(read_val[0][0], 1)
    assert_tensor_value(read_val[1][0], 2)
def test_if():
    """Only the taken branch of an If is evaluated.

    Each branch writes a distinct value into a ref, so executing the wrong
    (or both) branches would produce a different final value.
    """
    # we will have effects in the blocks to ensure only the intended one is executed
    true_cond = relay.const(True)
    false_cond = relay.const(False)
    v = relay.Var("v")
    true_branch = seq(relay.RefWrite(v, relay.const(1)), relay.RefRead(v))
    false_branch = seq(relay.RefWrite(v, relay.const(2)), relay.RefRead(v))
    true_expr = relay.Let(
        v, relay.RefCreate(relay.const(0)), relay.If(true_cond, true_branch, false_branch)
    )
    false_expr = relay.Let(
        v, relay.RefCreate(relay.const(0)), relay.If(false_cond, true_branch, false_branch)
    )
    true_val = run_as_python(true_expr)
    assert_tensor_value(true_val, 1)
    false_val = run_as_python(false_expr)
    assert_tensor_value(false_val, 2)
def test_local_function():
    """A let-bound identity function can be applied to different argument types."""
    param = relay.Var("v")
    identity = relay.Function([param], param)
    fvar = relay.Var("f")
    # apply to an empty tuple
    unit_result = run_as_python(relay.Let(fvar, identity, fvar(relay.Tuple([]))))
    assert_adt_len(unit_result, 0)
    # apply to a scalar
    scalar_result = run_as_python(relay.Let(fvar, identity, fvar(relay.const(2))))
    assert_tensor_value(scalar_result, 2)
def test_global_function():
    """A polymorphic global identity function works on tensors and tuples."""
    mod = tvm.IRModule()
    ident = relay.GlobalVar("ident")
    a = relay.TypeVar("a")
    v = relay.Var("v", a)
    # identity with type parameter [a]: fn<a>(v: a) -> a
    mod[ident] = relay.Function([v], v, a, [a])
    call1 = ident(relay.const(1))
    call2 = ident(relay.Tuple([relay.const(2), relay.const(2)]))
    call_val1 = run_as_python(call1, mod)
    assert_tensor_value(call_val1, 1)
    call_val2 = run_as_python(call2, mod)
    assert_adt_len(call_val2, 2)
    assert_tensor_value(call_val2[0], 2)
    assert_tensor_value(call_val2[1], 2)
def test_constructor():
    """Applying an ADT constructor yields a ConstructorValue wrapping the arg."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    # box a scalar
    init_box_int = box_ctor(relay.const(1))
    box_val_int = run_as_python(init_box_int, mod)
    assert_constructor_value(box_val_int, box_ctor, 1)
    assert_tensor_value(box_val_int.fields[0], 1)
    # box an empty tuple
    init_box_tup = box_ctor(relay.Tuple([]))
    box_val_tup = run_as_python(init_box_tup, mod)
    assert_constructor_value(box_val_tup, box_ctor, 1)
    assert_adt_len(box_val_tup.fields[0], 0)
def test_match_wildcard():
    """A wildcard pattern matches any value."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    match = relay.Let(
        v,
        box_ctor(relay.Tuple([])),
        relay.Match(v, [relay.Clause(relay.PatternWildcard(), relay.const(1))]),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 1)
def test_match_var():
    """A var pattern binds the whole matched value."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    match = relay.Let(
        v, box_ctor(relay.const(1)), relay.Match(v, [relay.Clause(relay.PatternVar(w), w)])
    )
    match_val = run_as_python(match, mod)
    # w is bound to the full constructor value, not its field
    assert_constructor_value(match_val, box_ctor, 1)
    assert_tensor_value(match_val.fields[0], 1)
def test_match_pattern():
    """A constructor pattern destructures the value and binds its field."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    match = relay.Let(
        v,
        box_ctor(relay.const(1)),
        relay.Match(
            v, [relay.Clause(relay.PatternConstructor(box_ctor, [relay.PatternVar(w)]), w)]
        ),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 1)
def test_nested_match_pattern():
    """Constructor patterns can be nested to destructure box(box(x))."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    match = relay.Let(
        v,
        box_ctor(box_ctor(relay.const(2))),
        relay.Match(
            v,
            [
                relay.Clause(
                    relay.PatternConstructor(
                        box_ctor, [relay.PatternConstructor(box_ctor, [relay.PatternVar(w)])]
                    ),
                    w,
                )
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 2)
def test_match_order():
    """Match clauses are tried in order: an earlier wildcard wins."""
    mod = tvm.IRModule()
    box, box_ctor = init_box_adt(mod)
    v = relay.Var("v")
    w = relay.Var("w")
    # wildcard pattern goes first
    match = relay.Let(
        v,
        box_ctor(box_ctor(relay.const(2))),
        relay.Match(
            v,
            [
                relay.Clause(relay.PatternWildcard(), relay.const(1)),
                relay.Clause(
                    relay.PatternConstructor(
                        box_ctor, [relay.PatternConstructor(box_ctor, [relay.PatternVar(w)])]
                    ),
                    w,
                ),
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    # the wildcard clause must shadow the more specific one below it
    assert_tensor_value(match_val, 1)
def test_local_recursion():
    """A let-bound function may recursively reference its own binding."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = p.mod.get_type("List")
    v = relay.Var("v")
    h = relay.Var("h")
    t = relay.Var("t")
    f = relay.Var("f")
    # just returns the same list
    let = relay.Let(
        f,
        relay.Function(
            [v],
            relay.Match(
                v,
                [
                    relay.Clause(
                        relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
                        cons(h, f(t)),
                    ),
                    relay.Clause(relay.PatternConstructor(nil, []), nil()),
                ],
            ),
        ),
        f(cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil())))),
    )
    val = run_as_python(let, mod)
    # walk the copied list [1, 2, 3] node by node
    assert_constructor_value(val, cons, 2)
    assert_tensor_value(val.fields[0], 1)
    assert_constructor_value(val.fields[1], cons, 2)
    assert_tensor_value(val.fields[1].fields[0], 2)
    assert_constructor_value(val.fields[1].fields[1], cons, 2)
    assert_tensor_value(val.fields[1].fields[1].fields[0], 3)
    assert_constructor_value(val.fields[1].fields[1].fields[1], nil, 0)
def test_global_recursion():
    """A global function may recursively call itself via its GlobalVar."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    rlist, cons, nil = p.mod.get_type("List")
    copy = relay.GlobalVar("copy")
    # same as above: it copies the given list
    a = relay.TypeVar("a")
    v = relay.Var("v", rlist(a))
    h = relay.Var("h")
    t = relay.Var("t")
    copy_def = relay.Function(
        [v],
        relay.Match(
            v,
            [
                relay.Clause(
                    relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
                    cons(h, copy(t)),
                ),
                relay.Clause(relay.PatternConstructor(nil, []), nil()),
            ],
        ),
        rlist(a),
        [a],
    )
    mod[copy] = copy_def
    # copy [1, 2]
    call1 = copy_def(cons(relay.const(1), cons(relay.const(2), nil())))
    val1 = run_as_python(call1, mod)
    assert_constructor_value(val1, cons, 2)
    assert_tensor_value(val1.fields[0], 1)
    assert_constructor_value(val1.fields[1], cons, 2)
    assert_tensor_value(val1.fields[1].fields[0], 2)
    assert_constructor_value(val1.fields[1].fields[1], nil, 0)
    # copy [()]
    call2 = copy_def(cons(relay.Tuple([]), nil()))
    val2 = run_as_python(call2, mod)
    assert_constructor_value(val2, cons, 2)
    assert_adt_len(val2.fields[0], 0)
    assert_constructor_value(val2.fields[1], nil, 0)
def test_higher_order_call():
    """A function that takes a function argument applies it correctly."""
    hvar = relay.Var("h")
    fvar = relay.Var("f")
    xvar = relay.Var("x")
    # h applies its argument to the empty tuple
    applier = relay.Function([fvar], fvar(relay.Tuple([])))
    # case 1: pass an anonymous callee inline
    anon_expr = relay.Let(hvar, applier, hvar(relay.Function([xvar], relay.const(1))))
    assert_tensor_value(run_as_python(anon_expr), 1)
    # case 2: bind the callee to a name first
    gvar = relay.Var("g")
    named_body = relay.Let(gvar, relay.Function([xvar], relay.const(2)), hvar(gvar))
    named_expr = relay.Let(hvar, applier, named_body)
    assert_tensor_value(run_as_python(named_expr), 2)
def test_match_effect_exactly_once():
    """The scrutinee of a match is evaluated exactly once.

    Evaluating `data` appends one element to the ref'd list; if the match
    re-evaluated it per clause, the list would grow past length 1 and a
    different clause would fire.
    """
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = p.mod.get_type("List")
    # the list should be of length 1!
    # Unless we mistakenly execute the data clause more than once
    r = relay.Var("r")
    data = seq(relay.RefWrite(r, cons(relay.Tuple([]), relay.RefRead(r))), relay.RefRead(r))
    match = relay.Let(
        r,
        relay.RefCreate(nil()),
        relay.Match(
            data,
            [
                relay.Clause(relay.PatternConstructor(nil, []), relay.const(0)),
                relay.Clause(
                    relay.PatternConstructor(
                        cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
                    ),
                    relay.const(1),
                ),
                relay.Clause(relay.PatternWildcard(), relay.const(2)),
            ],
        ),
    )
    match_val = run_as_python(match, mod)
    assert_tensor_value(match_val, 1)
def test_arbitrary_let_nesting():
    """Lets may appear in arbitrary expression positions (tuple fields, args)."""
    # something that is tricky to do in Python but comes naturally in Relay
    mod = tvm.IRModule()
    p = Prelude(mod)
    x = relay.Var("x")
    r = relay.Var("r")
    y = relay.Var("y")
    z = relay.Var("z")
    expr = relay.Tuple(
        [
            relay.Let(x, relay.Tuple([relay.const(1), relay.const(2)]), relay.TupleGetItem(x, 1)),
            relay.Let(
                r,
                relay.RefCreate(relay.const(1)),
                seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r)),
            ),
            # let nested inside a call argument
            relay.Let(y, p.id(relay.Let(z, relay.const(4), z)), y),
        ]
    )
    tup_val = run_as_python(expr, mod)
    assert_adt_len(tup_val, 3)
    assert_tensor_value(tup_val[0], 2)
    assert_tensor_value(tup_val[1], 3)
    assert_tensor_value(tup_val[2], 4)
def test_ref_execution_order():
    """Tuple fields and call arguments are evaluated left to right.

    A single ref is mutated in every position; the observed reads pin down
    the evaluation order 1, 2, 3, 4(then 5), 5.
    """
    # we want to have effects execute from left to right
    x = relay.Var("x")
    y = relay.Var("y")
    f = relay.Var("f")
    r = relay.Var("r")
    expr = relay.Let(
        f,
        relay.Function([x, y], x),
        # r = 1
        relay.Let(
            r,
            relay.RefCreate(relay.const(1)),
            relay.Tuple(
                [
                    # should be 1
                    relay.RefRead(r),
                    # set r to 2 and read back
                    seq(relay.RefWrite(r, relay.const(2)), relay.RefRead(r)),
                    # set r to 3 and read back
                    seq(relay.RefWrite(r, relay.const(3)), relay.RefRead(r)),
                    # set r to 4 and read as first arg to f
                    # set r to 5 and read as second arg to f
                    # f should evaluate to 4
                    f(
                        seq(relay.RefWrite(r, relay.const(4)), relay.RefRead(r)),
                        seq(relay.RefWrite(r, relay.const(5)), relay.RefRead(r)),
                    ),
                    # read back 5
                    relay.RefRead(r),
                ]
            ),
        ),
    )
    tup_val = run_as_python(expr)
    assert_adt_len(tup_val, 5)
    assert_tensor_value(tup_val[0], 1)
    assert_tensor_value(tup_val[1], 2)
    assert_tensor_value(tup_val[2], 3)
    assert_tensor_value(tup_val[3], 4)
    assert_tensor_value(tup_val[4], 5)
def test_op_add():
    """The add op computes 1 + 2 = 3."""
    result = run_as_python(relay.add(relay.const(1), relay.const(2)))
    assert_tensor_value(result, 3)
# test an op with a tuple input
# adapted from test_stack in test_op_level3
def test_op_stack():
    """Check an op with a tuple *input*: relay.stack against np.stack."""
    def verify_stack(dshapes, axis):
        # one random int tensor per requested shape
        x_data = [np.random.normal(size=shape).astype("int32") for shape in dshapes]
        ref_res = np.stack(x_data, axis=axis)
        args = [relay.const(data) for data in x_data]
        call = relay.stack(relay.Tuple(args), axis)
        call_val = run_as_python(call)
        # (removed a stray no-op `type(call_val)` statement here)
        assert_tensor_value(call_val, ref_res)
    verify_stack([(2,), (2,), (2,)], -1)
    verify_stack([(2,), (2,), (2,)], 0)
    verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
    verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
# test an op with a tuple output
# adapted from test_split_infer_type in test_op_level3
def test_split():
    """Check an op with a tuple *output*: relay.split against np.split."""
    def verify_split(shape, indices_or_sections, axis=0):
        data = np.random.normal(size=shape).astype("float32")
        expected_pieces = np.split(data, indices_or_sections, axis=axis)
        call = relay.split(relay.const(data), indices_or_sections, axis=axis)
        result = run_as_python(call)
        assert_adt_len(result, len(expected_pieces))
        for i, piece in enumerate(expected_pieces):
            assert_tensor_value(result[i], piece)
    verify_split((2, 3), 2)
    verify_split((5, 3), [3])
    verify_split((5, 9, 3), [3, 4], 1)
    verify_split((5, 5, 2, 2), 5, 1)
    verify_split((5, 5, 2, 2), 5, 0)
# ensure we can generate code for batch_norm, since it requires simplify_inference
def test_batch_norm():
    """batch_norm requires SimplifyInference; compare against a NumPy reference."""
    def verify_batch_norm(shapes):
        # shapes = [data, gamma, beta, moving_mean, moving_var]
        data = [np.absolute(np.random.normal(size=shape).astype("float32")) for shape in shapes]
        relay_args = [relay.const(arg) for arg in data]
        eps = 1e-5
        def reference(x, gamma, beta, moving_mean, moving_var):
            return (x - moving_mean) / np.sqrt(moving_var + eps) * gamma + beta
        ref_res = reference(*data)
        call = relay.nn.batch_norm(*relay_args, epsilon=eps)[0]
        call_val = run_as_python(call)
        # there will be a change in accuracy so we need to check
        # approximate equality
        assert isinstance(call_val, tvm.nd.NDArray)
        tvm.testing.assert_allclose(call_val.numpy(), ref_res, atol=eps, rtol=eps)
    verify_batch_norm([(10, 20), (20,), (20,), (20,), (20,)])
    verify_batch_norm([(20, 10), (10,), (10,), (10,), (10,)])
    verify_batch_norm([(10, 50), (50,), (50,), (50,), (50,)])
    verify_batch_norm([(30, 40), (40,), (40,), (40,), (40,)])
def test_return_global_var():
    """Evaluating a bare GlobalVar yields a callable for the underlying function."""
    tt = relay.TensorType([1], "float32")
    param = relay.Var("x", type_annotation=tt)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([param], param, ret_type=tt)
    main_func = run_as_python(mod.get_global_var("main"), mod=mod)
    arg = tvm.nd.array(np.array([0.0], dtype="float32"))
    result = main_func(arg)
    assert arg.numpy() == result.numpy()
def test_closure_in_tuple():
    """A closure stored in a tuple can be projected out and called."""
    tt = relay.TensorType([1], "float32")
    param = relay.Var("x", type_annotation=tt)
    identity = relay.Function([param], param, ret_type=tt)
    projection = relay.TupleGetItem(relay.Tuple([identity, identity]), 0)
    func = run_as_python(projection)
    arg = tvm.nd.array(np.array([0.0], dtype="float32"))
    result = func(arg)
    assert arg.numpy() == result.numpy()
def test_closure_in_ref():
    """A closure stored in a ref can be read back out and called."""
    tt = relay.TensorType([1], "float32")
    x = relay.Var("x", type_annotation=tt)
    identity = relay.Function([x], x, ret_type=tt)
    gv = relay.GlobalVar("id")
    r = relay.Var("r")
    # Fix: this local was previously named `seq`, shadowing the module-level
    # seq() sequencing helper defined at the top of this file.
    expr = relay.Let(
        r,
        relay.RefCreate(gv),
        relay.Call(relay.RefRead(r), [relay.const(np.array([0.0], dtype="float32"))]),
    )
    mod = tvm.IRModule()
    mod[gv] = identity
    res = run_as_python(expr, mod=mod)
    assert res.numpy() == np.array([0.0], dtype="float32")
def test_compiling_with_main():
    """A module containing 'main' still evaluates calls into other globals."""
    unit_type = relay.TupleType([])
    mod = tvm.IRModule()
    # unit: fn() -> ()
    mod["unit"] = relay.Function([], relay.Tuple([]), ret_type=unit_type)
    # main: identity on ()
    param = relay.Var("x", type_annotation=unit_type)
    mod["main"] = relay.Function([param], param, ret_type=unit_type)
    call = mod.get_global_var("main")(mod.get_global_var("unit")())
    res = run_as_python(call, mod=mod)
    assert isinstance(res, ADT)
    assert len(res) == 0
| 20,843 | 29.88 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_fold_scale_axis.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import create_workload
from tvm.relay.build_module import bind_params_by_name
def initializer(_, param):
    # NOTE(review): this rebinds the local name `param` only — it never
    # mutates or returns the caller's array, so it is effectively a no-op.
    # Presumably intentional as a dummy initializer for create_workload;
    # confirm before "fixing".
    param = np.zeros(param.shape)
def _get_positive_scale(size):
return np.random.uniform(0.5, 1, size=size).astype("float32")
def run_opt_pass(expr, opt_pass):
    """Wrap *expr* in a fresh module, apply *opt_pass*, and unwrap the result.

    Returns the transformed 'main' function when *expr* was a Function,
    otherwise just that function's body expression.
    """
    assert isinstance(opt_pass, tvm.transform.Pass)
    transformed = opt_pass(tvm.IRModule.from_expr(expr))
    main_fn = transformed["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def test_fold_fwd_simple():
    """Forward-fold an input scale multiply into the conv2d weight.

    Covers plain NCHW and blocked NCHW{n}c / OIHW2i{n}o layouts.
    """
    # network as written: data is scaled before relu/add/conv
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        args = [x, conv_weight, in_bias]
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW2i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        return relay.Function(args, y)
    # what the pass should produce: scale folded into bias (divide) and weight (multiply)
    def expected(x, conv_weight, in_bias, in_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, in_bias]
        if blocking:
            squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3])
            x = relay.nn.relu(x)
            in_bias = relay.divide(
                in_bias,
                relay.reshape(squeezed_scale, (1, in_channels // blocking[0], 1, 1, blocking[0])),
            ) # NCHWc
            x = relay.add(x, in_bias)
            conv_weight = relay.multiply(
                conv_weight, relay.reshape(squeezed_scale, (1, in_channels // 2, 1, 1, 2, 1))
            ) # OIHWio
        else:
            squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
            x = relay.nn.relu(x)
            in_bias = relay.divide(
                in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
            )
            x = relay.add(x, in_bias)
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
            )
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW2i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        return relay.Function(args, y)
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            in_channels = shape[1] * shape[4]
            in_bias = relay.var("in_bias", shape=(1, in_channels // blocking[0], 1, 1, blocking[0]))
            in_scale = relay.const(
                _get_positive_scale((1, in_channels // blocking[0], 1, 1, blocking[0]))
            )
        else:
            in_channels = shape[1]
            in_bias = relay.var("in_bias", shape=(in_channels, 1, 1))
            in_scale = relay.const(_get_positive_scale((in_channels, 1, 1)))
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale, in_channels, channels, blocking)
        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 2, None)
    check((2, 2, 10, 10, 2), 8, (2, 4))
def test_fold_fwd_dual_path():
    """scale axis being consumed by two consumers

    The scaled input feeds two depthwise convolutions; the fold must push
    the scale into BOTH weights for the rewrite to be valid.
    """
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        args = [x, conv_weight, in_bias]
        x = relay.multiply(in_scale, x)
        x = relay.nn.relu(x)
        x = relay.subtract(x, in_bias)
        y1 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            groups=channels,
            padding=(1, 1),
        )
        y2 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            groups=channels,
            padding=(1, 1),
        )
        z = relay.add(y1, y2)
        return relay.Function(args, z)
    # expected: scale divided out of the bias and multiplied into each weight
    def expected(x, conv_weight, in_bias, in_scale, channels, blocking):
        args = [x, conv_weight, in_bias]
        x = relay.nn.relu(x)
        if blocking:
            _in_scale = relay.reshape(
                in_scale, (1, 1, 1, channels // blocking[0], blocking[0])
            )  # NHWCc
        else:
            _in_scale = in_scale
        in_bias = relay.divide(in_bias, _in_scale)
        x = relay.subtract(x, in_bias)
        if blocking:
            _in_scale = relay.reshape(
                in_scale, (1, 1, 1, channels // blocking[0], 1, blocking[0])
            )  # HWIOio
        y1 = relay.nn.conv2d(
            x,
            relay.multiply(conv_weight, _in_scale),
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            groups=channels,
            padding=(1, 1),
        )
        if blocking:
            _in_scale = relay.reshape(
                in_scale, (1, 1, 1, channels // blocking[0], 1, blocking[0])
            )  # HWIOio
        y2 = relay.nn.conv2d(
            x,
            relay.multiply(conv_weight, _in_scale),
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            groups=channels,
            padding=(1, 1),
        )
        z = relay.add(y1, y2)
        return relay.Function(args, z)
    def check(dshape, channels, blocking):
        x = relay.var("x", shape=dshape)
        if blocking:
            in_channels = dshape[3] * dshape[4]
            wshape = (3, 3, 1, channels // blocking[1], 1, blocking[1])  # HWIOio
            weight = relay.var("weight", shape=wshape)
            in_bias = relay.var("in_bias", shape=(in_channels // blocking[0], blocking[0]))
            in_scale = relay.const(_get_positive_scale((in_channels // blocking[0], blocking[0])))
        else:
            in_channels = dshape[-1]
            wshape = (3, 3, 1, channels)  # HWIO
            weight = relay.var("weight", shape=wshape)
            in_bias = relay.var("in_bias", shape=(in_channels,))
            in_scale = relay.const(
                _get_positive_scale(
                    in_channels,
                )
            )
        # test depthwise
        assert in_channels == channels
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_expected = expected(x, weight, in_bias, in_scale, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 3), 3, None)
    check((2, 4, 10, 2, 2), 4, (2, 2))
def test_fold_fwd_fail():
    """Testcase where we cannot fold.

    The scaled value is also consumed directly by the final add, so the
    scale cannot be folded away; the pass must leave the graph unchanged.
    """
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        x = relay.multiply(x, in_scale)
        xx = relay.nn.leaky_relu(x, alpha=0.1)
        y1 = relay.nn.conv2d(
            xx,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            padding=(1, 1),
        )
        z = relay.add(y1, x)
        return relay.Function(relay.analysis.free_vars(z), z)
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        if blocking:
            in_channels = shape[3] * shape[4]
            in_bias = relay.var("in_bias", shape=(in_channels // blocking[0], blocking[0]))
            in_scale = relay.const(_get_positive_scale((in_channels // blocking[0], blocking[0])))
        else:
            in_channels = shape[-1]
            in_bias = relay.var("in_bias", shape=(in_channels,))
            in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
        # test depthwise
        assert in_channels == channels
        weight = relay.var("weight")
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        # the pass must be a no-op here
        assert tvm.ir.structural_equal(y1, y1_folded)
    check((2, 11, 10, 4), 4, None)
    check((2, 11, 10, 2, 2), 4, (2, 2))
def test_fold_fwd_relu_fail():
    """Testcase where we cannot fold because the scale cannot pass relu.

    Folding through relu is only valid for constant, positive scales; both
    a non-constant scale and a negative constant scale must block the fold.
    """
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        x = relay.multiply(x, in_scale)
        xx = relay.nn.relu(x)
        y1 = relay.nn.conv2d(
            xx,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC{}c".format(blocking[0]) if blocking else "NHWC",
            kernel_layout="HWIO1i{}o".format(blocking[1]) if blocking else "HWIO",
            padding=(1, 1),
        )
        z = relay.add(y1, x)
        return relay.Function(relay.analysis.free_vars(z), z)
    def check(shape, channels, blocking, in_scale):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            in_channels = shape[3] * shape[4]
            in_bias = relay.var("in_bias", shape=(1, in_channels // blocking[0], 1, 1, blocking[0]))
        else:
            in_channels = shape[-1]
            in_bias = relay.var("in_bias", shape=(in_channels,))
        assert in_channels == channels
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        # the pass must be a no-op here
        assert tvm.ir.structural_equal(y1, y1_folded)
    # non-constant scale: cannot prove positivity
    in_scale = relay.var("in_scale", shape=(4,))
    check((2, 11, 10, 4), 4, None, in_scale)
    # negative constant scale: sign would flip across relu
    in_scale = relay.const(-_get_positive_scale((4,)))
    check((2, 11, 10, 4), 4, None, in_scale)
    in_scale = relay.var("in_scale", shape=(1, 1, 1, 2, 2))
    check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
    in_scale = relay.const(-_get_positive_scale((1, 1, 1, 2, 2)))
    check((2, 11, 10, 2, 2), 4, (2, 2), in_scale)
def test_fold_fwd_let_fail():
    """Testcase where we cannot fold.

    The scaled value escapes through a let binding and is also used by the
    final add, so the scale cannot be removed from the data path.
    """
    def before(x, conv_weight, in_bias, in_scale, channels):
        args = [x, conv_weight, in_bias]
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        x_var = relay.Var("x_var")
        y1 = relay.nn.conv2d(
            x_var,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            data_layout="NHWC",
            kernel_layout="HWIO",
            padding=(1, 1),
        )
        z = relay.add(y1, x)
        let = relay.Let(x_var, x, z)
        return relay.Function(args, let)
    def check(shape, channels):
        x = relay.var("x", shape=shape)
        in_channels = shape[-1]
        in_bias = relay.var("in_bias", shape=(in_channels,))
        in_scale = relay.const(_get_positive_scale(size=(in_channels,)))
        # test depthwise
        assert in_channels == channels
        weight = relay.var("weight")
        y1 = before(x, weight, in_bias, in_scale, channels)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        # the pass must be a no-op here
        assert tvm.ir.structural_equal(y1, y1_folded)
    check((2, 11, 10, 4), 4)
def test_fold_fwd_negative_scale():
    """Testcase of folding negative scale.

    With no relu between the multiply and the conv, a negative scale may
    still be folded directly into the conv2d weight.
    """
    def before(x, conv_weight, in_scale, channels, blocking):
        args = [x, conv_weight]
        x = relay.multiply(x, in_scale)
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        return relay.Function(args, y)
    def expected(x, conv_weight, in_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight]
        if blocking:
            squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3])
            conv_weight = relay.multiply(
                conv_weight, relay.reshape(squeezed_scale, (1, in_channels // 4, 1, 1, 4, 1))
            )
            # blocking by "i" in OIHWio
        else:
            squeezed_scale = relay.squeeze(in_scale, axis=[1, 2])
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
            )
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW4i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        return relay.Function(args, y)
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        if blocking:
            in_channels = shape[1] * shape[4]
            in_scale = relay.const(-_get_positive_scale((1, shape[1], 1, 1, shape[4])))
        else:
            in_channels = shape[1]
            in_scale = relay.const(-_get_positive_scale((in_channels, 1, 1)))
        weight = relay.var("weight")
        y1 = before(x, weight, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_scale, in_channels, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4, None)
    check((2, 2, 10, 10, 2), 8, (2, 2))
def test_fold_fwd_dense():
    """Forward-fold an input scale into a dense layer's weight."""
    def before(x, weight, in_bias, in_scale):
        args = [x, weight, in_bias]
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        y = relay.nn.dense(x, weight)
        return relay.Function(args, y)
    # expected: scale divided out of the bias and multiplied into the weight
    def expected(x, weight, in_bias, in_scale):
        # use a fixed order of args so alpha equal check can pass
        args = [x, weight, in_bias]
        x = relay.nn.relu(x)
        in_bias = relay.divide(in_bias, in_scale)
        x = relay.add(x, in_bias)
        weight = relay.multiply(weight, in_scale)
        y = relay.nn.dense(x, weight)
        return relay.Function(args, y)
    def check(data_shape, weight_shape):
        x = relay.var("x", shape=data_shape)
        weight = relay.var("weight", shape=weight_shape)
        in_channels = data_shape[1]
        in_bias = relay.var("in_bias", shape=(in_channels,))
        in_scale = relay.const(_get_positive_scale((in_channels,)))
        y1 = before(x, weight, in_bias, in_scale)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale)
        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4), (3, 4))
    check((3, 5), (4, 5))
def test_fold_bwd_simple():
    """Simple testcase."""
    # Graph before the pass: conv2d -> add(bias) -> relu -> multiply(scale).
    # The scale sits after the conv, so BackwardFoldScaleAxis can push it
    # into the conv weight and the bias (the scale here is positive, see check).
    def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        if blocking:
            # Blocked (NCHWc) layout: bias must be reshaped to broadcast.
            out_bias = relay.reshape(out_bias, (1, channels // blocking[1], 1, 1, blocking[1]))
        else:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        if blocking:
            out_scale = relay.reshape(out_scale, (1, channels // blocking[1], 1, 1, blocking[1]))
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    # Hand-built graph the pass should produce: the multiply is gone and the
    # scale is folded into both the conv weight and the bias.
    def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        if blocking:
            out_bias = relay.reshape(out_bias, (1, channels // blocking[1], 1, 1, blocking[1]))
            out_scale = relay.reshape(out_scale, (1, channels // blocking[1], 1, 1, blocking[1]))
            squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3])
            conv_weight = relay.multiply(
                conv_weight,
                relay.reshape(squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, blocking[1])),
            )
        else:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
            squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
            )
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        if blocking:
            out_bias = relay.multiply(
                out_bias,
                relay.reshape(squeezed_scale, (1, channels // blocking[1], 1, 1, blocking[1])),
            )
        else:
            out_bias = relay.multiply(
                out_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=2)
            )
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        return relay.Function(args, y)
    def check(shape, in_channels, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        if blocking:
            out_scale = relay.const(_get_positive_scale((channels,)))
        else:
            out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` builds a
        # structurally identical graph.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4, 8, None)  # plain NCHW layout
    check((2, 2, 10, 10, 16), 32, 64, (16, 16))  # blocked NCHW16c layout
def test_fold_bwd_dual_path():
    """Dual path testcase."""
    # Two conv branches consume the same input, are relu'd and summed before
    # the multiply; the pass must fold the scale into *both* conv weights.
    # Note `out_bias` stays in the param list for a fixed signature but is
    # never used inside the graph.
    def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        y1 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        if not blocking:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=2)
        squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
        # Both branches get the same scaled weight; helper keeps the two
        # construction sites identical.
        def fold_conv_weight():
            if blocking:
                return relay.multiply(
                    conv_weight,
                    relay.reshape(
                        squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, blocking[1])
                    ),
                )
            else:
                return relay.multiply(
                    conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
                )
        y1 = relay.nn.conv2d(
            x,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            x,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, in_channels, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            out_bias = relay.var("out_bias", shape=(channels // blocking[1], 1, 1, blocking[1]))
            out_scale = relay.const(
                _get_positive_scale((channels // blocking[1], 1, 1, blocking[1]))
            )
        else:
            out_bias = relay.var("out_bias", shape=(channels,))
            out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4, 8, None)
    check((2, 2, 10, 10, 2), 4, 8, (2, 2))
def test_fold_bwd_simple_constant():
    """Fold a trailing scalar-constant multiply (y * 2.0) after relu.

    Parameters are bound as constants via `bind_params_by_name`, then
    `FoldScaleAxis` at opt_level 3 is expected to eliminate the multiply;
    the expected module is the same workload built without it.
    """
    def before(data, weight, out_bias, channels):
        y = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        y = relay.add(y, out_bias)
        c2 = relay.const(2.0)
        y = relay.nn.relu(y)
        y = relay.multiply(y, c2)
        # `create_workload`/`initializer` come from the test helpers at the
        # top of this file; params are bound so folding sees constants.
        mod, params = create_workload(y, initializer)
        mod["main"] = bind_params_by_name(mod["main"], params)
        return mod
    def expected(data, weight, out_bias, channels):
        # Same workload without the multiply by 2.
        y0 = relay.nn.conv2d(
            data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
        )
        y0 = relay.add(y0, out_bias)
        y0 = relay.nn.relu(y0)
        mod, params = create_workload(y0, initializer)
        mod["main"] = bind_params_by_name(mod["main"], params)
        return mod
    def check(shape, channels):
        x = relay.var("data", relay.TensorType(shape, "float32"))
        weight = relay.var("weight")
        out_bias = relay.var("in_bias", shape=(channels, 1, 1))
        y0 = before(x, weight, out_bias, channels)
        remove_last_multiply = tvm.transform.Sequential(
            [
                relay.transform.InferType(),
                relay.transform.FoldScaleAxis(),
            ]
        )
        with tvm.transform.PassContext(opt_level=3):
            y0 = remove_last_multiply(y0)
        _expect = expected(x, weight, out_bias, channels)
        tvm.ir.assert_structural_equal(y0, _expect)
    check((1, 3, 200, 200), 16)
def test_fold_bwd_dual_consumer():
    """Backward fold when one scaled conv output feeds two consumers.

    y0 (conv * scale, relu) is consumed by both y1 and y2; each branch is
    itself scaled by the same constant.  All three multiplies should be
    folded into the respective conv weights.
    """
    def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        y0 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y0 = relay.multiply(y0, out_scale)
        y0 = relay.nn.relu(y0)
        y1 = relay.nn.conv2d(
            y0,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y1 = relay.multiply(y1, out_scale)
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            y0,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y2 = relay.multiply(y2, out_scale)
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        # All three convs use the same scaled weight expression.
        def fold_conv_weight():
            squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
            if blocking:
                return relay.multiply(
                    conv_weight,
                    relay.reshape(
                        squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, blocking[1])
                    ),
                )
            else:
                return relay.multiply(
                    conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
                )
        y0 = relay.nn.conv2d(
            x,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y0 = relay.nn.relu(y0)
        y1 = relay.nn.conv2d(
            y0,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            y0,
            fold_conv_weight(),
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, in_channels, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            out_bias = relay.var("out_bias", shape=(channels // blocking[1], 1, 1, blocking[1]))
            out_scale = relay.const(
                _get_positive_scale((channels // blocking[1], 1, 1, blocking[1]))
            )
        else:
            out_bias = relay.var("out_bias", shape=(channels,))
            out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 4, 4, None)
    check((2, 2, 10, 10, 2), 4, 4, (2, 2))
def test_fold_bwd_fail():
    """Testcases where backward folding must NOT change the graph."""
    def fail1(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        y1 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
            out_layout="CNHW{}c".format(blocking[1]) if blocking else "CNHW",
        )
        # fold will fail because the axis from two path
        # differs from each other.
        y2 = relay.nn.relu(y2)
        y = relay.add(y1, y2)
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def fail2(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        y1 = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y2 = relay.nn.relu(y1)
        # fold will fail because y1 is referred also by y2
        y1 = relay.multiply(y1, out_scale)
        y = relay.add(y1, y2)
        return relay.Function(args, y)
    def check(shape, in_channels, channels, blocking, fbefore):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            out_bias = relay.var("out_bias", shape=(channels // blocking[1], 1, 1, blocking[1]))
            out_scale = relay.const(
                _get_positive_scale((channels // blocking[1], 1, 1, blocking[1]))
            )
        else:
            out_bias = relay.var("out_bias", shape=(channels, 1, 1))
            out_scale = relay.const(_get_positive_scale((channels, 1, 1)))
        y1 = fbefore(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        # No folding may happen: the result must equal the input graph.
        assert tvm.ir.structural_equal(y1_folded, y1)
    check((4, 4, 10, 10), 4, 4, None, fail1)
    check((2, 2, 10, 10, 2), 4, 4, (2, 2), fail1)
    check((4, 4, 10, 10), 4, 4, None, fail2)
    check((4, 2, 10, 10, 2), 4, 4, (2, 2), fail2)
def test_fold_bwd_relu_fail():
    """Testcase where we cannot fold because the scale cannot pass relu.

    Backward-folding a scale through relu is only legal for a positive
    constant scale.  Here the scale is either a free variable or a negative
    constant, so BackwardFoldScaleAxis must leave the graph untouched.
    """
    def before(x, conv_weight, out_scale, channels, blocking):
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y = relay.nn.relu(y)
        # Scale the relu output.  (The original multiplied `x` instead of `y`,
        # which made the conv+relu dead code and the test vacuous.)
        y = relay.multiply(y, out_scale)
        return relay.Function(relay.analysis.free_vars(y), y)
    def check(shape, channels, blocking, out_scale):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        y1 = before(x, weight, out_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        # No folding may happen: the folded graph must equal the original.
        assert tvm.ir.structural_equal(y1, y1_folded)
    # Non-constant scale: cannot fold regardless of relu.
    out_scale = relay.var("in_scale", shape=(4, 1, 1))
    check((4, 4, 10, 10), 4, None, out_scale)
    # Negative constant scale: relu blocks the fold.  Cast the numpy array
    # (not the relay expression) so the scale stays a Constant node; the
    # original `relay.const(...).astype(...)` wrapped the constant in a cast.
    out_scale = relay.const(np.random.uniform(size=(4, 1, 1), low=-1.0, high=0.0).astype("float32"))
    check((4, 4, 10, 10), 4, None, out_scale)
    out_scale = relay.var("in_scale", shape=(1, 2, 1, 1, 2))
    check((4, 2, 10, 10, 2), 4, (2, 2), out_scale)
    out_scale = relay.const(
        np.random.uniform(size=(1, 2, 1, 1, 2), low=-1.0, high=0.0).astype("float32")
    )
    check((4, 2, 10, 10, 2), 4, (2, 2), out_scale)
def test_fold_bwd_negative_scale():
    """Testcase of folding negative scale.

    With no relu between the conv and the multiply, a negative constant
    scale can still be folded into the conv weight.
    """
    def before(x, conv_weight, out_scale, channels, blocking):
        args = [x, conv_weight]
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_scale, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight]
        if blocking:
            squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3])
            conv_weight = relay.multiply(
                conv_weight,
                relay.reshape(squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, blocking[1])),
            )
        else:
            squeezed_scale = relay.squeeze(out_scale, axis=[1, 2])
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
            )
        y = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW{}c".format(blocking[0]) if blocking else "NCHW",
            kernel_layout="OIHW1i{}o".format(blocking[1]) if blocking else "OIHW",
        )
        return relay.Function(args, y)
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            # Negated positive scale => strictly negative constant scale.
            out_scale = relay.const(
                -_get_positive_scale((1, channels // blocking[1], 1, 1, blocking[1]))
            )
        else:
            out_scale = relay.const(-_get_positive_scale((channels, 1, 1)))
        y1 = before(x, weight, out_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_scale, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10), 8, None)
    check((2, 2, 10, 10, 2), 8, (2, 2))
def test_fold_bwd_dense():
    """dense testcase."""
    def before(x, weight, in_bias, in_scale):
        # dense -> add(bias) -> relu -> multiply(scale): the scale trails
        # the dense op, so the backward pass should absorb it.
        params = [x, weight, in_bias]
        out = relay.nn.dense(x, weight)
        out = relay.add(out, in_bias)
        out = relay.nn.relu(out)
        out = relay.multiply(out, in_scale)
        return relay.Function(params, out)
    def expected(x, weight, in_bias, in_scale):
        # use a fixed order of args so alpha equal check can pass
        params = [x, weight, in_bias]
        scaled_weight = relay.multiply(weight, relay.expand_dims(in_scale, axis=1))
        out = relay.nn.dense(x, scaled_weight)
        out = relay.add(out, relay.multiply(in_bias, in_scale))
        out = relay.nn.relu(out)
        return relay.Function(params, out)
    def check(data_shape, weight_shape):
        data = relay.var("x", shape=data_shape)
        weight = relay.var("weight", shape=weight_shape)
        out_channels = weight_shape[0]
        bias = relay.var("in_bias", shape=(out_channels,))
        scale = relay.const(_get_positive_scale((out_channels,)))
        graph = run_opt_pass(before(data, weight, bias, scale), transform.InferType())
        folded = run_opt_pass(graph, transform.BackwardFoldScaleAxis())
        folded = run_opt_pass(folded, transform.InferType())
        reference = run_opt_pass(expected(data, weight, bias, scale), transform.InferType())
        assert tvm.ir.structural_equal(folded, reference)
    check((2, 4), (3, 4))
    check((3, 5), (4, 5))
def test_fold_bwd_bias_add():
    """bias add testcase."""
    def before(x, conv_weight, out_bias, out_scale, channels):
        # conv2d -> bias_add -> relu -> multiply(scale).
        params = [x, conv_weight, out_bias]
        out = relay.nn.conv2d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        out = relay.nn.bias_add(out, out_bias)
        out = relay.nn.relu(out)
        out = relay.multiply(out, out_scale)
        return relay.Function(params, out)
    def expected(x, conv_weight, out_bias, out_scale, channels):
        # use a fixed order of args so alpha equal check can pass
        params = [x, conv_weight, out_bias]
        flat_scale = relay.squeeze(out_scale, axis=[1, 2])
        scaled_weight = relay.multiply(
            conv_weight, relay.expand_dims(flat_scale, axis=1, num_newaxis=3)
        )
        out = relay.nn.conv2d(
            x,
            scaled_weight,
            channels=channels,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        out = relay.nn.bias_add(out, relay.multiply(out_bias, flat_scale))
        out = relay.nn.relu(out)
        return relay.Function(params, out)
    def check(shape, channels):
        data = relay.var("x", shape=shape)
        weight = relay.var("weight")
        bias = relay.var("out_bias", shape=(channels,))
        scale = relay.const(_get_positive_scale((channels, 1, 1)))
        graph = run_opt_pass(before(data, weight, bias, scale, channels), transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {p.name_hint: p.checked_type for p in graph.params}
        weight = relay.var("weight", type_dict["weight"])
        folded = run_opt_pass(graph, transform.BackwardFoldScaleAxis())
        reference = run_opt_pass(
            expected(data, weight, bias, scale, channels), transform.InferType()
        )
        assert tvm.ir.structural_equal(folded, reference)
    check((2, 4, 10, 10), 4)
def test_fold_fwd_conv3d():
    """Conv3d testcase.

    Forward fold: the multiply sits *before* the conv3d, so
    ForwardFoldScaleAxis folds the input scale into the conv weight and
    rewrites the intermediate bias accordingly.
    """
    def before(x, conv_weight, in_bias, in_scale, channels, blocking):
        args = [x, conv_weight, in_bias]
        x = relay.multiply(x, in_scale)
        x = relay.nn.relu(x)
        x = relay.add(x, in_bias)
        y = relay.nn.conv3d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3, 3),
            padding=(1, 1, 1),
            data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
            kernel_layout="OIDHW2i{}o".format(blocking[1]) if blocking else "OIDHW",
        )
        return relay.Function(args, y)
    def expected(x, conv_weight, in_bias, in_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, in_bias]
        if blocking:
            squeezed_scale = relay.squeeze(in_scale, axis=[0, 2, 3, 4])
            x = relay.nn.relu(x)
            # Scale moved past the add: the bias is divided by it instead.
            in_bias = relay.divide(
                in_bias,
                relay.reshape(
                    squeezed_scale, (1, in_channels // blocking[0], 1, 1, 1, blocking[0])
                ),
            )  # NCHWc
            x = relay.add(x, in_bias)
            # The hard-coded 2 matches the "2i" inner-input block of the
            # kernel_layout above.
            conv_weight = relay.multiply(
                conv_weight, relay.reshape(squeezed_scale, (1, in_channels // 2, 1, 1, 1, 2, 1))
            )  # OIHWio
        else:
            squeezed_scale = relay.squeeze(in_scale, axis=[1, 2, 3])
            x = relay.nn.relu(x)
            in_bias = relay.divide(
                in_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
            )
            x = relay.add(x, in_bias)
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
            )
        y = relay.nn.conv3d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3, 3),
            padding=(1, 1, 1),
            data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
            kernel_layout="OIDHW2i{}o".format(blocking[1]) if blocking else "OIDHW",
        )
        return relay.Function(args, y)
    def check(shape, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        if blocking:
            in_channels = shape[1] * shape[-1]
            in_bias = relay.var(
                "in_bias", shape=(1, in_channels // blocking[0], 1, 1, 1, blocking[0])
            )
            in_scale = relay.const(
                _get_positive_scale((1, in_channels // blocking[0], 1, 1, 1, blocking[0]))
            )
        else:
            in_channels = shape[1]
            in_bias = relay.var("in_bias", shape=(in_channels, 1, 1, 1))
            in_scale = relay.const(_get_positive_scale((in_channels, 1, 1, 1)))
        y1 = before(x, weight, in_bias, in_scale, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.ForwardFoldScaleAxis())
        y1_expected = expected(x, weight, in_bias, in_scale, in_channels, channels, blocking)
        y1_folded = run_opt_pass(y1_folded, transform.InferType())
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10, 10), 2, None)
    check((2, 2, 10, 10, 10, 2), 8, (2, 4))
def test_fold_bwd_conv3d():
    """Conv3d testcase.

    Backward fold: conv3d -> add(bias) -> relu -> multiply(scale); the scale
    should be folded into both the conv weight and the bias.
    """
    def before(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        args = [x, conv_weight, out_bias]
        if blocking:
            out_bias = relay.reshape(out_bias, (1, channels // blocking[1], 1, 1, 1, blocking[1]))
        else:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=3)
        y = relay.nn.conv3d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3, 3),
            padding=(1, 1, 1),
            data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
            kernel_layout="OIDHW1i{}o".format(blocking[1]) if blocking else "OIDHW",
        )
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        if blocking:
            out_scale = relay.reshape(out_scale, (1, channels // blocking[1], 1, 1, 1, blocking[1]))
        y = relay.multiply(y, out_scale)
        return relay.Function(args, y)
    def expected(x, conv_weight, out_bias, out_scale, in_channels, channels, blocking):
        # use a fixed order of args so alpha equal check can pass
        args = [x, conv_weight, out_bias]
        if blocking:
            out_bias = relay.reshape(out_bias, (1, channels // blocking[1], 1, 1, 1, blocking[1]))
            out_scale = relay.reshape(out_scale, (1, channels // blocking[1], 1, 1, 1, blocking[1]))
            squeezed_scale = relay.squeeze(out_scale, axis=[0, 2, 3, 4])
            conv_weight = relay.multiply(
                conv_weight,
                relay.reshape(
                    squeezed_scale, (channels // blocking[1], 1, 1, 1, 1, 1, blocking[1])
                ),
            )
        else:
            out_bias = relay.expand_dims(out_bias, axis=1, num_newaxis=3)
            squeezed_scale = relay.squeeze(out_scale, axis=[1, 2, 3])
            conv_weight = relay.multiply(
                conv_weight, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=4)
            )
        y = relay.nn.conv3d(
            x,
            conv_weight,
            channels=channels,
            kernel_size=(3, 3, 3),
            padding=(1, 1, 1),
            data_layout="NCDHW{}c".format(blocking[0]) if blocking else "NCDHW",
            kernel_layout="OIDHW1i{}o".format(blocking[1]) if blocking else "OIDHW",
        )
        if blocking:
            out_bias = relay.multiply(
                out_bias,
                relay.reshape(squeezed_scale, (1, channels // blocking[1], 1, 1, 1, blocking[1])),
            )
        else:
            out_bias = relay.multiply(
                out_bias, relay.expand_dims(squeezed_scale, axis=1, num_newaxis=3)
            )
        y = relay.add(y, out_bias)
        y = relay.nn.relu(y)
        return relay.Function(args, y)
    def check(shape, in_channels, channels, blocking):
        x = relay.var("x", shape=shape)
        weight = relay.var("weight")
        out_bias = relay.var("out_bias", shape=(channels,))
        if blocking:
            out_scale = relay.const(_get_positive_scale((channels,)))
        else:
            out_scale = relay.const(_get_positive_scale((channels, 1, 1, 1)))
        y1 = before(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1 = run_opt_pass(y1, transform.InferType())
        # Re-declare the weight with its inferred type so `expected` matches.
        type_dict = {x.name_hint: x.checked_type for x in y1.params}
        weight = relay.var("weight", type_dict["weight"])
        y1_folded = run_opt_pass(y1, transform.BackwardFoldScaleAxis())
        y1_expected = expected(x, weight, out_bias, out_scale, in_channels, channels, blocking)
        y1_expected = run_opt_pass(y1_expected, transform.InferType())
        assert tvm.ir.structural_equal(y1_folded, y1_expected)
    check((2, 4, 10, 10, 10), 4, 8, None)
    check((2, 2, 10, 10, 10, 16), 32, 64, (16, 16))
if __name__ == "__main__":
    # Run every test in this file, preserving the original invocation order.
    for _test_fn in (
        test_fold_fwd_simple,
        test_fold_fwd_dual_path,
        test_fold_fwd_fail,
        test_fold_fwd_relu_fail,
        test_fold_fwd_negative_scale,
        test_fold_fwd_dense,
        test_fold_bwd_simple_constant,
        test_fold_bwd_simple,
        test_fold_bwd_dual_path,
        test_fold_bwd_dual_consumer,
        test_fold_bwd_fail,
        test_fold_bwd_relu_fail,
        test_fold_bwd_negative_scale,
        test_fold_bwd_dense,
        test_fold_bwd_bias_add,
        test_fold_fwd_conv3d,
        test_fold_bwd_conv3d,
    ):
        _test_fn()
| 50,270 | 38.428235 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_level1.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
import scipy
from tvm import relay
import pytest
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
from tvm.topi.utils import get_const_tuple
# pytest fixture via tvm.testing: tests that accept `executor_kind` are run
# once per listed executor ("graph" and "vm").
executor_kind = tvm.testing.parameter("graph", "vm")
def sigmoid(x):
    """Element-wise numpy reference for the sigmoid activation.

    Uses ones_like so the result keeps the input's dtype.
    """
    exp_neg = np.exp(-x)
    ones = np.ones_like(x)
    return ones / (ones + exp_neg)
def relu(x):
    """Element-wise numpy reference for relu; the input array is untouched."""
    return np.maximum(x, np.zeros_like(x))
def rsqrt(x):
    """Element-wise numpy reference for the reciprocal square root.

    ones_like keeps the result in the input's dtype.
    """
    return np.ones_like(x) / np.sqrt(x)
class TestUnaryOp:
    """Parameterized element-wise unary op tests: each relay op is checked
    for printing, type inference, and numerical agreement with its numpy
    reference on every enabled target/dtype combination."""
    # Tuple of (operator, reference op, supports fp16)
    op_list = {
        "log": (tvm.relay.log, np.log, True),
        "exp": (tvm.relay.exp, np.exp, True),
        "erf": (tvm.relay.erf, scipy.special.erf, True),
        "sqrt": (tvm.relay.sqrt, np.sqrt, True),
        "rqsrt": (tvm.relay.rsqrt, rsqrt, True),  # (sic) typo kept: it is only a test id
        "sigmoid": (tvm.relay.sigmoid, sigmoid, True),
        "tanh": (tvm.relay.tanh, np.tanh, False),
        "relu": (relay.nn.relu, relu, True),
        "cos": (tvm.relay.cos, np.cos, True),
        "sin": (tvm.relay.sin, np.sin, True),
        "tan": (tvm.relay.tan, np.tan, False),
        "atan": (tvm.relay.atan, np.arctan, False),
        "ceil": (tvm.relay.ceil, np.ceil, True),
        "floor": (tvm.relay.floor, np.floor, True),
        "trunc": (tvm.relay.trunc, np.trunc, True),
        "round": (tvm.relay.round, np.round, False),
    }
    # Each test below is instantiated per dtype and per op via tvm.testing.
    dtype = tvm.testing.parameter("float16", "float32")
    relay_op, ref_func, supports_fp16 = tvm.testing.parameters(
        *op_list.values(), ids=op_list.keys()
    )
    def test_unary_op(self, target, dev, relay_op, ref_func, supports_fp16, dtype):
        target = tvm.target.Target(target)
        # xfail (rather than skip) combinations known not to work; order of
        # these checks matters: device capability first, then op capability.
        if dtype == "float16":
            if target.kind.name == "cuda":
                if not have_fp16(tvm.cuda(0).compute_version):
                    pytest.xfail(
                        "No float16 support on local cuda device (compute_version != 5.3 and < 6.0)"
                    )
            elif target.kind.name == "vulkan" and not target.attrs.get("supports_float16", False):
                pytest.xfail("No float16 support on vulkan target (supports_float16=False)")
            elif not supports_fp16:
                pytest.xfail(f"No float16 support on {target.kind.name} target")
        if target.kind.name == "vulkan" and relay_op in [
            tvm.relay.erf,
            tvm.relay.tan,
            tvm.relay.atan,
        ]:
            pytest.xfail(f"Vulkan runtime doesn't yet support {relay_op}")
        shape = (10, 4)
        dtype = dtype  # no-op self-assignment, kept verbatim
        tp = relay.TensorType(shape, dtype=dtype)
        x = relay.var("x", type_annotation=tp)
        y = relay_op(x)
        # test printer
        assert ("{}(%x)".format(y.op.name)) in y.astext()
        # test type inference
        yy = run_infer_type(y)
        assert yy.checked_type == tp
        if ref_func is not None:
            data = np.random.rand(*shape).astype(dtype)
            ref_res = ref_func(data).astype(dtype)
            func = relay.Function([x], y)
            # use graph by executor default for testing, as we need
            # create function explicitly to avoid constant-folding.
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            # fp16 carries much less precision than fp32.
            tolerance = 1e-2 if dtype == "float16" else 1e-5
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=tolerance)
@tvm.testing.uses_gpu
def test_binary_op():
    """Elementwise binary ops: type inference with symbolic/broadcast shapes
    and numerical agreement with the numpy reference."""
    def inst(vars, sh):
        # Substitute symbolic dims by concrete values where available.
        return [vars.get(s, s) for s in sh]
    def check_binary_op(opfunc, ref, dtype):
        # TODO(@jroesch): this piece of code improperly uses type variables.
        n = te.var("n")
        s1 = (5, n, 5)
        s2 = (n, 1)
        t1 = relay.TensorType(s1)
        t2 = relay.TensorType(s2)
        x = relay.var("x", t1, dtype=dtype)
        y = relay.var("y", t2, dtype=dtype)
        z = opfunc(x, y)
        # test printer
        assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
        # Broadcasting (n, 1) against (5, n, 5) must yield t1.
        zz = run_infer_type(z)
        assert zz.checked_type == t1
        if ref is not None:
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1, dtype=dtype)
            y = relay.var("y", t2, dtype=dtype)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(dtype)
            y_data = np.random.rand(5, 10, 5).astype(dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)
            for target, dev in tvm.testing.enabled_targets():
                # use graph by execuor default for testing, as we need
                # create function explicitly to avoid constant-folding.
                if (
                    dtype == "float16"
                    and target == "cuda"
                    and not have_fp16(tvm.cuda(0).compute_version)
                ):
                    continue
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data, y_data
                )
                np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01, atol=1e-3)
    for opfunc, ref in [
        (relay.add, np.add),
        (relay.subtract, np.subtract),
        (relay.multiply, np.multiply),
        (relay.divide, np.divide),
        (relay.floor_divide, np.floor_divide),
        (relay.floor_mod, np.fmod),
    ]:
        for dtype in ["float16", "float32"]:
            check_binary_op(opfunc, ref, dtype)
@tvm.testing.uses_gpu
def test_expand_dims():
    # based on topi test
    def verify_expand_dims(dshape, dtype, oshape, axis, num_newaxis):
        """Run expand_dims on random data and compare against numpy reshape."""
        x = relay.Var("x", relay.TensorType(dshape, dtype))
        func = relay.Function([x], relay.expand_dims(x, axis, num_newaxis))
        for target, dev in tvm.testing.enabled_targets():
            # Skip fp16 on cuda devices without fp16 support.
            if (
                dtype == "float16"
                and target == "cuda"
                and not have_fp16(tvm.cuda(0).compute_version)
            ):
                continue
            data = np.random.uniform(size=dshape).astype(dtype)
            # expand_dims only inserts size-1 axes, so a reshape is the reference.
            ref_res = data.reshape(oshape)
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
    for dtype in ["float16", "float32"]:
        verify_expand_dims((3, 10), dtype, (3, 10, 1, 1), 2, 2)
        verify_expand_dims((3, 10), dtype, (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_bias_add():
    """bias_add with default axis: type inference and numerical check against
    explicit broadcasting with numpy."""
    for dtype in ["float16", "float32"]:
        xshape = (10, 2, 3, 4)
        bshape = (2,)
        rtol = 1e-2 if dtype == "float16" else 1e-5
        x = relay.var("x", shape=xshape, dtype=dtype)
        bias = relay.var("bias", dtype=dtype)
        z = relay.nn.bias_add(x, bias)
        zz = run_infer_type(z)
        # Default axis should not be printed, and the bias type must be
        # inferred as a 1-D tensor matching the channel dim.
        assert "axis=" not in zz.astext()
        assert zz.args[1].checked_type == relay.TensorType(bshape, dtype)
        func = relay.Function([x, bias], z)
        x_data = np.random.uniform(size=xshape).astype(dtype)
        y_data = np.random.uniform(size=bshape).astype(dtype)
        # bias broadcast along axis 1 (default channel axis).
        ref_res = x_data + y_data.reshape((2, 1, 1))
        for target, dev in tvm.testing.enabled_targets():
            if (
                dtype == "float16"
                and target == "cuda"
                and not have_fp16(tvm.cuda(0).compute_version)
            ):
                continue
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                x_data, y_data
            )
            np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol)
def test_bias_add_type_failure():
    """bias_add on scalar inputs must be rejected by type inference for
    every axis value."""
    def assert_failure(expr):
        # pytest.raises replaces the hand-rolled try/except/else/assert False;
        # it fails the test automatically if no TVMError is raised.
        with pytest.raises(tvm._ffi.base.TVMError):
            run_infer_type(expr)
    for axis in (0, -1, -3, 1):
        assert_failure(relay.nn.bias_add(relay.const(1), relay.const(2), axis=axis))
def test_expand_dims_infer_type():
    """expand_dims(axis=2) on (n, t, 100) must infer the type (n, t, 1, 100)."""
    for dtype in ("float16", "float32"):
        dim_n = te.size_var("n")
        dim_t = te.size_var("t")
        data = relay.var("x", shape=(dim_n, dim_t, 100), dtype=dtype)
        expanded = relay.expand_dims(data, axis=2)
        # The axis attribute must survive into the printed IR.
        assert "axis=2" in expanded.astext()
        typed = run_infer_type(expanded)
        assert typed.checked_type == relay.TensorType((dim_n, dim_t, 1, 100), dtype)
@tvm.testing.uses_gpu
def test_softmax():
    """nn.softmax(axis=1): type inference plus a numeric check against the topi reference."""
    for shape in [(10, 4), (10, 5, 4)]:
        for dtype in ["float16", "float32"]:
            # Softmax accuracy for float16 is poor
            if dtype == "float16":
                continue
            x = relay.var("x", shape=shape, dtype=dtype)
            y = relay.nn.softmax(x, axis=1)
            assert "nn.softmax" in y.astext()
            yy = run_infer_type(y)
            # Softmax preserves the input type exactly.
            assert yy.checked_type == relay.TensorType(shape, dtype)
            func = relay.Function([x], y)
            x_data = np.random.uniform(size=shape).astype(dtype)
            ref_res = tvm.topi.testing.softmax_python(x_data, axis=1)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data
                )
                np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_log_softmax():
    """nn.log_softmax(axis=1): type inference plus a numeric check against the topi reference."""
    for shape in [(10, 4), (10, 5, 4)]:
        for dtype in ["float16", "float32"]:
            # Softmax accuracy for float16 is poor
            if dtype == "float16":
                continue
            x = relay.var("x", shape=shape, dtype=dtype)
            y = relay.nn.log_softmax(x, axis=1)
            assert "nn.log_softmax" in y.astext()
            yy = run_infer_type(y)
            # log_softmax preserves the input type exactly.
            assert yy.checked_type == relay.TensorType(shape, dtype)
            func = relay.Function([x], y)
            x_data = np.random.uniform(size=shape).astype(dtype)
            ref_res = tvm.topi.testing.log_softmax_python(x_data, axis=1)
            for target, dev in tvm.testing.enabled_targets():
                # nvptx is deliberately excluded for this op.
                if target == "nvptx":
                    continue
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data
                )
                np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_concatenate(executor_kind):
    """relay.concatenate: symbolic-shape type inference, shape-mismatch rejection,
    and an end-to-end numeric check (concat followed by a scalar broadcast add)."""
    for dtype in ["float16", "float32"]:
        n, t, d = te.size_var("n"), te.size_var("t"), 100
        x = relay.var("x", shape=(n, t, d))
        y = relay.var("y", shape=(n, t, d))
        z = relay.concatenate((x, y), axis=-1)
        assert "axis=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))
        # axis=-1 and axis=2 are equivalent for rank-3 inputs.
        x = relay.exp(x)
        z = relay.concatenate((x, y), axis=2)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t, 200))
        # Concatenating along a symbolic dim adds the symbolic extents.
        z = relay.concatenate((x, y), axis=1)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((n, t + t, 100))
        # check shape mismatches (the following case is expected to raise tvm._ffi.base.TVMError.
        try:
            x = relay.var("p1", shape=(2, 5))
            y = relay.var("p2", shape=(2, 3))
            c = relay.concatenate([x, y], axis=0)
            func = relay.Function([x, y], c)
            zz = run_infer_type(func)
        except tvm._ffi.base.TVMError:
            pass
        else:
            assert False
        x = relay.var("x", shape=(10, 5), dtype=dtype)
        y = relay.var("y", shape=(10, 5), dtype=dtype)
        t = relay.var("z", shape=(), dtype=dtype)
        z = relay.concatenate((x, y), axis=1)
        z = relay.add(z, t)
        # Check result.
        func = relay.Function([x, y, t], z)
        x_data = np.random.rand(10, 5).astype(dtype)
        y_data = np.random.rand(10, 5).astype(dtype)
        t_data = np.random.uniform(size=()).astype(dtype)
        ref_res = np.concatenate((x_data, y_data), axis=1) + t_data
        for target, dev in tvm.testing.enabled_targets():
            # Skip fp16 on CUDA devices without native fp16 support.
            if (
                dtype == "float16"
                and target == "cuda"
                and not have_fp16(tvm.cuda(0).compute_version)
            ):
                continue
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data, y_data, t_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01)
def test_dropout(executor_kind):
    """nn.dropout: type inference preserves the input type, and execution
    returns the input unchanged (the assert below compares output to in_np)."""
    for dtype in ["float16", "float32"]:
        n, t, d = te.size_var("n"), te.size_var("t"), te.size_var("d")
        input_ty = relay.TensorType((n, t, d), dtype)
        x = relay.var("x", input_ty)
        y = relay.nn.dropout(x, rate=0.75)
        assert "rate=" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == input_ty
    in_np = np.random.random([4, 5, 6]).astype("float32")
    x = relay.const(in_np)
    y = relay.nn.dropout(x, rate=0.5)
    func = relay.Function([], y)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)()
        # The executed result must equal the input data.
        tvm.testing.assert_allclose(op_res.numpy(), in_np, rtol=0.01)
def test_batch_norm():
    """nn.batch_norm type inference for the default axis, axis=0, and axis=-1.

    The result is a 3-tuple (normalized data, moving_mean, moving_var); with
    center=False / scale=False, beta and gamma do not affect the inferred types.
    """
    for dtype in ["float16", "float32"]:
        # beta and gamma ignored
        data = relay.var("data", relay.TensorType((3, 2, 1), dtype))
        beta = relay.var("beta", relay.TensorType((2,), dtype))
        gamma = relay.var("gamma", relay.TensorType((2,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((2,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((2,), dtype))
        y = relay.nn.batch_norm(
            data, gamma, beta, moving_mean, moving_var, center=False, scale=False
        )
        yy = run_infer_type(y.astuple())
        assert "center=" in yy.astext()
        assert yy.checked_type == relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.TensorType((3, 2, 1), dtype),
                    relay.TensorType((2,), dtype),
                    relay.TensorType((2,), dtype),
                ]
            )
        )
        # axis=0: stats are sized by the first dimension (3), not the channel dim
        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
        y = relay.nn.batch_norm(
            data, gamma, beta, moving_mean, moving_var, axis=0, center=False, scale=False
        )
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((3, 2, 1), dtype),
                    relay.ty.TensorType((3,), dtype),
                    relay.ty.TensorType((3,), dtype),
                ]
            )
        )
        # axis=-1
        data = relay.var("data", relay.TensorType((1, 2, 3), dtype))
        beta = relay.var("beta", relay.TensorType((3,), dtype))
        gamma = relay.var("gamma", relay.TensorType((3,), dtype))
        moving_mean = relay.var("moving_mean", relay.TensorType((3,), dtype))
        moving_var = relay.var("moving_var", relay.TensorType((3,), dtype))
        y = relay.nn.batch_norm(
            data, gamma, beta, moving_mean, moving_var, axis=-1, center=False, scale=False
        )
        yy = run_infer_type(y.astuple())
        assert yy.checked_type == relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((1, 2, 3), dtype),
                    relay.ty.TensorType((3,), dtype),
                    relay.ty.TensorType((3,), dtype),
                ]
            )
        )
def do_concat_test(shapes, t_shape, dtype, axis, dev, target):
    """Concatenate random inputs of `shapes` along `axis`, add a tensor of shape
    `t_shape`, then check the graph and debug executors against NumPy."""
    concat_vars = []
    input_arrays = []
    for idx, shp in enumerate(shapes):
        concat_vars.append(relay.var("x{}".format(idx), shape=shp))
        input_arrays.append(np.random.rand(*shp).astype(dtype))
    addend = relay.var("z", shape=t_shape, dtype=dtype)
    combined = relay.add(relay.concatenate(concat_vars, axis=axis), addend)
    func = relay.Function(concat_vars + [addend], combined)
    addend_data = np.random.uniform(low=-10, high=10, size=t_shape).astype(dtype)
    expected = np.concatenate(tuple(input_arrays), axis=axis) + addend_data
    # Graph executor path (through an IRModule).
    mod = tvm.IRModule.from_expr(func)
    graph_exec = relay.create_executor("graph", mod=mod, device=dev, target=target)
    graph_out = graph_exec.evaluate()(*input_arrays, addend_data)
    tvm.testing.assert_allclose(graph_out.numpy(), expected, rtol=0.000001)
    # Debug executor path (directly from the function).
    debug_out = relay.create_executor("debug", device=dev, target=target).evaluate(func)(
        *input_arrays, addend_data
    )
    tvm.testing.assert_allclose(debug_out.numpy(), expected, rtol=0.000001)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate1(target, dev):
    """Randomized concatenate stress test over ranks 1..5 and all non-negative axes."""
    np.random.seed(471)
    maxNumDimensions = 6
    shape = [4, 32, 16, 1, 31, 20, 21, 8, 28, 7]  # just randomly selected 10 numbers
    for dtype in ["float32"]:
        for dimsNum in range(1, maxNumDimensions):
            np.random.shuffle(shape)
            for axis in range(0, dimsNum):  # range should be (-dimsNum + 1, dimsNum)
                numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
                shapes = []
                # the code below to normalize axes index. For some reasons tvm notifies about error if the axis is negative
                normalizedAxis = axis
                if axis < 0:
                    normalizedAxis += dimsNum
                finalSize = 0
                for i in range(0, numToConcat):
                    # Each input shares all dims except the (varying) concat axis.
                    shp = tuple(shape[:dimsNum])
                    finalSize += shape[(i % len(shape))]
                    shapes.append(
                        shp[:normalizedAxis]
                        + tuple([shape[(i % len(shape))]])
                        + shp[normalizedAxis + 1 :]
                    )
                # Expected output: concat axis is the sum of the per-input extents.
                t_shape = shp[:normalizedAxis] + tuple([finalSize]) + shp[normalizedAxis + 1 :]
                do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate2(target, dev):
    """Concatenate inputs whose shapes are all ones except along the concat axis."""
    # test to cover cases (1, .. , x, 1, .. , 1)
    np.random.seed(13)
    maxNumDimensions = 6
    shape = [8, 3, 25, 33, 12, 29, 5, 11, 29, 11]  # just randomly selected 10 numbers
    ind = 0
    for dtype in ["float32"]:
        for dimsNum in range(2, maxNumDimensions):
            np.random.shuffle(shape)
            for axis in range(-dimsNum + 1, dimsNum):  # range should be (-dimsNum + 1, dimsNum)
                numToConcat = np.random.uniform(low=2, high=10, size=(1)).astype("int64")[0]
                shapes = []
                # the code below to normalize axes index. For some reasons tvm notifies about error if the axis is negative
                normalizedAxis = axis
                if axis < 0:
                    normalizedAxis += dimsNum
                finalSize = 0
                for i in range(0, numToConcat):
                    # Shape is all ones except the concat axis, which cycles
                    # through the shuffled size pool.
                    axisVal = [1] * dimsNum
                    axisVal[axis] = shape[(ind % len(shape))]
                    ind += 1
                    finalSize += axisVal[axis]
                    shapes.append(tuple(axisVal))
                temp = [1] * dimsNum
                temp[axis] = finalSize
                t_shape = tuple(temp)
                do_concat_test(shapes, t_shape, dtype, axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate3(target, dev):
    """Concatenate along a negative axis (-2) with a trailing unit dimension."""
    np.random.seed(477)
    concat_axis = -2
    input_shapes = [[3, 2, 1, 1], [3, 2, 1, 1]]
    result_shape = [3, 2, 2, 1]
    for dtype in ["float32"]:
        do_concat_test(input_shapes, result_shape, dtype, concat_axis, dev, target)
@tvm.testing.parametrize_targets("llvm")
def test_concatenate4(target, dev):
    """Degenerate concatenate of a single int64 tensor must act as an identity."""
    np.random.seed(7)
    inp = relay.var("x", shape=(2, 1), dtype="int64")
    func = relay.Function([inp], relay.concatenate([inp], axis=1))
    data = np.array([[33], [13]], dtype="int64")
    expected = np.concatenate([data], axis=1)
    executor = relay.create_executor("graph", device=tvm.cpu(), target="llvm")
    result = executor.evaluate(func)(data)
    tvm.testing.assert_allclose(result.numpy(), expected, rtol=0.000001)
def test_batch_norm_fold_const():
    """FoldConstant on a batch_norm with all-constant inputs must agree with
    running the same batch_norm through the VM on the non-constant function."""
    axis = 1
    dtype = "float32"
    shape = [4, 5, 6]
    data_np = np.random.random(shape).astype(dtype)
    beta_np = np.random.random(shape[axis]).astype(dtype)
    gamma_np = np.random.random(shape[axis]).astype(dtype)
    moving_mean_np = np.random.random(shape[axis]).astype(dtype)
    moving_var_np = np.random.random(shape[axis]).astype(dtype)
    data = relay.var("data", relay.TensorType(shape, dtype))
    beta = relay.var("beta", relay.TensorType((shape[1],), dtype))
    gamma = relay.var("gamma", relay.TensorType((shape[1],), dtype))
    moving_mean = relay.var("moving_mean", relay.TensorType((shape[1],), dtype))
    moving_var = relay.var("moving_var", relay.TensorType((shape[1],), dtype))
    out = relay.nn.batch_norm(data, gamma, beta, moving_mean, moving_var, axis=axis).astuple()
    func = relay.Function([data, gamma, beta, moving_mean, moving_var], out)
    # Same computation, but with every input baked in as a constant.
    out_const = relay.nn.batch_norm(
        relay.const(data_np),
        relay.const(gamma_np),
        relay.const(beta_np),
        relay.const(moving_mean_np),
        relay.const(moving_var_np),
        axis=axis,
    ).astuple()
    func_const = relay.Function([], out_const)
    # Build the module with constants to have FoldConstant transform batch_norm.
    mod_const = tvm.IRModule.from_expr(func_const)
    mod_const = relay.transform.FoldConstant()(mod_const)
    const_data_out = mod_const["main"].body[0].data
    const_moving_mean_out = mod_const["main"].body[1].data
    const_moving_var_out = mod_const["main"].body[2].data
    # Run the Relay func without constants. This will use SimplifyInference instead.
    vm_data_out, vm_moving_mean_out, vm_moving_var_out = relay.create_executor(
        "vm", device=tvm.device("llvm"), target="llvm"
    ).evaluate(func)(data_np, gamma_np, beta_np, moving_mean_np, moving_var_np)
    tvm.testing.assert_allclose(const_data_out.numpy(), vm_data_out.numpy())
    tvm.testing.assert_allclose(const_moving_mean_out.numpy(), vm_moving_mean_out.numpy())
    tvm.testing.assert_allclose(const_moving_var_out.numpy(), vm_moving_var_out.numpy())
@pytest.mark.xfail
def test_matmul_type_check():
    """nn.matmul type inference on invalid operands (marked xfail)."""
    dtype = "float16"
    n, c, h, w = 2, 2, 2, 2
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    # it should fail since it does not match with m(2)
    mismatch_w = 3
    w = relay.var("w", relay.TensorType((mismatch_w, 2), dtype))
    y = relay.nn.matmul(x, w)
    yy = run_infer_type(y)
    # A rank-1 rhs must also be rejected by type inference.
    i0 = relay.var("i0", shape=(1, 1), dtype="float32")
    i1 = relay.var("i1", shape=(1,), dtype="float32")
    with pytest.raises(tvm.TVMError):
        run_infer_type(relay.nn.matmul(i0, i1))
@tvm.testing.uses_gpu
def test_matmul(executor_kind):
    """nn.matmul: type inference with units/transpose_a/transpose_b and an
    end-to-end numeric check against np.dot."""
    for dtype in ["float16", "float32"]:
        # Matmul accuracy for float16 is poor
        if dtype == "float16":
            continue
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        w = relay.var("w", relay.TensorType((2, w), dtype))
        y = relay.nn.matmul(x, w, units=2, transpose_b=True)
        assert "units=2" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        # transpose_a: the data's last two dims are given as (w, h).
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, w, h), dtype))
        wh, ww = te.size_var("wh"), te.size_var("ww")
        w = relay.var("w", relay.TensorType((wh, ww), dtype))
        y = relay.nn.matmul(x, w, transpose_a=True)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
        # An IncompleteType weight must still infer from units=2.
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        w = relay.var("w", relay.IncompleteType())
        y = relay.nn.matmul(x, w, units=2)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        x = relay.var("x", shape=(5, 10), dtype=dtype)
        w = relay.var("w", shape=(5, 2), dtype=dtype)
        z = relay.nn.matmul(x, w, transpose_a=True)
        # Check result.
        func = relay.Function([x, w], z)
        x_data = np.random.rand(5, 10).astype(dtype)
        w_data = np.random.rand(5, 2).astype(dtype)
        ref_res = np.dot(x_data.transpose(), w_data)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data, w_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@pytest.mark.xfail
def test_dense_type_check():
    """nn.dense type inference must reject a weight with a mismatched reduction dim (xfail)."""
    dtype = "float16"
    n, c, h, w = 2, 2, 2, 2
    x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
    # it should fail since it does not match with m(2)
    mismatch_w = 3
    w = relay.var("w", relay.TensorType((2, mismatch_w), dtype))
    y = relay.nn.dense(x, w)
    yy = run_infer_type(y)
@tvm.testing.uses_gpu
def test_dense(executor_kind):
    """nn.dense: type inference (symbolic dims, units, IncompleteType weight,
    dynamic inner dim) and an end-to-end numeric check against np.dot."""
    for dtype in ["float16", "float32"]:
        # Dense accuracy for float16 is poor
        if dtype == "float16":
            continue
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        w = relay.var("w", relay.TensorType((2, w), dtype))
        y = relay.nn.dense(x, w, units=2)
        assert "units=2" in y.astext()
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        wh, ww = te.size_var("wh"), te.size_var("ww")
        w = relay.var("w", relay.TensorType((ww, wh), dtype))
        y = relay.nn.dense(x, w)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype)
        # test dynamic shape in inner
        m, k = 4, 2
        x = relay.var("x", relay.TensorType((m, k), dtype))
        # NOTE(review): nw is unused and the weight below reuses the symbolic n
        # rebound above — possibly nw was intended; confirm against upstream.
        k, nw = relay.Any(), 6
        w = relay.var("w", relay.TensorType((k, n), dtype))
        y = relay.nn.dense(x, w)
        yy = run_infer_type(y)
        # Confirm that input shape has not been rewritten to become dynamic.
        assert get_const_tuple(yy.type_args[0].shape) == (4, 2)
        n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), 2
        x = relay.var("x", relay.TensorType((n, c, h, w), dtype))
        # An IncompleteType weight must still infer from units=2.
        w = relay.var("w", relay.IncompleteType())
        y = relay.nn.dense(x, w, units=2)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype)
        x = relay.var("x", shape=(10, 5), dtype=dtype)
        w = relay.var("w", shape=(2, 5), dtype=dtype)
        z = relay.nn.dense(x, w)
        # Check result.
        func = relay.Function([x, w], z)
        x_data = np.random.rand(10, 5).astype(dtype)
        w_data = np.random.rand(2, 5).astype(dtype)
        ref_res = np.dot(x_data, w_data.T)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data, w_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dense_same_args_compile():
    """Compiling nn.dense with the same tensor as data and weight must not crash."""
    for in_dtype in ["float32", "int8"]:
        # int8 inputs accumulate into int32; float stays float.
        acc_dtype = "int32" if in_dtype == "int8" else "float32"
        arg = relay.var("x", shape=(32, 64), dtype=in_dtype)
        mod = tvm.IRModule.from_expr(
            relay.Function([arg], relay.nn.dense(arg, arg, out_dtype=acc_dtype))
        )
        for target, _ in tvm.testing.enabled_targets():
            tvm.relay.build(mod, target=target)
def test_dense_dtype():
    """Mixed-dtype dense: uint8 data x int8 weight with an explicit uint8 output."""
    n, c, h = te.size_var("n"), te.size_var("c"), te.size_var("h")
    w_dim = te.size_var("w")
    data = relay.var("x", relay.TensorType((n, c, h, w_dim), "uint8"))
    weight = relay.var("w", relay.TensorType((2, w_dim), "int8"))
    out = relay.nn.dense(data, weight, units=2, out_dtype="uint8")
    assert "units=2" in out.astext()
    typed = run_infer_type(out)
    assert typed.checked_type == relay.TensorType((n, c, h, 2), "uint8")
    # The argument dtypes must be preserved by type inference.
    assert run_infer_type(typed.args[0]).checked_type.dtype == "uint8"
    assert run_infer_type(typed.args[1]).checked_type.dtype == "int8"
def test_bitserial_dense():
    """Type inference for nn.bitserial_dense: (m, k) x (k, 32) -> (m, 32).

    Bug fix: the units check was a bare expression (no assert, so it never
    failed) and tested the wrong value ("units=8" instead of the requested
    units=32).
    """
    m, k = te.size_var("m"), te.size_var("k")
    x = relay.var("x", relay.TensorType((m, k), "int16"))
    w = relay.var("w", relay.TensorType((k, 32), "int16"))
    y = relay.nn.bitserial_dense(x, w, units=32)
    assert "units=32" in y.astext()
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((m, 32), "int16")
def dense_x86_test(m, n, k, target="llvm -mcpu=cascadelake", intrins=["vpdpbusd"]):
    """Build an int8 dense+bias_add for the given x86 target, check that the
    expected intrinsics appear in the generated assembly (when the shape is
    eligible), and verify the numeric result against NumPy.

    NOTE(review): the mutable default `intrins=["vpdpbusd"]` is shared across
    calls; harmless here because it is only read, never mutated.
    """
    data_shape = (m, k)
    weight_shape = (n, k)
    for data_dtype in ["uint8", "int8"]:
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        weight = relay.var("weight", shape=weight_shape, dtype="int8")
        bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
        dense = relay.nn.dense(data, weight, out_dtype="int32")
        out = relay.nn.bias_add(dense, bias)
        mod = tvm.IRModule.from_expr(out)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target)
        # TODO(vvchernov): needs for avx512 arch, can be extended
        if n % 16 == 0 and k % 4 == 0:
            asm = lib.lib.get_source("asm")
            for intrin in intrins:
                assert intrin in asm
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
        b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
        c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
        runtime.set_input("data", a)
        runtime.set_input("weight", b)
        runtime.set_input("bias", c)
        runtime.run()
        out = runtime.get_output(0).numpy()
        # Reference: int32 matmul plus the bias.
        ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
        np.testing.assert_equal(out, ref)
@tvm.testing.requires_llvm
@pytest.mark.skip("skip due to AMX feature not avaliable yet")
def test_dense_amx_int8():
    """int8 dense+bias_add on Sapphire Rapids must lower to AMX tile
    instructions and match the NumPy reference (currently skipped in CI)."""
    data_shape = (32, 128)
    weight_shape = (32, 128)
    amx_init = tvm.get_global_func("runtime.amx_init")
    amx_tileconfig = tvm.get_global_func("runtime.amx_tileconfig")
    assert amx_init()
    assert amx_tileconfig(16, 64)  # config tile size to 16 rows by 64 columns.
    for data_dtype in ["uint8", "int8"]:
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        weight = relay.var("weight", shape=weight_shape, dtype="int8")
        bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
        dense = relay.nn.dense(data, weight, out_dtype="int32")
        out = relay.nn.bias_add(dense, bias)
        mod = tvm.IRModule.from_expr(out)
        target = "llvm -mcpu=sapphirerapids"
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target)
        # The full AMX tile sequence must be present in the generated assembly.
        asm = lib.lib.get_source("asm")
        assert "tilezero" in asm
        assert "tileloaddt1" in asm
        assert "tdpbusd" in asm
        assert "tilestored" in asm
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
        b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
        c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
        runtime.set_input("data", a)
        runtime.set_input("weight", b)
        runtime.set_input("bias", c)
        runtime.run()
        out = runtime.get_output(0).numpy()
        # Reference: int32 matmul plus the bias.
        ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
        np.testing.assert_equal(out, ref)
@tvm.testing.requires_cascadelake
@pytest.mark.parametrize("m,n,k", [(32, 128, 96), (32, 128, 97)])
def test_dense_vnni(m, n, k):
    """int8 dense on Cascade Lake (default target of dense_x86_test, expecting vpdpbusd)."""
    dense_x86_test(m, n, k)
@tvm.testing.requires_skylake_avx512
@pytest.mark.parametrize("m,n,k", [(32, 128, 96), (32, 128, 97)])
def test_dense_skylake_avx512(m, n, k):
    """int8 dense on Skylake-AVX512, expecting the pmaddubs/pmaddw/vpaddd sequence."""
    dense_x86_test(m, n, k, "llvm -mcpu=skylake-avx512", ["pmaddubs", "pmaddw", "vpaddd"])
@pytest.mark.skip("Requires GFX10 AMDGPU")
def test_dense_rocm_sdot4():
    """int8 dense+bias_add on ROCm with dot-product support must lower to
    v_dot4_i32_i8 and match the NumPy reference (skipped without GFX10)."""
    data_shape = (32, 96)
    weight_shape = (128, 96)
    data_dtype = "int8"
    data = relay.var("data", shape=data_shape, dtype=data_dtype)
    weight = relay.var("weight", shape=weight_shape, dtype="int8")
    bias = relay.var("bias", shape=(weight_shape[0],), dtype="int32")
    dense = relay.nn.dense(data, weight, out_dtype="int32")
    out = relay.nn.bias_add(dense, bias)
    mod = tvm.IRModule.from_expr(out)
    target = "rocm -mattr=+dotprod"
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target)
    # The GPU module's assembly must use the 4-way int8 dot product.
    asm = lib.lib.imported_modules[0].get_source("asm")
    assert "v_dot4_i32_i8" in asm
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    a = np.random.uniform(1, 10, size=data_shape).astype(data_dtype)
    b = np.random.uniform(1, 10, size=weight_shape).astype("int8")
    c = np.random.uniform(1, 10, size=(weight_shape[0],)).astype("int32")
    runtime.set_input("data", a)
    runtime.set_input("weight", b)
    runtime.set_input("bias", c)
    runtime.run()
    out = runtime.get_output(0).numpy()
    # Reference: int32 matmul plus the bias.
    ref = np.dot(a.astype("int32"), b.transpose().astype("int32")) + c
    np.testing.assert_equal(out, ref)
def test_extern_concat_injective_fuse():
    """Regression test: building this MobileBERT subgraph must not crash."""
    # This is a subgraph from MobileBERT, which crashes compilation if buffers created in te.extern(...)
    # do not have their elem_offset explicitly set as a variable.
    # fmt: off
    mod = tvm.relay.fromtext(
        """
        #[version = "0.0.5"]
        def @main(%p0844: Tensor[(1, 384), int64], %p1652: Tensor[(2016, 128), float16]) {
          %1331 = cast(%p0844, dtype="int32");
          %1332 = take(%p1652, %1331, axis=0);
          %1333 = strided_slice(%1332, begin=[0, 1, 0], end=[1, 384, 128], strides=[1, 1, 1], axes=None);
          %1334 = strided_slice(%1332, begin=[0, 0, 0], end=[1, -1, 128], strides=[1, 1, 1], axes=None);
          %1335 = nn.pad(%1333, 0, pad_width=[[0, 0], [0, 1], [0, 0]]);
          %1336 = nn.pad(%1334, 0, pad_width=[[0, 0], [1, 0], [0, 0]]);
          %1337 = (%1335, %1332, %1336);
          %1338 = concatenate(%1337, axis=2);
          reshape(%1338, newshape=[-1, 384])
        }
        """
    )
    # fmt: on
    relay.build(mod, params={}, target="llvm")
if __name__ == "__main__":
    # Allow running this test file directly (outside pytest).
    tvm.testing.main()
| 36,415 | 38.283711 | 123 | py |
tvm | tvm-main/tests/python/relay/test_ir_bind.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" test bind function."""
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm import TVMError
def test_bind_params():
    """relay.bind substitutes variables in both functions and bare expressions."""
    lhs = relay.var("x")
    rhs = relay.var("y")
    total = relay.add(lhs, rhs)
    # Binding a function parameter to a constant removes it from the signature.
    bound_fn = relay.bind(relay.Function([lhs, rhs], total), {lhs: relay.const(1, "float32")})
    expected_fn = relay.Function([rhs], relay.add(relay.const(1, "float32"), rhs))
    assert tvm.ir.structural_equal(bound_fn, expected_fn)
    # Binding on a plain expression rewrites the variable in place.
    bound_expr = relay.bind(total, {rhs: lhs})
    assert tvm.ir.structural_equal(bound_expr, relay.add(lhs, lhs))
def test_bind_duplicated_params():
    """Binding by name must fail when two parameters share the same name hint."""
    first = relay.var("a", shape=(1,))
    second = relay.var("a", shape=(1,))
    func = relay.Function([first, second], first + second)
    with pytest.raises(TVMError):
        relay.build_module.bind_params_by_name(func, {"a": [1.0]})
if __name__ == "__main__":
    # Allow running this test file directly (outside pytest).
    test_bind_params()
    test_bind_duplicated_params()
| 1,698 | 31.673077 | 76 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_subtract.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import numpy as np
from tvm import relay
def qnn_subtract_driver(x_datas, y_datas, golden_outputs, scale_and_zp, data_dtype="uint8"):
    """Build a (1, 4) qnn.subtract graph with the given quantization params,
    canonicalize it, and check each (x, y) pair against its golden output.

    Improvements over the original: the per-case index loop is replaced with
    zip, and the executor is compiled once (it was rebuilt on every iteration,
    recompiling the same function per test case).
    """
    # All x, y and golden outputs must be of the same length.
    assert len(x_datas) == len(y_datas) == len(golden_outputs)
    x = relay.var("x", shape=(1, 4), dtype=data_dtype)
    y = relay.var("y", shape=(1, 4), dtype=data_dtype)
    z = relay.qnn.op.subtract(
        lhs=x,
        rhs=y,
        lhs_scale=relay.const(scale_and_zp["lhs_scale"], "float32"),
        lhs_zero_point=relay.const(scale_and_zp["lhs_zp"], "int32"),
        rhs_scale=relay.const(scale_and_zp["rhs_scale"], "float32"),
        rhs_zero_point=relay.const(scale_and_zp["rhs_zp"], "int32"),
        output_scale=relay.const(scale_and_zp["output_scale"], "float32"),
        output_zero_point=relay.const(scale_and_zp["output_zp"], "int32"),
    )
    mod = tvm.IRModule.from_expr(relay.Function([x, y], z))
    mod = relay.transform.InferType()(mod)
    # Lower the qnn op into regular Relay ops before execution.
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    func = mod["main"]
    # Compile once, outside the per-case loop.
    run = relay.create_executor("graph", device=tvm.cpu(0), target="llvm").evaluate(func)
    for x_data, y_data, golden_output in zip(x_datas, y_datas, golden_outputs):
        np.testing.assert_equal(run(x_data, y_data).numpy(), golden_output)
def test_tflite_same_io_qnn_params():
    """qnn.subtract where lhs, rhs and output all share one quantization scheme."""
    qparams = {
        "lhs_scale": 0.00784314,
        "lhs_zp": 127,
        "rhs_scale": 0.00784314,
        "rhs_zp": 127,
        "output_scale": 0.00784314,
        "output_zp": 127,
    }
    # (x, y, expected) triples; each vector is reshaped to (1, 4).
    cases = [
        ((140, 153, 165, 178), (204, 178, 165, 140), (63, 102, 127, 165)),
        ((25, 153, 178, 216), (204, 178, 191, 25), (0, 102, 114, 255)),
        ((25, 153, 216, 165), (204, 178, 25, 191), (0, 102, 255, 101)),
    ]
    x_datas = [np.array(x).reshape((1, 4)) for x, _, _ in cases]
    y_datas = [np.array(y).reshape((1, 4)) for _, y, _ in cases]
    golden_outputs = [np.array(g).reshape((1, 4)) for _, _, g in cases]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, qparams)
def test_tflite_different_io_qnn_params():
    """qnn.subtract where lhs, rhs and output each use distinct scales/zero points."""
    qparams = {
        "lhs_scale": 0.0156863,
        "lhs_zp": 127,
        "rhs_scale": 0.0117647,
        "rhs_zp": 85,
        "output_scale": 0.0235294,
        "output_zp": 128,
    }
    # (x, y, expected) triples; each vector is reshaped to (1, 4).
    cases = [
        ((76, 140, 153, 172), (136, 119, 128, 17), (68, 120, 123, 192)),
        ((133, 140, 146, 153), (136, 119, 111, 94), (106, 120, 128, 140)),
        ((76, 140, 172, 146), (136, 119, 17, 128), (68, 120, 192, 119)),
    ]
    x_datas = [np.array(x).reshape((1, 4)) for x, _, _ in cases]
    y_datas = [np.array(y).reshape((1, 4)) for _, y, _ in cases]
    golden_outputs = [np.array(g).reshape((1, 4)) for _, _, g in cases]
    qnn_subtract_driver(x_datas, y_datas, golden_outputs, qparams)
def test_saturation():
    """qnn.subtract results must saturate to the uint8 output range [0, 255]."""
    # Same params
    scale_and_zp = {
        "lhs_scale": 0.125,
        "lhs_zp": 0,
        "rhs_scale": 0.125,
        "rhs_zp": 0,
        "output_scale": 0.125,
        "output_zp": 0,
    }
    x_data = [np.array((255, 1, 1, 0)).reshape((1, 4))]
    y_data = [np.array((255, 255, 128, 0)).reshape((1, 4))]
    # Negative real differences clamp at 0.
    golden_output = [np.array((0, 0, 0, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
    # Same params, different scale
    scale_and_zp = {
        "lhs_scale": 0.125,
        "lhs_zp": 0,
        "rhs_scale": 0.125,
        "rhs_zp": 0,
        "output_scale": 0.25,
        "output_zp": 0,
    }
    x_data = [np.array((255, 1, 200, 0)).reshape((1, 4))]
    y_data = [np.array((255, 255, 127, 0)).reshape((1, 4))]
    golden_output = [np.array((0, 0, 36, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
    # All params different
    scale_and_zp = {
        "lhs_scale": 0.5,
        "lhs_zp": 0,
        "rhs_scale": 0.25,
        "rhs_zp": 0,
        "output_scale": 0.125,
        "output_zp": 0,
    }
    x_data = [np.array((255, 0, 1, 0)).reshape((1, 4))]
    y_data = [np.array((0, 128, 64, 0)).reshape((1, 4))]
    # Results above 255 in the output scale clamp at 255.
    golden_output = [np.array((255, 0, 0, 0)).reshape((1, 4))]
    qnn_subtract_driver(x_data, y_data, golden_output, scale_and_zp)
if __name__ == "__main__":
    # Allow running this test file directly (outside pytest).
    test_tflite_same_io_qnn_params()
    test_tflite_different_io_qnn_params()
    test_saturation()
| 5,643 | 33.625767 | 97 | py |
tvm | tvm-main/tests/python/relay/test_analysis_extract_operators.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import pytest
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
from tvm.relay.testing import run_opt_pass
def get_conv_net():
    """Return an IRModule for a small diamond graph: conv2d -> conv2d, with the
    first conv's output also feeding an elementwise add of both conv results."""
    input_shape = (1, 1, 5, 1)
    inp = relay.var("x", shape=input_shape)
    first = relay.nn.conv2d(inp, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    second = relay.nn.conv2d(first, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
    out = relay.add(first, second)
    return tvm.IRModule.from_expr(out)
def get_conv2d():
    """Return an IRModule with a single NHWC/HWIO conv2d (64 -> 32 channels)."""
    data = relay.var("x", shape=(1, 56, 56, 64))
    kernel = relay.var("weight1", shape=(3, 3, 64, 32))
    conv = relay.nn.conv2d(
        data,
        kernel,
        channels=32,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    return tvm.IRModule.from_expr(conv)
def test_extract_identity():
    """A single-conv module reports exactly one op with frequency one."""
    freqs = relay.analysis.list_op_freqs(get_conv2d())
    assert len(freqs) == 1
    assert freqs["nn.conv2d"] == 1
def test_extract_conv_net():
    """The diamond conv net counts two conv2d ops and one add."""
    freqs = relay.analysis.list_op_freqs(get_conv_net())
    assert len(freqs) == 2
    assert freqs["add"] == 1
    assert freqs["nn.conv2d"] == 2
def test_extract_fused():
    """Op counting sees through fused functions produced by FuseOps."""
    fused_mod = relay.transform.FuseOps(3)(relay.transform.InferType()(get_conv_net()))
    freqs = relay.analysis.list_op_freqs(fused_mod)
    assert len(freqs) == 2
    assert freqs["add"] == 1
    assert freqs["nn.conv2d"] == 2
def test_extract_resnet():
    """The default ResNet workload reports the expected per-op frequencies."""
    mod, _params = get_workload()
    expected_op_freqs = {
        "nn.batch_norm": 19,
        "nn.conv2d": 21,
        "nn.relu": 18,
        "nn.max_pool2d": 1,
        "add": 8,
        "nn.global_avg_pool2d": 1,
        "nn.batch_flatten": 1,
        "nn.dense": 1,
        "nn.bias_add": 1,
        "nn.softmax": 1,
    }
    op_freqs = relay.analysis.list_op_freqs(mod)
    assert len(op_freqs) == len(expected_op_freqs)
    # Generator expression: no need to materialize a list inside all().
    assert all(op_freqs[op] == expected_op_freqs[op] for op in expected_op_freqs)
if __name__ == "__main__":
    # Discover and run all tests in this file when executed as a script.
    tvm.testing.main()
| 3,064 | 27.37963 | 92 | py |
tvm | tvm-main/tests/python/relay/test_backend_graph_executor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from unittest.mock import patch
import tvm
import json
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.op import add
import tvm.testing
from tvm.relay.testing import mlp
from tvm import rpc
from tvm.contrib import utils
# @tq, @jr should we put this in testing ns?
def check_rts(expr, args, expected_result, mod=None):
    """
    Check that evaluating `expr` applied to the arguments produces
    `result` on both the evaluator and TVM runtime.

    Parameters
    ----------
    expr:
        The expression to evaluate

    args: list of Expr
        The arguments to supply the expr.

    expected_result:
        The expected result of running the expression.
    """
    results = {
        kind: relay.create_executor(kind, mod=mod).evaluate(expr)(*args)
        for kind in ("debug", "graph")
    }
    # Debug interpreter and graph runtime must agree with each other...
    tvm.testing.assert_allclose(results["debug"].numpy(), results["graph"].numpy())
    # ...and with the caller-provided reference.
    tvm.testing.assert_allclose(results["debug"].numpy(), expected_result)
def test_add_op_scalar():
    """
    test_add_op_scalar:
        fn (x, y) {
            return x + y;
        }
    """
    lhs = relay.var("x", shape=())  # Defaults to float32
    rhs = relay.var("y", shape=())  # Defaults to float32
    func = relay.Function([lhs, rhs], add(lhs, rhs))
    # Exercise 0-d ndarray, numpy scalar, and plain Python float inputs.
    cases = [
        (np.array(10.0, dtype="float32"), np.array(1.0, dtype="float32")),
        (np.float32(10.0), np.float32(1.0)),
        (10.0, 1.0),
    ]
    for x_val, y_val in cases:
        check_rts(func, [x_val, y_val], x_val + y_val)
def test_add_op_scalar_int():
    """
    test_add_op_scalar_int:
        fn (x, y) {
            return x + y;
        }
    """
    x = relay.var("x", shape=(), dtype="int32")
    y = relay.var("y", shape=(), dtype="int32")
    func = relay.Function([x, y], add(x, y))
    # Use integer literals for int32 test data; the original float literals
    # (10.0 / 1.0) were silently truncated to the same integer values.
    x_y_data = [
        (np.array(10, dtype="int32"), np.array(1, dtype="int32")),
        (np.int32(10), np.int32(1)),
        (10, 1),
    ]
    for (x_data, y_data) in x_y_data:
        check_rts(func, [x_data, y_data], x_data + y_data)
def test_add_op_tensor():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    shape = (10, 5)
    lhs = relay.var("x", shape=shape)
    rhs = relay.var("y", shape=shape)
    func = relay.Function([lhs, rhs], add(lhs, rhs))
    lhs_data = np.random.rand(*shape).astype("float32")
    rhs_data = np.random.rand(*shape).astype("float32")
    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)
def test_add_op_broadcast():
    """
    Program:
       fn (x, y) {
           return x + y;
       }
    """
    # (1, 5) broadcasts across the first axis of (10, 5).
    lhs = relay.var("x", shape=(10, 5))
    rhs = relay.var("y", shape=(1, 5))
    func = relay.Function([lhs, rhs], add(lhs, rhs))
    lhs_data = np.random.rand(10, 5).astype("float32")
    rhs_data = np.random.rand(1, 5).astype("float32")
    check_rts(func, [lhs_data, rhs_data], lhs_data + rhs_data)
def test_with_params():
    """Graph executor honors parameters bound at build time."""
    x = relay.var("x", shape=(10, 5))
    y = relay.var("y", shape=(1, 5))
    body = relay.exp(relay.add(x, y))
    func = relay.Function([x, y], body)
    x_data = np.random.rand(10, 5).astype("float32")
    y_data = np.random.rand(1, 5).astype("float32")
    # Bind "y" as a constant parameter during compilation.
    graph, lib, params = relay.build(
        tvm.IRModule.from_expr(func), "llvm", params={"y": y_data}
    )
    executor = graph_executor.create(graph, lib, device=tvm.cpu(0))
    executor.set_input(**params)
    executor.set_input(x=x_data)
    executor.run()
    actual = executor.get_output(0).numpy()
    expected = np.exp(y_data + x_data)
    tvm.testing.assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)
def test_plan_memory():
    """GraphPlanMemory reuses storage across a chain of elementwise ops."""
    # it is sufficient to cycle through two memories.
    x = relay.var("x", shape=(10,))
    # NOTE(review): bound to Python name `y` but reuses the name hint "x";
    # only storage planning is inspected here, so the hint looks irrelevant — confirm.
    y = relay.var("x", shape=(1,))
    y2 = relay.exp(y)
    z = relay.add(x, y2)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    z = relay.exp(z)
    func = relay.Function([x, y], z)
    mod = tvm.IRModule.from_expr(func)
    mod = relay.transform.InferType()(mod)
    # FuseOps(0) keeps every op as its own primitive call, so each
    # intermediate gets its own storage decision.
    mod = relay.transform.FuseOps(0)(mod)
    func = mod["main"]
    mod = relay.transform.InferType()(mod)
    memory_plan = relay.backend._backend.GraphPlanMemory(func)
    storage_ids = set()
    device_types = set()
    storage_sizes = {}
    # Collect the planner's per-expression storage assignments.
    for k, v in memory_plan.expr_to_storage_info.items():
        for x in v.storage_ids:
            storage_ids.add(x)
            storage_sizes[x] = v.storage_sizes
        for x in v.device_types:
            device_types.add(x)
    # Current rule requires vars have unique storage id
    # because we don't do inplace, we will need another
    # two alternating temporary space.
    assert len(storage_ids) == 4, f"found storage_ids: {storage_ids}"
    assert len(device_types) == 1
    assert len(storage_sizes) == 4
    # Check the specific size of each sid
    # (40 bytes = 10 float32 elements, 4 bytes = 1 float32 element).
    assert (
        storage_sizes[0][0] == 40
        and storage_sizes[1][0] == 4
        and storage_sizes[2][0] == 4
        and storage_sizes[3][0] == 40
    )
def test_plan_2d_memory():
    """Verification if GraphPlanMemory manages 2d memory reffered as
    global.texture* memory scopes in json file."""
    global_virtual_device = tvm.target.VirtualDevice(memory_scope="global")
    texture_virtual_device = tvm.target.VirtualDevice(memory_scope="global.texture")
    # Metatable lets the Relay text below reference the two virtual devices
    # as meta[VirtualDevice][0] (global) and meta[VirtualDevice][1] (texture).
    metatable = {
        "VirtualDevice": [
            global_virtual_device,
            texture_virtual_device,
        ]
    }
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data1: Tensor[(1, 32, 40, 40), float32],
                  %data2: Tensor[(1, 32, 40, 40), float32]) {
          %0 = fn (%a, Primitive=1) {
            layout_transform(%a, src_layout="NCHW", dst_layout="NCHW4c")
          };
          %1 = %0(%data1);
          %3 = %0(%data2);
          %5 = fn (%a {virtual_device=meta[VirtualDevice][0]},  // global
                   %b {virtual_device=meta[VirtualDevice][0]},  // global
                   virtual_device=meta[VirtualDevice][1],       // texture
                   Primitive=1) {
            add(%a, %b)
          };
          %6 = %5(%1, %3);
          %7 = fn (%a {virtual_device=meta[VirtualDevice][1]},  // texture
                   %b {virtual_device=meta[VirtualDevice][0]},  // global
                   virtual_device=meta[VirtualDevice][1],       // texture
                   Primitive=1) {
            add(%a, %b)
          };
          %8 = %7(%6, %3);
          %9 = fn (%a {virtual_device=meta[VirtualDevice][1]},  // texture
                   %b {virtual_device=meta[VirtualDevice][1]},  // texture
                   virtual_device=meta[VirtualDevice][1],       // texture
                   Primitive=1) {
            add(%a, %b)
          };
          %10 = %9(%8, %6);
          %11 = fn (%a,
                    virtual_device=meta[VirtualDevice][0],      // global
                    Primitive=1) {
            layout_transform(%a, src_layout="NCHW4c", dst_layout="NCHW")
          };
          %11(%10)
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # Device planning targets CUDA with an llvm host; only the planner runs,
    # no code is executed on a GPU here.
    GPU_DEVICE = tvm.device("cuda")
    HOST_TARGET = tvm.target.Target("llvm")
    GPU_TARGET = tvm.target.Target("cuda").with_host(HOST_TARGET)
    GPU = tvm.target.VirtualDevice(GPU_DEVICE, GPU_TARGET)  # device_type=2
    CTXT = tvm.transform.PassContext(config={"relay.fallback_device_type": GPU.device_type_int})
    config = tvm.target.make_compilation_config(CTXT, GPU_TARGET)
    mod = relay.transform.InferType()(mod)
    # PlanDevices should succeed.
    mod = relay.transform.PlanDevices(config)(mod)
    func = mod["main"]
    memory_plan = relay.backend._backend.GraphPlanMemory(func)
    virtual_devices = {}
    # We do not have execution ordered information, the only order that we can stick
    # in this place - storage_id
    # for above graph we know that
    # We have
    #  - 8 manageable storages for above graph
    #  - 5 of them are buffers
    #  - 3 of them are textures (2d storages)
    #  - 1 of buffer will be reused, since we have storage id maped data, we will have 4th
    #    storage id reuesed and hidden in virtual_devices map
    #  - no textures are reused so far
    for k, v in memory_plan.expr_to_storage_info.items():
        virtual_devices[v.storage_ids[0]] = v.virtual_devices[0].memory_scope
    # Check the scopes according to abvoce expectaions
    assert (
        virtual_devices[0] == "global"
        and virtual_devices[1] == "global"
        and virtual_devices[2] == "global"
        and virtual_devices[3] == "global"
        and virtual_devices[4] == "global.texture"
        and virtual_devices[5] == "global.texture"
        and virtual_devices[6] == "global.texture"
    )
def test_reshape_nop():
    """Reshape nodes compile to __nop and share their input's storage."""
    # test that reshape can be turned into nop
    x = relay.var("x", shape=(10, 4))
    xx = relay.abs(x)
    y = relay.expand_dims(xx, axis=1)
    t0 = relay.reshape(y, (1, 40))
    t1 = relay.abs(y)
    z0 = relay.reshape(t0, (2, 20))
    z1 = relay.sqrt(t1)
    z2 = relay.reshape(t1, (1, 40))
    func = relay.Function([x], relay.Tuple([z0, z1, z2]))
    x_data = np.random.rand(10, 4).astype("float32")
    graph = relay.build(tvm.IRModule.from_expr(func), "llvm")
    graph_json_str = graph.get_graph_json()
    graph_json = json.loads(graph_json_str)
    # reshape must force sharing memory
    storage_ids = graph_json["attrs"]["storage_id"][1]
    assert tuple(storage_ids) == (0, 1, 1, 2, 3, 2)
    # Nodes 2 and 5 are the reshape chains, lowered to no-ops.
    assert graph_json["nodes"][2]["attrs"]["func_name"] == "__nop"
    assert graph_json["nodes"][5]["attrs"]["func_name"] == "__nop"
    gmod = graph_executor.GraphModule(graph["default"](tvm.cpu(0)))
    gmod.set_input(x=x_data)
    gmod.run()
    # np.random.rand() yields non-negative data, so abs() is a no-op in the
    # reference below and the reshape of x_data directly is still correct.
    z0_np = x_data.reshape(2, 20)
    z1_np = np.sqrt(
        np.abs(
            x_data.reshape(
                10,
                1,
                4,
            )
        )
    )
    z2_np = np.abs(x_data).reshape(1, 40)
    tvm.testing.assert_allclose(gmod.get_output(0).numpy(), z0_np)
    tvm.testing.assert_allclose(gmod.get_output(1).numpy(), z1_np)
    tvm.testing.assert_allclose(gmod.get_output(2).numpy(), z2_np)
@tvm.testing.uses_gpu
def test_gru_like():
    """Compare a GRU-style gated unit against a NumPy reference on all targets."""

    def unit(rnn_dim):
        # sigmoid(s0) + tanh(s1) * exp(s2) over a 3-way split of X @ W^T.
        X = relay.var("X", shape=(1, rnn_dim))
        W = relay.var("y", shape=(3 * rnn_dim, rnn_dim))
        matmul = relay.nn.dense(X, W)
        splitted = relay.split(matmul, indices_or_sections=3, axis=1)
        out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
        return relay.Function([X, W], out)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def unit_numpy(X, W):
        # NumPy mirror of unit(); dense(X, W) == X @ W^T.
        prod = np.dot(X, W.transpose())
        splits = np.split(prod, indices_or_sections=3, axis=1)
        return sigmoid(splits[0]) + np.tanh(splits[1]) * np.exp(splits[2])

    dtype = "float32"
    rnn_dim = 1000
    x = np.random.rand(1, rnn_dim).astype(dtype)
    # Small weights keep exp() from overflowing float32.
    y = np.random.rand(3 * rnn_dim, rnn_dim).astype(dtype) * 0.01 - 0.005
    out_shape = (1, rnn_dim)
    z = unit(rnn_dim)
    for target, dev in tvm.testing.enabled_targets():
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, params = relay.build(tvm.IRModule.from_expr(z), target)
            m = graph_executor.create(graph, lib, dev)
            m.set_input("X", tvm.nd.array(x.astype(dtype)))
            m.set_input("y", tvm.nd.array(y.astype(dtype)))
            m.set_input(**params)
            m.run()
            out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()
            ref = unit_numpy(x, y)
            tvm.testing.assert_allclose(out, ref, rtol=1e-5, atol=1e-5)
def test_compile_nested_tuples():
    """Nested tuple outputs are flattened into consecutive graph outputs."""
    x = relay.var("x", shape=(10,))
    x1 = x + relay.const(1.0)
    x2 = x1 + relay.const(1.0)
    x3 = x2 + relay.const(1.0)
    x4 = x3 + relay.const(1.0)
    out = relay.Tuple([x1, relay.Tuple([relay.Tuple([x2, x3]), x4])])
    func = relay.Function([x], out)
    graph, lib, _ = relay.build(tvm.IRModule.from_expr(func), "llvm")
    mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
    x_data = np.random.uniform(size=(10,)).astype(np.float32)
    mod.set_input(x=x_data)
    mod.run()
    # The nested tuples flatten to 4 outputs: x+1, x+2, x+3, x+4.
    assert mod.get_num_outputs() == 4
    expected = x_data + 1
    for idx in range(mod.get_num_outputs()):
        tvm.testing.assert_allclose(mod.get_output(idx).numpy(), expected, rtol=1e-5, atol=1e-5)
        expected = expected + 1
def test_compile_return_empty_tuple():
    """A function returning an empty tuple compiles and runs without outputs."""
    param = relay.var("x", shape=[16], dtype="float32")
    ir_mod = tvm.IRModule.from_expr(relay.Function([param], relay.Tuple([])))
    graph, lib, _ = relay.build(ir_mod, "llvm")
    runtime_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
    runtime_mod.run()
@tvm.testing.uses_gpu
def test_compile_fused_identity_cast():
    """Compile fused functions that reduce to identity / pass-through."""
    # a fused function that would optimized to identity
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.cast(x, "float32")  # same-dtype cast: a no-op after optimization
    func1 = relay.Function([x], y).with_attr("Primitive", 1)
    # a fused function with param pass-through
    x = relay.var("x", shape=[16], dtype="float32")
    y = relay.add(x, relay.const(3.14, "float32"))
    func2 = relay.Function([x], relay.Tuple([x, y])).with_attr("Primitive", 1)
    x_global = relay.var("xx", shape=[16], dtype="float32")
    tup = func2(x_global)
    y_global = func1(relay.TupleGetItem(tup, 0) + relay.TupleGetItem(tup, 1))
    mod = tvm.IRModule.from_expr(relay.Function([x_global], y_global))
    # Only checks that build + run succeed; no output values are asserted.
    for target, device in tvm.testing.enabled_targets():
        with tvm.transform.PassContext(opt_level=2):
            graph, lib, _ = relay.build(mod, target=target)
            executor = graph_executor.create(graph, lib, device=device)
            executor.run()
def test_graph_executor_nested_tuples():
    """The graph executor preserves nested tuple structure in its outputs."""
    x, y, z, w = [relay.var(c, shape=(2, 3), dtype="float32") for c in "xyzw"]
    out = relay.Tuple([x, relay.Tuple([y, relay.Tuple([z, w])])])
    func = relay.Function([x, y, z, w], out)
    evaluator = relay.create_executor(
        kind="graph", mod=tvm.IRModule.from_expr(func), device=tvm.cpu(0), target="llvm"
    ).evaluate()
    data = [np.random.uniform(size=(2, 3)).astype("float32") for _ in "xyzw"]
    result = evaluator(*data)
    # Structure: (x, (y, (z, w))) — each level is a 2-element tuple.
    outer, inner = result, result[1]
    innermost = inner[1]
    assert len(outer) == 2
    assert len(inner) == 2
    assert len(innermost) == 2
    tvm.testing.assert_allclose(outer[0].numpy(), data[0])
    tvm.testing.assert_allclose(inner[0].numpy(), data[1])
    tvm.testing.assert_allclose(innermost[0].numpy(), data[2])
    tvm.testing.assert_allclose(innermost[1].numpy(), data[3])
def test_graph_executor_api():
    """Exercise GraphModule input-introspection APIs (index, shape, dtype)."""
    dname_0, dname_1 = "data_0", "data_1"
    data_0, data_1 = [relay.var(c, shape=(1, 1), dtype="float32") for c in [dname_0, dname_1]]
    net = relay.add(data_0, data_1)
    func = relay.Function((data_0, data_1), net)
    lib = relay.build(tvm.IRModule.from_expr(func), "llvm")
    mod = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
    # Input indices follow declaration order; unknown names return -1.
    assert mod.get_input_index(dname_1) == 1
    assert mod.get_input_index(dname_0) == 0
    assert mod.get_input_index("Invalid") == -1
    shape_dict, dtype_dict = mod.get_input_info()
    assert isinstance(shape_dict, tvm.container.Map)
    assert isinstance(dtype_dict, tvm.container.Map)
    for data in [data_0, data_1]:
        name = data.name_hint
        ty = data.type_annotation
        # verify shape
        assert name in shape_dict
        assert isinstance(shape_dict[name], tvm.runtime.container.ShapeTuple)
        assert shape_dict[name] == tvm.runtime.container.ShapeTuple([i.value for i in ty.shape])
        # verify dtype
        assert name in dtype_dict
        assert isinstance(dtype_dict[name], tvm.runtime.container.String)
        assert dtype_dict[name] == ty.dtype
@tvm.testing.requires_llvm
def test_benchmark():
    """GraphModule.benchmark returns sane stats, checked against a mocked timer."""
    mod, params = mlp.get_workload(1)
    lib = relay.build(mod, target="llvm", params=params)
    exe = graph_executor.create(lib.get_graph_json(), lib.lib, tvm.cpu())
    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
    result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
    # With repeat=2 the mean of two samples equals their median.
    assert result.mean == result.median
    assert result.mean > 0
    assert len(result.results) == 2
    # Patch the timer to return fixed samples [1, 2, 2, 5] and verify the
    # derived statistics exactly.
    # NOTE(review): the `as method` binding is unused — candidate for removal.
    with patch.object(
        tvm.runtime.module.Module,
        "time_evaluator",
        return_value=lambda: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]),
    ) as method:
        result = exe.benchmark(tvm.cpu(), data=data, func_name="run", repeat=2, number=1)
        assert result.mean == 2.5
        assert result.median == 2.0
        assert result.max == 5
        assert result.min == 1
        assert result.std == 1.5
@tvm.testing.parametrize_targets("cuda", "llvm")
def test_benchmark_end_to_end(dev, target):
    """End-to-end benchmark (including data transfer) produces sane results."""
    mod, params = mlp.get_workload(1)
    built = relay.build(mod, target=target, params=params)
    executor = graph_executor.create(built.get_graph_json(), built.lib, dev)
    inp = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"))
    result = executor.benchmark(
        dev, data=inp, func_name="run", repeat=2, number=1, end_to_end=True
    )
    assert result.mean > 0
    assert len(result.results) == 2
@tvm.testing.requires_cuda
def test_benchmark_end_to_end_rpc():
    """End-to-end benchmark over an RPC session to a local server."""
    # NOTE(review): the server is never explicitly terminated; presumably its
    # lifetime ends with the process — confirm against rpc.Server semantics.
    server = rpc.Server("127.0.0.1")
    remote = rpc.connect(server.host, server.port)
    mod, params = mlp.get_workload(1)
    lib = relay.build(mod, target="cuda", params=params)
    # Export, upload, and reload the module through the RPC session.
    temp = utils.tempdir()
    path = temp.relpath("library.so")
    lib.export_library(path)
    remote.upload(path)
    rlib = remote.load_module("library.so")
    dev = remote.device("cuda")
    exe = graph_executor.create(lib.get_graph_json(), rlib, dev)
    data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32"), device=dev)
    result = exe.benchmark(dev, data=data, func_name="run", repeat=2, number=1, end_to_end=True)
    assert result.mean > 0
    assert len(result.results) == 2
if __name__ == "__main__":
    # Discover and run all tests in this file when executed as a script.
    tvm.testing.main()
| 18,656 | 34.135593 | 96 | py |
tvm | tvm-main/tests/python/relay/test_sparse_dense_convert.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import numpy as np
import scipy.sparse as sp
import tvm
from tvm.ir import IRModule
from tvm import relay
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype="float32"):
    """Generate a random M x N BSR matrix with BS_R x BS_C dense blocks.

    Approximately ``density * M * N`` entries are nonzero, grouped into
    randomly chosen, non-overlapping blocks on the block grid.
    """
    dense = np.zeros((M, N), dtype=dtype)
    assert M % BS_R == 0
    assert N % BS_C == 0
    nnz = int(density * M * N)
    num_blocks = int(nnz / (BS_R * BS_C)) + 1
    # All block-aligned (row, col) origins.
    candidate_blocks = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
    assert candidate_blocks.shape[0] == M // BS_R * N // BS_C
    chosen_blocks = candidate_blocks[
        np.random.choice(candidate_blocks.shape[0], size=num_blocks, replace=False)
    ]
    # Fill each chosen block with Gaussian noise, in selection order so the
    # RNG call sequence matches a per-block loop.
    for block_idx in range(len(chosen_blocks)):
        row0, col0 = chosen_blocks[block_idx]
        dense[row0 : row0 + BS_R, col0 : col0 + BS_C] = np.random.randn(BS_R, BS_C)
    s = sp.bsr_matrix(dense, blocksize=(BS_R, BS_C))
    # Sanity-check the resulting sparse structure.
    assert s.data.shape == (num_blocks, BS_R, BS_C)
    assert s.data.size >= nnz
    assert s.indices.shape == (num_blocks,)
    assert s.indptr.shape == (M // BS_R + 1,)
    return s
def run_func(func, params, x):
    """Build `func` for llvm at opt_level=3 and run it on input `x`.

    Returns the first output as a NumPy array.
    """
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, new_params = relay.build(func, "llvm", params=params)
    from tvm.contrib import graph_executor
    executor = graph_executor.create(graph, lib, tvm.cpu(0))
    # set inputs
    executor.set_input("data", tvm.nd.array(x.astype("float32")))
    executor.set_input(**new_params)
    # execute
    executor.run()
    # get outputs
    return executor.get_output(0).numpy()
def test_bsr_sparse_dense():
    """Dense and BSR-converted sparse models must produce matching outputs."""
    data = relay.var("data", shape=(1, 128), dtype="float32")
    weight = relay.var("weight", shape=(768, 128), dtype="float32")
    out = relay.nn.relu(relay.nn.dense(relay.nn.relu(data), weight))
    func = relay.Function(relay.analysis.free_vars(out), out)
    params = {"weight": tvm.nd.array(random_bsr_matrix(768, 128, 32, 1, 0.1).todense())}
    x_np = np.random.randn(1, 128).astype("float32")
    # Reference: run the unmodified dense model.
    dense_output = run_func(func, params, x_np)
    # Convert the dense weight to BSR and re-run.
    sparse_func, params = relay.data_dep_optimization.bsr_dense.convert(func, params, (32, 1), 0.2)
    sparse_output = run_func(sparse_func, params, x_np)
    np.testing.assert_allclose(sparse_output, dense_output, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run the single test directly when this file is executed as a script.
    test_bsr_sparse_dense()
| 3,122 | 33.318681 | 99 | py |
tvm | tvm-main/tests/python/relay/test_op_level5.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level5 operator test cases.
"""
import math
import platform
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
def test_resize1d_infer_type():
    """resize1d type inference handles symbolic and static target widths."""
    n, c, w = te.size_var("n"), te.size_var("c"), te.size_var("w")
    # Symbolic target width propagates through inference.
    tw = te.var("tw")
    x = relay.var("x", relay.TensorType((n, c, w), "int8"))
    zz = run_infer_type(relay.image.resize1d(x, (tw,)))
    assert zz.checked_type == relay.TensorType((n, c, tw), "int8")
    # Static target width with explicit layout/method/coordinate mode.
    x = relay.var("x", relay.TensorType((n, c, w), "int8"))
    z = relay.image.resize1d(x, (200,), None, "NCW", "linear", "align_corners")
    assert "size=" in z.astext()
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, c, 200), "int8")
class TestResize1D:
    """Execute resize1d across methods, coordinate transforms, and layouts."""

    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NWC", "NCW")

    # The original list contained ((2, 8, 17), 3) twice; the duplicate only
    # re-ran an identical case and has been dropped.
    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4), 2),
        ((2, 8, 17), 3),
        ((3, 4, 5), 5),
    )

    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compare relay resize1d output against the topi NumPy reference."""
        target_kind = tvm.target.Target(target).kind.name
        if (
            target_kind == "vulkan"
            and dshape == (3, 4, 5)
            and scale == 5
            and interpolate_method == "nearest_neighbor"
            and coord_trans == "align_corners"
        ):
            pytest.xfail("Known failing case for these parameters")

        # The resized axis is W: index 1 for NWC, index 2 for NCW.
        if layout == "NWC":
            size = (dshape[1] * scale,)
        else:
            size = (dshape[2] * scale,)

        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize1d_python(
            x_data, (scale,), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize1d(
            x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
        )
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize2d_infer_type():
    """resize2d type inference handles symbolic and static target sizes."""
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    # Symbolic target height/width propagate through inference.
    th, tw = te.var("th"), te.var("tw")
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    zz = run_infer_type(relay.image.resize2d(x, (th, tw)))
    assert zz.checked_type == relay.TensorType((n, c, th, tw), "int8")
    # Static target size with explicit layout/method/coordinate mode.
    x = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    z = relay.image.resize2d(x, (100, 200), None, "NCHW", "linear", "align_corners")
    assert "size=" in z.astext()
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, c, 100, 200), "int8")
class TestResize2D:
    """Execute resize2d across methods, coordinate transforms, and layouts."""

    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NHWC", "NCHW")

    # The original list contained ((2, 8, 17, 20), 3) twice; the duplicate
    # only re-ran an identical case and has been dropped.
    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4, 4), 2),
        ((2, 8, 17, 20), 3),
        ((3, 4, 5, 6), 5),
    )

    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compare relay resize2d output against the topi NumPy reference."""
        target_kind = tvm.target.Target(target).kind.name
        if (
            target_kind == "vulkan"
            and dshape == (3, 4, 5, 6)
            and scale == 5
            and interpolate_method == "nearest_neighbor"
            and coord_trans == "align_corners"
        ):
            pytest.xfail("Known failing case for these parameters")

        # The resized axes are H and W: indices 1-2 for NHWC, 2-3 for NCHW.
        if layout == "NHWC":
            size = (dshape[1] * scale, dshape[2] * scale)
        else:
            size = (dshape[2] * scale, dshape[3] * scale)

        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize2d_python(
            x_data, (scale, scale), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize2d(
            x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
        )
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize3d_infer_type():
    """resize3d type inference handles symbolic and static target sizes."""
    n, c, d, h, w = (
        te.size_var("n"),
        te.size_var("c"),
        te.size_var("d"),
        te.size_var("h"),
        te.size_var("w"),
    )
    # Symbolic target depth/height/width propagate through inference.
    td, th, tw = te.var("td"), te.var("th"), te.var("tw")
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    zz = run_infer_type(relay.image.resize3d(x, (td, th, tw)))
    assert zz.checked_type == relay.TensorType((n, c, td, th, tw), "int8")
    # Static target size with explicit layout/method/coordinate mode.
    x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    z = relay.image.resize3d(x, (10, 10, 20), None, "NCDHW", "linear", "align_corners")
    assert "size=" in z.astext()
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType((n, c, 10, 10, 20), "int8")
class TestResize3D:
    """Execute resize3d across methods, coordinate transforms, and layouts."""
    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NDHWC", "NCDHW")
    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4, 4, 4), 2),
    )
    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compare relay resize3d output against the topi NumPy reference."""
        # The resized axes are D/H/W: indices 1-3 for NDHWC, 2-4 for NCDHW.
        if layout == "NDHWC":
            size = (dshape[1] * scale, dshape[2] * scale, dshape[3] * scale)
        else:
            size = (dshape[2] * scale, dshape[3] * scale, dshape[4] * scale)
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize3d_python(
            x_data, (scale, scale, scale), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize3d(x, size, None, layout, interpolate_method, coord_trans)
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
class TestCropAndResize:
    """Execute crop_and_resize against the topi NumPy reference."""
    interpolate_method = tvm.testing.parameter("bilinear", "nearest_neighbor")
    layout = tvm.testing.parameter("NHWC", "NCHW")
    @pytest.mark.skipif(
        platform.machine() == "aarch64",
        reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
    )
    def test_crop_and_resize(self, target, dev, executor_kind, layout, interpolate_method):
        """Crop boxes out of a batch of images and resize them to a fixed size."""
        target_kind = tvm.target.Target(target).kind.name
        if (
            target_kind == "vulkan"
            and layout == "NHWC"
            and interpolate_method == "nearest_neighbor"
        ):
            pytest.xfail("Known failing case for these parameters")
        extrapolation_value = 0.0
        np.random.seed(0)
        # NOTE(review): `eps` is unused — candidate for removal.
        eps = 1e-4
        # Per-layout image shapes and box/crop fixtures.
        if layout == "NHWC":
            img_shape = (10, 224, 224, 3)
            boxes = np.random.uniform(size=(2, 4)).astype("float32")
            box_indices = np.array([1, 0]).astype("int32")
            crop_size = np.array([20, 30]).astype("int32")
        elif layout == "NCHW":
            img_shape = (5, 3, 255, 255)
            boxes = np.random.uniform(size=(2, 4)).astype("float32")
            box_indices = np.array([0, 1]).astype("int32")
            crop_size = np.array([30, 30]).astype("int32")
        else:
            raise ValueError(f"Unknown layout: {layout}")
        image_data = np.random.uniform(size=img_shape).astype("float32")
        ref_res = tvm.topi.testing.crop_and_resize_python(
            image_data,
            boxes,
            box_indices,
            crop_size,
            layout,
            interpolate_method,
            extrapolation_value,
        )
        img = relay.var("img", relay.TensorType(img_shape, "float32"))
        bx = relay.var("bx", relay.TensorType(boxes.shape, "float32"))
        bx_idx = relay.var("bx_idx", relay.TensorType(box_indices.shape, "int32"))
        z = relay.image.crop_and_resize(
            img, bx, bx_idx, list(crop_size), layout, interpolate_method, extrapolation_value
        )
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([img, bx, bx_idx], z)
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            image_data, boxes, box_indices
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04)
@tvm.testing.uses_gpu
def test_multibox_prior(executor_kind):
    """Check multibox_prior anchors against a pure-NumPy reference."""

    def get_ref_result(
        dshape, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=True
    ):
        # NumPy reference: one anchor per (cell, size-or-ratio) combination,
        # laid out as (1, H*W*(num_sizes+num_ratios-1), 4) corner boxes.
        in_height = dshape[2]
        in_width = dshape[3]
        num_sizes = len(sizes)
        num_ratios = len(ratios)
        size_ratio_concat = sizes + ratios
        # Negative step means "derive from the feature-map resolution".
        steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
        steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
        offset_h = offsets[0]
        offset_w = offsets[1]
        oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
        dtype = "float32"
        np_out = np.zeros(oshape).astype(dtype)
        for i in range(in_height):
            center_h = (i + offset_h) * steps_h
            for j in range(in_width):
                center_w = (j + offset_w) * steps_w
                for k in range(num_sizes + num_ratios - 1):
                    # First num_sizes anchors use each size with ratio[0];
                    # remaining anchors use size[0] with each extra ratio.
                    w = (
                        size_ratio_concat[k] * in_height / in_width / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0]
                        * in_height
                        / in_width
                        * math.sqrt(size_ratio_concat[k + 1])
                        / 2.0
                    )
                    h = (
                        size_ratio_concat[k] / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    )
                    count = (
                        i * in_width * (num_sizes + num_ratios - 1)
                        + j * (num_sizes + num_ratios - 1)
                        + k
                    )
                    np_out[0][count][0] = center_w - w
                    np_out[0][count][1] = center_h - h
                    np_out[0][count][2] = center_w + w
                    np_out[0][count][3] = center_h + h
        if clip:
            np_out = np.clip(np_out, 0, 1)
        return np_out

    def verify_multibox_prior(
        x,
        dshape,
        ref_res,
        sizes=(1.0,),
        ratios=(1.0,),
        steps=(-1.0, -1.0),
        offsets=(0.5, 0.5),
        clip=True,
        check_size=False,
        check_type_only=False,
    ):
        # Type-check the op, and (unless check_type_only) execute it on all
        # enabled targets and compare against ref_res.
        z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
        zz = run_infer_type(z)
        if check_size:
            assert "sizes=" in z.astext()
        assert zz.checked_type == relay.TensorType(
            (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4), "float32"
        )
        if check_type_only:
            return
        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        func = relay.Function([x], z)
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    sizes = (0.3, 1.5, 0.7)
    ratios = (1.3, 2.4)
    steps = (2.0, 1.5)
    offsets = (0.2, 0.3)
    dshape = (1, 3, 56, 56)
    ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True)
    # NOTE(review): `y` (symbolic batch dim) is created but never passed to
    # verify_multibox_prior — the calls below reuse `x`. Presumably the intent
    # was to exercise the symbolic-shape var; confirm before changing.
    y = relay.var("y", relay.TensorType((te.size_var("n"), 3, 56, 56), "float32"))
    verify_multibox_prior(
        x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True
    )
    dshape = (1, 24, 32, 32)
    ref_res = get_ref_result(dshape, clip=False)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, clip=False)
    y = relay.var("y", relay.TensorType((te.size_var("n"), 24, 32, 32), "float32"))
    verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True)
@tvm.testing.uses_gpu
def test_get_valid_counts():
    """Check get_valid_counts against a NumPy reference across shapes/thresholds."""

    def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):
        # Build the expected (counts, filtered boxes, kept indices) triple:
        # boxes whose score exceeds the threshold (and, if id_index >= 0,
        # whose class id is non-negative) are compacted to the front;
        # remaining slots are filled with -1.
        dtype = "float32"
        batch_size, num_anchor, elem_length = dshape
        np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
        np_out1 = np.zeros(shape=(batch_size,))
        np_out2 = np.zeros(shape=dshape).astype(dtype)
        np_out3 = np.zeros(shape=(batch_size, num_anchor))
        for i in range(batch_size):
            np_out1[i] = 0
            inter_idx = 0
            for j in range(num_anchor):
                score = np_data[i, j, score_index]
                if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
                    for k in range(elem_length):
                        np_out2[i, inter_idx, k] = np_data[i, j, k]
                    np_out1[i] += 1
                    np_out3[i, inter_idx] = j
                    inter_idx += 1
                if j >= np_out1[i]:
                    for k in range(elem_length):
                        np_out2[i, j, k] = -1.0
                    np_out3[i, j] = -1
        x = relay.var("x", relay.ty.TensorType(dshape, dtype))
        z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index)
        assert "score_threshold" in z.astext()
        func = relay.Function([x], z.astuple())
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            out = relay.create_executor("vm", device=dev, target=target).evaluate(func)(np_data)
            tvm.testing.assert_allclose(out[0].numpy(), np_out1, rtol=1e-3, atol=1e-04)
            tvm.testing.assert_allclose(out[1].numpy(), np_out2, rtol=1e-3, atol=1e-04)
            tvm.testing.assert_allclose(out[2].numpy(), np_out3, rtol=1e-3, atol=1e-04)

    verify_get_valid_counts((1, 2500, 6), 0, 0, 1)
    verify_get_valid_counts((1, 2500, 5), -1, -1, 0)
    verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)
    verify_get_valid_counts((16, 500, 5), 0.95, -1, 0)
@tvm.testing.uses_gpu
def test_non_max_suppression(executor_kind):
    """Check vision.non_max_suppression, both the box-output and the
    index-output variants, against hand-written expected results."""

    def verify_nms(
        x0_data,
        x1_data,
        x2_data,
        x3_data,
        dshape,
        ref_res,
        ref_indices_res,
        iou_threshold=0.5,
        force_suppress=False,
        top_k=-1,
        check_type_only=False,
    ):
        # x0 = box data, x1 = valid counts, x2 = indices, x3 = max_output_size.
        x0 = relay.var("x0", relay.ty.TensorType(dshape, "float32"))
        x1 = relay.var("x1", relay.ty.TensorType((dshape[0],), "int32"))
        x2 = relay.var("x2", relay.ty.TensorType((dshape[0], dshape[1]), "int32"))
        x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
        # Variant returning the suppressed box tensor.
        z = relay.vision.non_max_suppression(
            x0,
            x1,
            x2,
            x3,
            iou_threshold=iou_threshold,
            force_suppress=force_suppress,
            top_k=top_k,
            return_indices=False,
        )
        # Variant returning the surviving box indices (a tuple output).
        z_indices = relay.vision.non_max_suppression(
            x0,
            x1,
            x2,
            x3,
            iou_threshold=iou_threshold,
            force_suppress=force_suppress,
            top_k=top_k,
            return_indices=True,
        )
        if isinstance(z_indices, relay.expr.TupleWrapper):
            z_indices = z_indices.astuple()
        zz = run_infer_type(z)
        zz_indices = run_infer_type(z_indices)
        assert zz.checked_type == relay.ty.TensorType(dshape, "float32")
        assert zz_indices.checked_type == relay.ty.TupleType(
            [
                relay.ty.TensorType((dshape[0], dshape[1]), "int32"),
                relay.ty.TensorType((dshape[0], 1), "int32"),
            ]
        )
        if check_type_only:
            return
        func = relay.Function([x0, x1, x2, x3], z)
        func = run_infer_type(func)
        func_indices = relay.Function([x0, x1, x2, x3], z_indices)
        func_indices = run_infer_type(func_indices)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x0_data, x1_data, x2_data, x3_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
            op_indices_res = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(func_indices)(x0_data, x1_data, x2_data, x3_data)
            tvm.testing.assert_allclose(op_indices_res[0].numpy(), ref_indices_res, rtol=1e-5)

    # Each row is [class_id, score, x1, y1, x2, y2].
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45],
                [1, 0.7, 30, 60, 50, 80],
                [0, 0.4, 4, 21, 19, 40],
                [2, 0.9, 35, 61, 52, 79],
                [1, 0.5, 100, 60, 70, 110],
            ]
        ]
    ).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
    np_max_output_size = -1
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79],
                [0, 0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[4, 0, -1, -1, -1]])
    num_anchors = 5
    # Symbolic batch dimension: only type inference is checked.
    dshape = (te.size_var("n"), num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=True,
    )
    dshape = (1, num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=False,
    )
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79],
                [0, 0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[4, 0, -1, -1, -1]])
    # Same data, now with an explicit cap on the number of output boxes.
    np_max_output_size = 2
    dshape = (te.size_var("n"), num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        check_type_only=True,
    )
    dshape = (1, num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        top_k=2,
    )
    # Boxes with four extra trailing feature columns (10 values per box) to
    # check that NMS carries extra per-box data through unchanged.
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
                [1, 0.7, 30, 60, 50, 80, 5, 6, 7, 8],
                [0, 0.4, 4, 21, 19, 40, 9, 10, 11, 12],
                [2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
                [1, 0.5, 100, 60, 70, 110, 17, 18, 19, 20],
            ]
        ]
    ).astype("float32")
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
                [0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    dshape = (1, 5, 10)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=False,
    )
@tvm.testing.uses_gpu
def test_multibox_transform_loc(executor_kind):
    """Type and value tests for vision.multibox_transform_loc."""

    def test_default_value():
        # Three anchors, three classes; expected output values are fixed.
        num_anchors = 3
        num_classes = 3
        np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]]).astype(
            "float32"
        )
        np_loc_preds = np.array(
            [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]]
        ).astype("float32")
        np_anchors = np.array(
            [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]]
        ).astype("float32")
        expected_np_out = np.array(
            [
                [
                    [1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                    [0, 0.44999999, 1, 1, 1, 1],
                    [0, 0.30000001, 0, 0, 0.22903419, 0.20435292],
                ]
            ]
        )
        cls_prob = relay.var(
            "cls_prob", relay.ty.TensorType((1, num_anchors, num_classes), "float32")
        )
        loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, num_anchors * 4), "float32"))
        anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
        mtl = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
        )
        ret = run_infer_type(mtl.astuple())
        # The op returns a tuple of (transformed boxes, valid box count).
        ref_type = relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((1, num_anchors, 6), "float32"),
                    relay.ty.TensorType((1,), "int"),
                ]
            )
        )
        assert ret.checked_type == ref_type
        # Chain into NMS, as a real SSD-style pipeline would.
        nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
        func = relay.Function([cls_prob, loc_pred, anchors], nms)
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_cls_prob, np_loc_preds, np_anchors
            )
            tvm.testing.assert_allclose(op_res.numpy(), expected_np_out, rtol=1e-5)

    def test_threshold():
        # Symbolic batch dimension: only the inferred result type is checked.
        num_anchors = 5
        num_classes = 5
        n = te.size_var("n")
        cls_prob = relay.var(
            "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")
        )
        loc_pred = relay.var("loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32"))
        anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
        threshold = 0.02
        variances = (0.2, 0.2, 0.3, 0.3)
        ret = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob,
            loc_pred=loc_pred,
            anchor=anchors,
            threshold=threshold,
            variances=variances,
        )
        ret = run_infer_type(ret.astuple())
        ref_type = relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((n, num_anchors, 6), "float32"),
                    relay.ty.TensorType((n,), "int"),
                ]
            )
        )
        assert ret.checked_type == ref_type

    test_default_value()
    test_threshold()
@tvm.testing.uses_gpu
def test_roi_align(executor_kind):
    """Type and value tests for vision.roi_align in NCHW and NHWC layouts."""

    def verify_roi_align(
        data_shape,
        rois_shape,
        channel,
        in_size,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
        layout,
        ref_func,
    ):
        data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
        rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
        z = relay.vision.roi_align(
            data,
            rois,
            pooled_size=(pooled_size, pooled_size),
            spatial_scale=spatial_scale,
            sample_ratio=sample_ratio,
            mode=mode,
            layout=layout,
        )
        zz = run_infer_type(z)
        num_roi = rois_shape[0]
        # The channel axis position in the output depends on the layout.
        if layout == "NCHW":
            assert zz.checked_type == relay.ty.TensorType(
                (num_roi, channel, pooled_size, pooled_size), "float32"
            )
        else:
            assert zz.checked_type == relay.ty.TensorType(
                (num_roi, pooled_size, pooled_size, channel), "float32"
            )
        func = relay.Function([data, rois], z)
        func = run_infer_type(func)
        np_data = np.random.uniform(size=data_shape).astype("float32")
        np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
        # Column 0 of each ROI holds the batch index it belongs to.
        np_rois[:, 0] = np.random.randint(low=0, high=data_shape[0], size=num_roi)
        ref_res = ref_func(
            np_data,
            np_rois,
            pooled_size=pooled_size,
            spatial_scale=spatial_scale,
            sample_ratio=sample_ratio,
            mode=mode,
        )
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_data, np_rois
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, atol=1e-6, rtol=1e-3)

    def verify_roi_align_nchw(
        data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
    ):
        # NCHW: channel is axis 1, spatial size is axis 2.
        _, channel, in_size, _ = data_shape
        return verify_roi_align(
            data_shape,
            rois_shape,
            channel,
            in_size,
            pooled_size,
            spatial_scale,
            sample_ratio,
            mode,
            "NCHW",
            tvm.topi.testing.roi_align_nchw_python,
        )

    def verify_roi_align_nhwc(
        data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
    ):
        # NHWC: spatial size is axis 1, channel is the last axis.
        _, in_size, _, channel = data_shape
        return verify_roi_align(
            data_shape,
            rois_shape,
            channel,
            in_size,
            pooled_size,
            spatial_scale,
            sample_ratio,
            mode,
            "NHWC",
            tvm.topi.testing.roi_align_nhwc_python,
        )

    verify_roi_align_nchw(
        (1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
    )
    verify_roi_align_nchw(
        (4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
    )
    verify_roi_align_nchw(
        (1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
    )
    verify_roi_align_nchw(
        (4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
    )
    verify_roi_align_nhwc(
        (1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
    )
    verify_roi_align_nhwc(
        (4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
    )
    verify_roi_align_nhwc(
        (1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
    )
    verify_roi_align_nhwc(
        (4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
    )
@tvm.testing.uses_gpu
def test_roi_pool(executor_kind):
    """Run vision.roi_pool (NCHW) and compare against the topi reference."""

    def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
        batch, channel, in_size, _ = data_shape
        num_roi = rois_shape[0]

        data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
        rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
        pooled = relay.vision.roi_pool(
            data,
            rois,
            pooled_size=(pooled_size, pooled_size),
            spatial_scale=spatial_scale,
            layout="NCHW",
        )
        inferred = run_infer_type(pooled)
        expected_ty = relay.ty.TensorType(
            (num_roi, channel, pooled_size, pooled_size), "float32"
        )
        assert inferred.checked_type == expected_ty

        func = run_infer_type(relay.Function([data, rois], pooled))

        np_data = np.random.uniform(size=data_shape).astype("float32")
        # Column 0 of each ROI is the batch index; the rest are coordinates.
        np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
        np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype("float32")
        ref_res = tvm.topi.testing.roi_pool_nchw_python(
            np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale
        )
        for target, dev in tvm.testing.enabled_targets():
            evaluated = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(func)
            tvm.testing.assert_allclose(evaluated(np_data, np_rois).numpy(), ref_res, rtol=1e-4)

    verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)
    verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5)
@tvm.testing.uses_gpu
def test_proposal(executor_kind):
    """Run vision.proposal against fixed expected proposal boxes."""

    def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
        cls_prob = relay.var("cls_prob", relay.ty.TensorType(np_cls_prob.shape, "float32"))
        bbox_pred = relay.var("bbox_pred", relay.ty.TensorType(np_bbox_pred.shape, "float32"))
        im_info = relay.var("im_info", relay.ty.TensorType(np_im_info.shape, "float32"))
        z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.ty.TensorType(np_out.shape, "float32")
        func = relay.Function([cls_prob, bbox_pred, im_info], z)
        func = run_infer_type(func)
        # Only llvm and cuda targets are exercised here; anything not
        # enabled in the build is skipped with a message.
        for target in ["llvm", "cuda"]:
            if not tvm.testing.device_enabled(target):
                print("Skip test because %s is not enabled." % target)
                continue
            dev = tvm.device(target, 0)
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_cls_prob, np_bbox_pred, np_im_info
            )
            tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-4)

    attrs = {
        "scales": (0.5,),
        "ratios": (0.5,),
        "feature_stride": 16,
        "iou_loss": False,
        "rpn_min_size": 16,
        "threshold": 0.7,
        "rpn_pre_nms_top_n": 200,
        "rpn_post_nms_top_n": 4,
    }
    np_cls_prob = np.array(
        [
            [
                [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],
                [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]],
            ]
        ],
        dtype="float32",
    )
    np_bbox_pred = np.array(
        [
            [
                [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],
                [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],
                [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],
                [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],
            ]
        ],
        dtype="float32",
    )
    np_im_info = np.array([[48.0, 48.0, 1.0]], dtype="float32")
    np_out = np.array(
        [
            [0.0, 0.0, 2.8451548, 28.38012, 18.154846],
            [0.0, 0.0, 15.354933, 41.96971, 41.245064],
            [0.0, 18.019852, 1.0538368, 51.98015, 25.946163],
            [0.0, 27.320923, -1.266357, 55.0, 24.666357],
        ],
        dtype="float32",
    )
    verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
    # Enabling the IoU loss changes the expected proposals.
    np_out = np.array(
        [
            [0.0, -5.25, -2.5, 21.75, 19.0],
            [0.0, 11.25, -2.0, 37.25, 18.5],
            [0.0, 26.849998, -2.3000002, 53.45, 18.6],
            [0.0, -4.95, 13.799999, 22.25, 35.5],
        ],
        dtype="float32",
    )
    attrs["iou_loss"] = True
    verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
def test_yolo_reorg_infer_shape():
    """Type inference for vision.yolo_reorg on static and symbolic shapes."""

    def verify_yolo_reorg(shape, stride, out_shape):
        inp = relay.var("x", relay.TensorType(shape, "float32"))
        reorg = relay.vision.yolo_reorg(inp, stride=stride)
        assert "stride=" in reorg.astext()
        inferred = run_infer_type(reorg)
        assert inferred.checked_type == relay.ty.TensorType(out_shape, "float32")

    n, c, h, w = (te.size_var(name) for name in ("n", "c", "h", "w"))
    idxd = tvm.tir.indexdiv
    verify_yolo_reorg((n, c, 20, 20), 10, (n, c * 10 * 10, 2, 2))
    verify_yolo_reorg((n, c, h, w), 2, (n, c * 2 * 2, idxd(h, 2), idxd(w, 2)))
@tvm.testing.uses_gpu
def test_yolo_reorg(executor_kind):
    """Run vision.yolo_reorg and compare with the topi reference result."""

    def verify_yolo_reorg(shape, stride):
        x_np = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        expected = tvm.topi.testing.reorg_python(x_np, stride)

        inp = relay.var("x", relay.TensorType(shape, "float32"))
        reorg = relay.vision.yolo_reorg(inp, stride=stride)
        assert "stride=" in reorg.astext()
        inferred = run_infer_type(reorg)
        assert inferred.checked_type == relay.ty.TensorType(expected.shape, "float32")

        func = relay.Function([inp], reorg)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5)

    verify_yolo_reorg((1, 100, 20, 20), 10)
    verify_yolo_reorg((1, 4, 6, 6), 2)
class TestDeformableConv2D:
    """Parameterized type-inference and numerical tests for nn.deformable_conv2d."""

    # (batch, in_channel, spatial size, out_channel, deformable_groups) combos.
    batch, in_channel, size, out_channel, deformable_groups = tvm.testing.parameters(
        (1, 4, 16, 4, 4),
        (2, 4, 16, 4, 1),
    )
    kernel_size = tvm.testing.parameter((3, 3))
    groups = tvm.testing.parameter(1, 2)
    layout = tvm.testing.parameter("NCHW", "NHWC")
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.fixture
    def data_shape(self, layout, batch, in_channel, size):
        # Input tensor shape for the layout under test.
        if layout == "NCHW":
            return (batch, in_channel, size, size)
        elif layout == "NHWC":
            return (batch, size, size, in_channel)

    @tvm.testing.fixture
    def kernel_shape(self, layout, in_channel, out_channel, groups, kernel_size):
        # Weight shape; input channels are split across the groups.
        if layout == "NCHW":
            return (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])
        elif layout == "NHWC":
            return (kernel_size[0], kernel_size[1], in_channel // groups, out_channel)

    @tvm.testing.fixture
    def out_shape(self, layout, batch, out_channel, size):
        # Spatial size is preserved (stride 1, padding 1, 3x3 kernel below).
        if layout == "NCHW":
            return (batch, out_channel, size, size)
        elif layout == "NHWC":
            return (batch, size, size, out_channel)

    @tvm.testing.fixture
    def offset_shape(self, layout, batch, kernel_size, deformable_groups, out_shape):
        # Two offset values per kernel element per deformable group.
        if layout == "NCHW":
            return (
                batch,
                2 * kernel_size[0] * kernel_size[1] * deformable_groups,
                out_shape[2],
                out_shape[3],
            )
        elif layout == "NHWC":
            return (
                batch,
                out_shape[1],
                out_shape[2],
                2 * kernel_size[0] * kernel_size[1] * deformable_groups,
            )

    @tvm.testing.fixture
    def kernel_layout(self, layout):
        # Kernel layout matching the data layout.
        return {"NCHW": "OIHW", "NHWC": "HWIO"}[layout]

    @tvm.testing.fixture
    def relay_setup(
        self,
        dtype,
        data_shape,
        layout,
        kernel_layout,
        kernel_size,
        deformable_groups,
        groups,
        out_channel,
    ):
        # Build the deformable_conv2d expression plus the wrapping Function.
        # Note: offset and kernel vars intentionally carry no explicit shape,
        # so type inference must deduce them (checked in test_infer_type).
        data = relay.var("data", shape=data_shape, dtype=dtype)
        offset = relay.var("offset", dtype=dtype)
        kernel = relay.var("kernel", dtype=dtype)
        expr = relay.nn.deformable_conv2d(
            data,
            offset,
            kernel,
            strides=(1, 1),
            padding=(1, 1),
            dilation=(1, 1),
            data_layout=layout,
            kernel_layout=kernel_layout,
            kernel_size=kernel_size,
            deformable_groups=deformable_groups,
            groups=groups,
            channels=out_channel,
        )
        func = relay.Function([data, offset, kernel], expr)
        return expr, func

    def test_infer_type(self, relay_setup, out_shape, offset_shape, kernel_shape):
        # Inference must deduce the output, offset and kernel shapes.
        expr, func = relay_setup
        yy = run_infer_type(expr)
        assert yy.checked_type == relay.TensorType(out_shape), yy.checked_type
        assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type
        assert yy.args[2].checked_type == relay.TensorType(kernel_shape), yy.args[2].checked_type

    # The reference python implementation only supports groups==1.
    @pytest.mark.parametrize("groups", [1])
    def test_run(
        self,
        target,
        dev,
        dtype,
        executor_kind,
        data_shape,
        offset_shape,
        kernel_shape,
        relay_setup,
        deformable_groups,
        groups,
        layout,
    ):
        target = tvm.target.Target(target)
        if layout == "NHWC" and target.kind.name != "llvm":
            pytest.xfail("Can only run NHWC layout on llvm")
        expr, func = relay_setup
        data = np.random.uniform(size=data_shape).astype(dtype)
        offset = np.random.uniform(size=offset_shape).astype(dtype)
        kernel = np.random.uniform(size=kernel_shape).astype(dtype)
        # Pick the layout-matching reference implementation.
        if layout == "NCHW":
            ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(
                data,
                offset,
                kernel,
                stride=(1, 1),
                padding=(1, 1),
                dilation=(1, 1),
                deformable_groups=deformable_groups,
                groups=groups,
            )
        else:
            ref_res = tvm.topi.testing.deformable_conv2d_nhwc_python(
                data,
                offset,
                kernel,
                stride=(1, 1),
                padding=(1, 1),
                dilation=(1, 1),
                deformable_groups=deformable_groups,
                groups=groups,
            )
        op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data, offset, kernel
        )
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_depth_to_space(executor_kind):
    """Run nn.depth_to_space for both layouts and modes and compare with the
    topi reference implementation."""

    def verify_depth_to_space(dshape, block_size, layout, mode):
        # Fix: the original also computed an `out_shape` local that was never
        # used (the assertions rely on ref_res.shape); the dead code is removed.
        x_data = np.random.uniform(size=dshape).astype("float32")
        # The reference implementation works in NCHW, so transpose NHWC data
        # into NCHW before the call and transpose both back afterwards.
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
        ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
            ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.nn.depth_to_space(x, block_size, layout, mode)
        assert "block_size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")

        func = relay.Function([x], z)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)

    for layout in ["NHWC", "NCHW"]:
        for mode in ["DCR", "CDR"]:
            verify_depth_to_space((1, 4, 4, 4), 2, layout, mode)
@tvm.testing.uses_gpu
def test_space_to_depth(executor_kind):
    """Run nn.space_to_depth for both layouts and compare with the topi
    reference implementation."""

    def verify_space_to_depth(dshape, block_size, layout):
        # Fix: the original also computed an `out_shape` local that was never
        # used (the assertions rely on ref_res.shape); the dead code is removed.
        x_data = np.random.uniform(size=dshape).astype("float32")
        # The reference implementation works in NCHW, so transpose NHWC data
        # into NCHW before the call and transpose both back afterwards.
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
        ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size)
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
            ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.nn.space_to_depth(x, block_size, layout)
        assert "block_size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")

        func = relay.Function([x], z)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)

    for layout in ["NHWC", "NCHW"]:
        verify_space_to_depth((1, 4, 4, 4), 2, layout)
def test_dilation2d_infer_type():
    """Type inference for image.dilation2d with a symbolic batch dimension."""
    # Symbolic batch, fixed spatial/channel sizes. Distinct names are used for
    # the width constant and the kernel variable to avoid shadowing.
    batch = te.var("n")
    height, width, channels = 224, 224, 10
    data = relay.var("x", relay.ty.TensorType((batch, channels, height, width), "float32"))
    kc, kh, kw = 10, 8, 8
    kernel = relay.var("w", relay.ty.TensorType((kc, kw, kh), "float32"))
    y = relay.image.dilation2d(
        data,
        kernel,
        strides=[1, 1, 1, 1],
        dilations=[1, 1, 1, 1],
        padding=[0, 0, 0, 0],
    )
    yy = run_infer_type(y)
    assert yy.checked_type == relay.TensorType((batch, 10, 217, 217), "float32")
class TestDilation2DRun:
    """Numerical tests for image.dilation2d over small hand-written cases."""

    # Matching (data layout, kernel layout) pairs.
    data_layout, kernel_layout = tvm.testing.parameters(("NCHW", "IHW"), ("NHWC", "HWI"))
    dtype = tvm.testing.parameter("float32")
    # Each config holds an NHWC image, an HWI kernel, the expected output, and
    # optional strides/padding/dilations overrides (defaults applied in the test).
    config = tvm.testing.parameter(
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5]]]],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5], [0.6]], [[0.7], [0.8]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1, 0.2, 0.0], [0.2, 0.3, 0.1]], [[0.3, 0.4, 0.2], [0.4, 0.5, 0.3]]]],
            kernel=[[[0.4, 0.5, 0.3], [0.3, 0.4, 0.2]], [[0.1, 0.2, 0.0], [0.0, 0.1, -0.1]]],
            out=[[[[0.5, 0.7, 0.3], [0.6, 0.8, 0.4]], [[0.7, 0.9, 0.5], [0.8, 1.0, 0.6]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]], [[[0.2], [0.3]], [[0.4], [0.5]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5], [0.6]], [[0.7], [0.8]]], [[[0.6], [0.7]], [[0.8], [0.9]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]]],
            out=[[[[0.5]], [[0.7]]]],
        ),
        dict(
            image=[[[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]], [[0.7], [0.8], [0.9]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
            out=[[[[0.7], [0.8], [0.6]], [[1.0], [1.1], [0.9]], [[0.8], [0.9], [0.9]]]],
            padding=[1, 1],
            dilations=[2, 2],
        ),
        dict(
            image=[
                [
                    [[0.1], [0.2], [0.3], [0.4]],
                    [[0.5], [0.6], [0.7], [0.8]],
                    [[0.9], [1.0], [1.1], [1.2]],
                ]
            ],
            kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
            out=[[[[0.8], [1.0]], [[1.2], [1.4]]]],
            strides=[1, 2],
        ),
    )

    @tvm.testing.fixture
    def test_case(self, config, data_layout, dtype):
        # Convert the NHWC/HWI reference data to the layout under test.
        indata = np.array(config["image"], dtype=dtype)
        kernel = np.array(config["kernel"], dtype=dtype)
        out = np.array(config["out"], dtype=dtype)
        if data_layout == "NHWC":
            pass
        elif data_layout == "NCHW":
            indata = indata.transpose([0, 3, 1, 2])
            kernel = kernel.transpose([2, 0, 1])
            out = out.transpose([0, 3, 1, 2])
        else:
            raise ValueError(f"Unsupported layout '{data_layout}'")
        return indata, kernel, out

    @tvm.testing.parametrize_targets("llvm")
    def test_dilation2d(
        self,
        target,
        dev,
        test_case,
        dtype,
        config,
        data_layout,
        kernel_layout,
    ):
        # Per-case overrides with defaults.
        strides = config.get("strides", [1, 1])
        padding = config.get("padding", [0, 0])
        dilations = config.get("dilations", [1, 1])
        indata, kernel, out = test_case
        x = relay.var("x", shape=indata.shape, dtype=dtype)
        w = relay.var("w", shape=kernel.shape, dtype=dtype)
        y = relay.image.dilation2d(
            x,
            w,
            strides=strides,
            dilations=dilations,
            padding=padding,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        func = relay.Function([x, w], y)
        op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
            indata, kernel
        )
        tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_affine_grid(executor_kind):
    """Run image.affine_grid and compare against the topi reference."""

    def verify_affine_grid(num_batch, target_shape):
        dtype = "float32"
        theta_shape = (num_batch, 2, 3)
        theta = relay.var("data", relay.ty.TensorType(theta_shape, dtype))
        grid = relay.image.affine_grid(theta, target_shape)
        inferred = run_infer_type(grid)
        expected_ty = relay.ty.TensorType(
            (num_batch, len(target_shape), *target_shape), dtype
        )
        assert inferred.checked_type == expected_ty

        func = relay.Function([theta], grid)
        theta_np = np.random.uniform(size=theta_shape).astype(dtype)
        expected = tvm.topi.testing.affine_grid_python(theta_np, target_shape)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                theta_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-5, atol=1e-5)

    verify_affine_grid(1, (16, 32))
    verify_affine_grid(4, (16, 32))
@tvm.testing.uses_gpu
def test_grid_sample(executor_kind):
    """Run image.grid_sample (2-D NCHW and 3-D NCDHW) against the topi reference."""

    def verify_grid_sample(
        data_shape, grid_shape, method="bilinear", padding_mode="zeros", align_corners=True
    ):
        dtype = "float32"
        data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
        grid = relay.var("grid", relay.ty.TensorType(grid_shape, dtype))
        # The tensor rank selects the layout: 4-D data is NCHW, 5-D is NCDHW.
        if len(data_shape) == 4:
            layout = "NCHW"
            batch, channel, _, _ = data_shape
            _, _, out_height, out_width = grid_shape
            tensor_type = relay.TensorType((batch, channel, out_height, out_width), dtype)
        else:  # len(data_shape) == 5:
            layout = "NCDHW"
            batch, channel, _, _, _ = data_shape
            _, _, out_depth, out_height, out_width = grid_shape
            tensor_type = relay.TensorType(
                (batch, channel, out_depth, out_height, out_width), dtype
            )
        y = relay.image.grid_sample(
            data,
            grid,
            method=method,
            layout=layout,
            padding_mode=padding_mode,
            align_corners=align_corners,
        )
        yy = run_infer_type(y)
        assert yy.checked_type == tensor_type
        func = relay.Function([data, grid], y)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        # Grid values outside [-1, 1] exercise the padding modes.
        grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
        ref_res = tvm.topi.testing.grid_sample_python(
            data_np, grid_np, method, layout, padding_mode, align_corners
        )
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                data_np, grid_np
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5, atol=1e-5)

    methods = ["nearest", "bilinear", "bicubic"]
    padding_modes = ["zeros", "border", "reflection"]
    align_corners = [True, False]
    data_2D_shape = (4, 4, 8, 8)
    grid_2D_shape = (4, 2, 16, 16)
    # choosing smaller sizes to be testable on weaker GPUs
    data_3D_shape = (4, 4, 4, 4, 4)
    grid_3D_shape = (4, 3, 8, 8, 8)
    for _method in methods:
        for _padding in padding_modes:
            for _align in align_corners:
                verify_grid_sample(data_2D_shape, grid_2D_shape, _method, _padding, _align)
                # 3D "bicubic"(tricubic) is not supported in pytorch
                if _method != "bicubic":
                    verify_grid_sample(data_3D_shape, grid_3D_shape, _method, _padding, _align)
@tvm.testing.uses_gpu
def test_space_to_batch_nd(executor_kind):
    """Run nn.space_to_batch_nd and compare with the topi reference."""

    def verify_space_to_batch_nd(dshape, block_shape, paddings):
        x_np = np.random.uniform(size=dshape).astype("float32")
        pad_before, pad_after = map(list, zip(*paddings))
        expected = tvm.topi.testing.space_to_batch_nd_python(
            x_np, block_shape, pad_before, pad_after
        )

        inp = relay.var("x", relay.TensorType(dshape, "float32"))
        out = relay.nn.space_to_batch_nd(inp, block_shape, paddings)
        text = out.astext()
        assert "block_shape=" in text
        assert "paddings=" in text
        inferred = run_infer_type(out)
        assert inferred.checked_type == relay.TensorType(expected.shape, "float32")

        func = relay.Function([inp], out)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-4)

    verify_space_to_batch_nd([3, 3, 2, 1], [3], [[0, 0]])
    verify_space_to_batch_nd([2, 2, 4, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_batch_to_space_nd(executor_kind):
    """Run nn.batch_to_space_nd and compare with the topi reference."""

    def verify_batch_to_space_nd(dshape, block_shape, crops):
        x_np = np.random.uniform(size=dshape).astype("float32")
        crop_begin_list, crop_end_list = map(list, zip(*crops))
        expected = tvm.topi.testing.batch_to_space_nd_python(
            x_np, block_shape, crop_begin_list, crop_end_list
        )

        inp = relay.var("x", relay.TensorType(dshape, "float32"))
        out = relay.nn.batch_to_space_nd(inp, block_shape, crops)
        text = out.astext()
        assert "block_shape=" in text
        assert "crops=" in text
        inferred = run_infer_type(out)
        assert inferred.checked_type == relay.TensorType(expected.shape, "float32")

        func = relay.Function([inp], out)
        for target, dev in tvm.testing.enabled_targets():
            got = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_np
            )
            tvm.testing.assert_allclose(got.numpy(), expected, rtol=1e-4)

    verify_batch_to_space_nd([4, 1, 1, 3], [2, 2], [[0, 0], [0, 0]])
    verify_batch_to_space_nd([8, 1, 3, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression(executor_kind):
    """Run vision.all_class_non_max_suppression and compare the selected
    (batch, class, box) index triples with hand-written expected results."""

    def verify_all_class_non_max_suppression(
        boxes_np,
        scores_np,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected_indices,
    ):
        boxes = relay.var("boxes", relay.ty.TensorType(boxes_np.shape, "float32"))
        scores = relay.var("scores", relay.ty.TensorType(scores_np.shape, "float32"))
        out = relay.vision.all_class_non_max_suppression(
            boxes,
            scores,
            max_output_boxes_per_class,
            iou_threshold,
            score_threshold,
        )
        func = relay.Function([boxes, scores], out.astuple())
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            selected_indices, num_detections = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(func)(boxes_np, scores_np)
            # Only the first num_detections rows of selected_indices are valid.
            tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]]
            np.testing.assert_equal(tvm_res, expected_indices)

    # Two batches, two classes sharing the same per-box scores.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.0, 0.0, 0.5, 0.5],
                [0.5, 0.5, 0.9, 0.9],
                [0.5, 0.5, 1.0, 1.0],
            ],
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.5, 0.5, 0.95, 0.95],
                [0.5, 0.5, 0.96, 0.96],
                [0.5, 0.5, 1.0, 1.0],
            ],
        ]
    ).astype("float32")
    scores = np.array(
        [
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
        ]
    ).astype("float32")
    max_output_boxes_per_class = 2
    iou_threshold = 0.8
    score_threshold = 0.0
    # Each expected row is (batch_index, class_index, box_index).
    expected = np.array(
        [[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 2], [1, 0, 4], [1, 0, 1], [1, 1, 4], [1, 1, 1]]
    )
    verify_all_class_non_max_suppression(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
    )
    # Single batch / single class with a score threshold that filters boxes.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
                [0.0, 0.1, 1.0, 1.1],
                [0.0, -0.1, 1.0, 0.9],
                [0.0, 10.0, 1.0, 11.0],
                [0.0, 10.1, 1.0, 11.1],
                [0.0, 100.0, 1.0, 101.0],
            ]
        ]
    ).astype(np.float32)
    scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = 3
    iou_threshold = 0.5
    score_threshold = 0.4
    expected = np.array([[0, 0, 3], [0, 0, 0]])
    verify_all_class_non_max_suppression(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, expected
    )
if __name__ == "__main__":
tvm.testing.main()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test alter op layout pass"""
import pytest
import tvm
from tvm import relay, topi
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.testing import run_infer_type
import numpy as np
import tvm.testing
from tvm.relay import testing
def run_opt_pass(expr, passes):
    """Wrap *expr* in an IRModule, run *passes* at opt_level=3, and return the result.

    If *expr* was a Function, the transformed Function is returned; otherwise
    the body of the module's "main" function is returned.
    """
    if not isinstance(passes, list):
        passes = [passes]
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        module = tvm.transform.Sequential(passes)(module)
    main_fn = module["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def test_alter_op():
    """Check that an alter-layout hook can directly substitute a new operator call."""

    def before():
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight", shape=(64, 64, 3, 3))
        body = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        body = relay.nn.relu(body)
        return relay.Function([data, kernel], body)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Rebuild the conv2d with a doubled kernel, keeping all attrs unchanged.
        data, weight = inputs
        doubled = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, doubled, **attrs)

    def expected():
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight", shape=(64, 64, 3, 3))
        body = relay.nn.conv2d(
            data,
            relay.multiply(kernel, relay.const(2.0, "float32")),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        body = relay.nn.relu(body)
        return relay.Function([data, kernel], body)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        actual = run_opt_pass(before(), transform.AlterOpLayout())
        reference = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_alter_return_none():
    """Check that an alter hook returning 'None' leaves the graph untouched."""

    def before():
        inp = relay.var("x", shape=(1, 64, 56, 56))
        pooled = relay.nn.global_max_pool2d(inp)
        return relay.Function([inp], pooled)

    called = [False]

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Record that the hook fired, but decline to rewrite the op.
        called[0] = True
        return None

    with TempOpAttr("nn.global_max_pool2d", "FTVMAlterOpLayout", alter_conv2d):
        actual = run_opt_pass(before(), transform.AlterOpLayout())
        reference = run_opt_pass(before(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
    # The hook must actually have been invoked by the pass.
    assert called[0]
def test_alter_layout():
    """Test altering the layout of a conv2d to NCHW16c.

    The layouts of the broadcast bias-add and of the weight must be adjusted
    accordingly, and a layout transform back to NCHW must be inserted before
    ops that cannot run in the packed layout (here batch_flatten).
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias")
        weight = relay.var("weight")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.bias_add(y, bias)
        # a useless tuple, which will be eliminated
        y = relay.Tuple([y])[0]
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.cast(y, "int32")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed data/kernel layouts for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        new_attrs["kernel_layout"] = "OIHW16i"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        w = relay.layout_transform(weight, "OIHW", "OIHW16i")
        y = relay.nn.conv2d(
            y,
            w,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            kernel_layout="OIHW16i",
            data_layout="NCHW16c",
        )
        # CanonicalizeOps turns bias_add into expand_dims + add; the bias is
        # then transformed into the packed layout before the broadcast add.
        b = relay.expand_dims(bias, axis=1, num_newaxis=2)
        b = relay.expand_dims(b, axis=0, num_newaxis=1)
        b = relay.layout_transform(b, "NCHW", "NCHW16c")
        y = relay.add(y, b)
        y = relay.nn.relu(y)
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
        y = relay.cast(y, "int32")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.batch_flatten(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_multi():
    """Test altering a conv2d to a multiply-tiled kernel layout (OHWI16i64o2i).

    Verifies that layout transforms are inserted for both the data and the
    multi-level packed kernel layout.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        y = relay.nn.conv2d(x, weight, channels=128, kernel_size=(3, 3), padding=(1, 1))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed data layout and a triple-tiled kernel layout.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        new_attrs["kernel_layout"] = "OHWI16i64o2i"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(128, 64, 3, 3))
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        w = relay.layout_transform(weight, "OIHW", "OHWI16i64o2i")
        y = relay.nn.conv2d(
            y,
            w,
            channels=128,
            kernel_size=(3, 3),
            padding=(1, 1),
            kernel_layout="OHWI16i64o2i",
            data_layout="NCHW16c",
        )
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_lrn():
    """Test altering a conv2d layout followed by LRN.

    max_pool2d can follow the conv2d in the packed NCHW16c layout, but LRN
    cannot, so a layout transform back to NCHW must be inserted before it.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias")
        weight = relay.var("weight")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.max_pool2d(y, pool_size=(2, 2))
        y = relay.nn.lrn(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed data/kernel layouts for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        new_attrs["kernel_layout"] = "OIHW16i"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64,))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        w = relay.layout_transform(weight, "OIHW", "OIHW16i")
        y = relay.nn.conv2d(
            y,
            w,
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            kernel_layout="OIHW16i",
            data_layout="NCHW16c",
        )
        y = relay.nn.max_pool2d(y, pool_size=(2, 2), layout="NCHW16c")
        # LRN stays in NCHW, so the graph falls back before it.
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.lrn(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_dual_path():
    """
    Test alternating the layout with two outputs.
    One path continues to use the new layout while one path fall backs to old layout.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y1 = relay.nn.relu(y1)
        y2 = relay.nn.batch_flatten(y)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for every conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.nn.relu(y)
        # Path 1 keeps the packed layout through the second conv2d ...
        y1 = relay.nn.conv2d(
            y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y1 = relay.nn.relu(y1)
        y1 = relay.layout_transform(y1, "NCHW16c", "NCHW")
        # ... while path 2 falls back to NCHW for batch_flatten.
        y2 = relay.layout_transform(y, "NCHW16c", "NCHW")
        y2 = relay.nn.batch_flatten(y2)
        ret = relay.Tuple([y1, y2])
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_resnet():
    """Test alternating the layout of a residual block
    This also tests the elimination of duplicated transformation.
    If a same transformation applies to a same node twice, only one transformation will be created.
    """

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1))
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y)
        return relay.Function(analysis.free_vars(y), y)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for every conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        # Both conv2d branches consume the same input, so only one
        # layout_transform of x is expected.
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.nn.relu(y)
        y2 = relay.nn.conv2d(x, weight2, channels=32, kernel_size=(1, 1), data_layout="NCHW16c")
        y2 = relay.nn.relu(y2)
        y = y + y2
        y = relay.nn.global_max_pool2d(y, layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        return relay.Function(analysis.free_vars(y), y)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_broadcast_op():
    """Test broadcast operators: broadcast operands on either side of the
    altered conv2d output must be transformed into the packed layout."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64,))
        scale = relay.var("scale", shape=(64, 1, 1))
        weight = relay.var("weight")
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.bias_add(y, bias)  # test broadcasting to lhs
        y = relay.multiply(scale, y)  # test broadcasting to rhs
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        bias = relay.var("bias", shape=(64,))
        scale = relay.var("scale", shape=(64, 1, 1))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        # Broadcast operands are rank-expanded to NCHW before being packed.
        bias = relay.expand_dims(bias, 1, 2)
        bias = relay.expand_dims(bias, 0, 1)
        bias = relay.layout_transform(bias, "NCHW", "NCHW16c")
        scale = relay.expand_dims(scale, 0, 1)
        scale = relay.layout_transform(scale, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.add(y, bias)  # test broadcasting to lhs
        y = relay.multiply(scale, y)  # test broadcasting to rhs
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_broadcast_scalar_op():
    """Test altering a conv2d layout in the presence of scalar-like broadcast
    operands: rank-1/rank-2 multipliers need no layout transform, while the
    channel bias must be expanded and packed."""

    def before():
        x = relay.var("x", shape=(1, 500, 500, 64))
        kernel = relay.var("kernel", shape=(3, 3, 64, 64), dtype="float32")
        bias = relay.var("bias", shape=(64,))
        multiplier1 = relay.var("multiplier1", shape=(1,), dtype="float32")
        multiplier2 = relay.var("multiplier2", shape=(1, 1), dtype="float32")
        y = relay.nn.conv2d(x, kernel, data_layout="NHWC", kernel_layout="HWIO", kernel_size=(3, 3))
        y = relay.add(bias, y)
        y = relay.nn.relu(y)
        y = relay.multiply(multiplier1, y)
        y = relay.multiply(y, multiplier2)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 500, 500, 64))
        kernel = relay.var("kernel", shape=(3, 3, 64, 64), dtype="float32")
        bias = relay.var("bias", shape=(64,))
        multiplier1 = relay.var("multiplier1", shape=(1,), dtype="float32")
        multiplier2 = relay.var("multiplier2", shape=(1, 1), dtype="float32")
        # The channel bias is expanded to NHWC rank and packed.
        b = relay.expand_dims(bias, axis=0, num_newaxis=3)
        b = relay.layout_transform(b, "NHWC", "NCHW16c")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, kernel, data_layout="NCHW16c", kernel_layout="HWIO", kernel_size=(3, 3)
        )
        y = relay.add(b, y)
        y = relay.nn.relu(y)
        # Scalar-shaped multipliers broadcast in any layout: no transforms here.
        y = relay.multiply(multiplier1, y)
        y = relay.multiply(y, multiplier2)
        y = relay.layout_transform(y, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_scalar():
    """Altering a conv2d to NCHW16c must leave a scalar constant addend untouched."""

    def before():
        inp = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight")
        out = relay.nn.conv2d(inp, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        out = relay.add(out, relay.const(1, "float32"))
        return relay.Function(analysis.free_vars(out), out)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        updated = dict(attrs)
        updated["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **updated)

    def expected():
        inp = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight")
        out = relay.layout_transform(inp, "NCHW", "NCHW16c")
        out = relay.nn.conv2d(
            out, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # A scalar constant broadcasts in any layout, so no transform is inserted for it.
        out = relay.add(out, relay.const(1.0, "float32"))
        out = relay.layout_transform(out, "NCHW16c", "NCHW")
        return relay.Function(analysis.free_vars(out), out)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        actual = run_opt_pass(before(), [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        reference = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(actual, reference), "Actual = \n" + str(actual)
def test_alter_layout_scalar_regression():
    """Regression test: scalar/reduction handling used to fail when altering
    layout across a simplified batch_norm (ConvertLayout + SimplifyInference
    + CanonicalizeOps + AlterOpLayout pipeline)."""

    def before():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 16))
        bias = relay.var("bias", shape=(1, 1, 1, 16))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=16,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.add(y, bias)
        mean = relay.mean(y, axis=3, exclude=True)
        var = relay.variance(y, axis=3, exclude=True)
        gamma = relay.var("gamma")
        beta = relay.var("beta")
        y = relay.nn.batch_norm(y, gamma, beta, mean, var, axis=3)
        y = y[0]
        return relay.Function(analysis.free_vars(y), y)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        # Expected graph after ConvertLayout (NHWC -> NCHW), SimplifyInference
        # (batch_norm expanded into mean/variance arithmetic), and
        # AlterOpLayout (NCHW -> NCHW16c packing).
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight = relay.var("weight", shape=(3, 3, 64, 16))
        bias = relay.var("bias", shape=(1, 1, 1, 16))
        x = relay.layout_transform(x, src_layout="NHWC", dst_layout="NCHW")
        x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW16c")
        weight = relay.layout_transform(weight, src_layout="HWIO", dst_layout="OIHW")
        y = relay.nn.conv2d(
            x, weight, channels=16, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        bias = relay.layout_transform(bias, src_layout="NHWC", dst_layout="NCHW")
        bias = relay.layout_transform(bias, src_layout="NCHW", dst_layout="NCHW16c")
        add = relay.add(y, bias)
        # Reductions over the packed layout keep both channel axes (1 and 4).
        mean = relay.mean(add, axis=[1, 4], exclude=True)
        var = relay.variance(add, axis=[1, 4], exclude=True)
        denom = relay.const(1.0) / relay.sqrt(var + relay.const(1e-05))
        gamma = relay.var("gamma", shape=(16,))
        denom_c16c = denom * relay.layout_transform(gamma, src_layout="C", dst_layout="C16c")
        denom = relay.layout_transform(denom_c16c, src_layout="C16c", dst_layout="C")
        denom_expand1 = relay.expand_dims(denom, axis=1, num_newaxis=2)
        denom_expand2 = relay.expand_dims(denom_expand1, axis=0)
        denom_nchwc16 = relay.layout_transform(
            denom_expand2, src_layout="NCHW", dst_layout="NCHW16c"
        )
        out = add * denom_nchwc16
        beta = relay.var("beta", shape=(16,))
        numerator_c16c = (-mean) * denom_c16c + relay.layout_transform(
            beta, src_layout="C", dst_layout="C16c"
        )
        numerator = relay.layout_transform(numerator_c16c, src_layout="C16c", dst_layout="C")
        numerator_expand1 = relay.expand_dims(numerator, axis=1, num_newaxis=2)
        numerator_expand2 = relay.expand_dims(numerator_expand1, axis=0)
        numerator_nchwc16 = relay.layout_transform(
            numerator_expand2, src_layout="NCHW", dst_layout="NCHW16c"
        )
        out = out + numerator_nchwc16
        out = relay.layout_transform(out, src_layout="NCHW16c", dst_layout="NCHW")
        y = relay.layout_transform(out, src_layout="NCHW", dst_layout="NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        desired_layouts = {"nn.conv2d": ["NCHW", "default"], "nn.batch_norm": ["NHWC", "default"]}
        a = run_opt_pass(
            a,
            [
                transform.InferType(),
                relay.transform.ConvertLayout(desired_layouts),
                transform.SimplifyInference(),
                transform.CanonicalizeOps(),
                transform.AlterOpLayout(),
            ],
        )
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_concatenate():
    """NCHW, NHWC and corner case concatenate layout transform."""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for every conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    # NCHW layout transformation.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y1 = relay.nn.conv2d(y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.concatenate([y, y1], axis=1)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y1 = relay.nn.conv2d(
            y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # Concatenate runs in the packed layout; the channel axis stays 1.
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # NHWC layout transformation.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        y1 = relay.nn.conv2d(
            y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        ret = relay.concatenate([y, y1], axis=3)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        weight2 = relay.var("weight2")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y1 = relay.nn.conv2d(
            y, weight2, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # The concat axis is remapped from NHWC axis 3 to packed axis 1.
        ret = relay.concatenate([y, y1], axis=1)
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_upsamping_op():
    """Test upsampling operators: upsampling and avg_pool2d should follow the
    conv2d into the packed NCHW16c layout."""

    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.upsampling(y, scale_h=2, scale_w=2)
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.nn.upsampling(y, scale_h=2, scale_w=2, layout="NCHW16c")
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nchw_dyn_upsamping_op():
    """Test upsampling operators with dynamic (expression) scale factors."""

    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
        # Scales are relay constants rather than Python ints (dynamic form).
        y = relay.nn.upsampling(y, scale_h=relay.const(2), scale_w=relay.const(2))
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2))
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight")
        x = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        y = relay.nn.upsampling(y, scale_h=relay.const(2), scale_w=relay.const(2), layout="NCHW16c")
        y = relay.nn.avg_pool2d(y, pool_size=(2, 2), strides=(2, 2), layout="NCHW16c")
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
@tvm.testing.parametrize_targets("llvm")
def test_alter_layout_strided_slice(target, dev):
    """Test rewriting strided_slice during alter_op_layout.

    In addition to building the expected graph, both modules are executed on
    the graph/debug/vm executors and their numerical results compared.
    """

    def before():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
        y = relay.strided_slice(y, begin=[0, 16], end=[1, 33], strides=[1, 1])
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW4c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW4c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 32, 28, 28))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        weight = relay.layout_transform(weight, "OIHW", "OIHW4i4o")
        x = relay.layout_transform(x, "NCHW", "NCHW4c")
        y = relay.op.nn.contrib_conv2d_nchwc(
            x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW4c"
        )
        # Channel bounds are rescaled by the 4-way split: begin 16 -> 4, end 33 -> 21.
        y = relay.strided_slice(y, begin=[0, 4], end=[1, 21], strides=[1, 1])
        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        b = run_opt_pass(expected(), transform.InferType())

        # Verify inference result
        mod_before = tvm.IRModule()
        mod_new = tvm.IRModule()
        mod_before["main"] = a
        mod_new["main"] = b
        mod_before = transform.InferType()(mod_before)
        mod_new = transform.InferType()(mod_new)
        # NOTE: relay.build_config is deprecated; use tvm.transform.PassContext,
        # matching run_opt_pass above.
        with tvm.transform.PassContext(opt_level=3):
            for kind in ["graph", "debug", "vm"]:
                np_data = np.random.uniform(size=(1, 32, 28, 28)).astype("float32")
                np_weight = np.random.uniform(size=(32, 32, 3, 3)).astype("float32")
                f_before = relay.create_executor(
                    kind, mod=mod_before, device=dev, target=target
                ).evaluate()
                result_before = f_before(np_data, np_weight)
                f_new = relay.create_executor(kind, mod=mod_new, device=dev, target=target).evaluate()
                result_new = f_new(np_data, np_weight)
                tvm.testing.assert_allclose(
                    result_before.numpy(), result_new.numpy(), rtol=1e-5, atol=1e-5
                )
def test_alter_layout_strided_slice_axes_nhwc():
    """Test rewriting strided_slice with explicit `axes` during alter_op_layout."""

    def before():
        x = relay.var("x", shape=(1, 28, 28, 32))
        weight = relay.var("weight", shape=(3, 3, 32, 32))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        y = relay.strided_slice(y, begin=[0, 16], end=[1, 32], strides=[1, 1], axes=[0, 3])
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NHWC4c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NHWC4c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 28, 28, 32))
        weight = relay.var("weight", shape=(3, 3, 32, 32))
        x = relay.layout_transform(x, "NHWC", "NHWC4c")
        y = relay.op.nn.conv2d(
            x,
            weight,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC4c",
            kernel_layout="HWIO",
        )
        # Channel bounds on axis 3 are rescaled by the 4-way split: 16 -> 4, 32 -> 8.
        y = relay.strided_slice(y, begin=[0, 4], end=[1, 8], strides=[1, 1], axes=[0, 3])
        y = relay.layout_transform(y, "NHWC4c", "NHWC")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        mod_before = tvm.IRModule()
        mod_new = tvm.IRModule()
        mod_before["main"] = a
        mod_new["main"] = b
        assert tvm.ir.structural_equal(mod_before, mod_new)
def test_alter_layout_depthwise_conv2d():
    """Test depthwise_conv2d (grouped conv with groups == channels).

    Uses the real x86 TOPI alter-layout hook, which rewrites the op to
    contrib_depthwise_conv2d_nchwc with tiled data/kernel layouts.
    """

    def before():
        x = relay.var("x", shape=(1, 32, 56, 56))
        w = relay.var("w", shape=(32, 1, 3, 3))
        y = relay.nn.conv2d(x, w, padding=(1, 1), channels=32, kernel_size=(3, 3), groups=32)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    # NOTE: `topi` is already imported at module level (`from tvm import relay, topi`);
    # the previous redundant function-local import has been removed.
    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # The active target determines the tiling chosen by the TOPI hook.
        with tvm.target.Target("llvm -mcpu=core-avx2"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)

    def expected():
        x = relay.var("x", shape=(1, 32, 56, 56))
        w = relay.var("w", shape=(32, 1, 3, 3))
        x = relay.layout_transform(x, "NCHW", "NCHW8c")
        w = relay.layout_transform(w, "OIHW", "OIHW1i8o")
        y = relay.nn.contrib_depthwise_conv2d_nchwc(
            x,
            w,
            padding=(1, 1, 1, 1),
            channels=32,
            kernel_size=(3, 3),
            groups=32,
            data_layout="NCHW8c",
            kernel_layout="OIHW1i8o",
            out_layout="NCHW8c",
        )
        y = relay.layout_transform(y, "NCHW8c", "NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b)
def test_alter_layout_prelu():
    """Test PRelu operator: prelu is not layout-aware, so the graph must fall
    back to NCHW before it."""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for the conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        w = relay.var("weight")
        alpha = relay.var("alpha", relay.IncompleteType())
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, w, channels=64, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # prelu has no layout attribute, so transform back before applying it.
        y = relay.layout_transform(y, "NCHW16c", "NCHW")
        y = relay.nn.prelu(y, alpha)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, [transform.CanonicalizeOps(), transform.AlterOpLayout()])
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b)
def test_alter_layout_pad():
    """Check NCHW, NHWC and corner case for pad layout conversion."""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Request packed NCHW16c data layout for every conv2d.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # pad follows into the packed layout; a fifth (0, 0) entry covers the
        # inner 16c axis.
        ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (0, 0)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # NHWC pad widths are remapped onto the packed NCHW16c axes.
        ret = relay.nn.pad(y, pad_width=((0, 0), (0, 0), (1, 1), (1, 1), (0, 0)))
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check that conversion does not happen when padding along split axis.
    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.nn.pad(y, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # Padding the (split) channel axis cannot be expressed in NCHW16c, so
        # the graph falls back to NCHW before the pad.
        ret = relay.layout_transform(y, "NCHW16c", "NCHW")
        ret = relay.nn.pad(ret, pad_width=((0, 0), (1, 1), (1, 1), (1, 1)))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_pool():
    """Check NCHW, NHWC pool layout conversion"""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Force the conv into the blocked NCHW16c data layout.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1))
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # The pool is expected to follow the conv's blocked layout.
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
        ret = relay.layout_transform(ret, "NCHW16c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        ret = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NCHW16c")
        ret = relay.layout_transform(ret, "NCHW16c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_sum():
    """Check NCHW, NHWC sum layout conversion"""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Force the conv into the blocked NCHW16c data layout.
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW16c"
        return relay.nn.conv2d(data, weight, **new_attrs)

    # Check NCHW conversion.
    def before_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1))
        ret = relay.sum(y, axis=1, keepdims=True)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nchw():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NCHW", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        # Channel axis 1 splits into axes 1 and 4 in NCHW16c; summing both
        # with keepdims leaves size-1 blocks, hence the NCHW1c source layout.
        ret = relay.sum(y, axis=[1, 4], keepdims=True)
        ret = relay.layout_transform(ret, "NCHW1c", "NCHW")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nchw()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nchw(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.nn.conv2d(
            x, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NHWC"
        )
        ret = relay.sum(y, axis=3, keepdims=True)
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    def expected_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1")
        y = relay.layout_transform(x, "NHWC", "NCHW16c")
        y = relay.nn.conv2d(
            y, weight1, channels=32, kernel_size=(3, 3), padding=(1, 1), data_layout="NCHW16c"
        )
        ret = relay.sum(y, axis=[1, 4], keepdims=True)
        ret = relay.layout_transform(ret, "NCHW1c", "NHWC")
        y = relay.Function(analysis.free_vars(ret), ret)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nhwc_arm():
    """Check that AlterOplayout does not alter NHWC data layout."""

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        from tvm import topi

        # Use the real arm_cpu alter implementation rather than a stub.
        with tvm.target.Target("llvm -device=arm_cpu"):
            return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)

    # Check NHWC conversion.
    def before_nhwc():
        x = relay.var("x", shape=(1, 56, 56, 64))
        weight1 = relay.var("weight1", shape=(3, 3, 64, 64))
        weight2 = relay.var("weight2", shape=(3, 3, 64, 64))
        y = relay.nn.conv2d(
            x, weight1, channels=64, kernel_size=(3, 3), data_layout="NHWC", kernel_layout="HWIO"
        )
        y = relay.nn.relu(y)
        y = relay.nn.avg_pool2d(y, pool_size=(1, 1), layout="NHWC")
        y = relay.nn.conv2d(
            y, weight2, channels=64, kernel_size=(3, 3), data_layout="NHWC", kernel_layout="HWIO"
        )
        y = relay.nn.relu(y)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc():
        # The pass is expected to be a no-op on this graph.
        return before_nhwc()

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_layout_nhwc_int8_aarch64():
    """Check that AlterOplayout does not alter NHWC data layout."""
    from tvm import autotvm

    # Shape the transformed (GEMM-interleaved) weight must have; checked in
    # Int8Fallback.update below.
    expected_workload_shape = (20, 42, 4, 16)

    # We use Int8Fallback to disable the fallback flag
    # and to test the new workload produced during the pass
    class Int8Fallback(autotvm.FallbackContext):
        def _query_inside(self, target, workload):
            key = (target, workload)
            if key in self.memory:
                return self.memory[key]
            cfg = autotvm.task.space.FallbackConfigEntity()
            cfg.is_fallback = False
            cfg.cost = 0
            self.memory[key] = cfg
            return cfg

        def update(self, target, workload, cfg):
            key = (str(target), workload)
            # Verify the workload produced by the pass: transformed weight
            # shape and the expected "without_transform" task name.
            assert workload[2][1] == expected_workload_shape
            assert workload[0] == "conv2d_NHWC_quantized_interleaved_without_transform.arm_cpu"
            self.memory[key] = cfg

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        from tvm import topi

        with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
            with Int8Fallback():
                tmp = topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
                return tmp

    # Check NHWC conversion.
    def before_nhwc_int8():
        x = relay.var("x", shape=(1, 56, 56, 73), dtype="int8")
        weight = relay.var("weight1", shape=(3, 3, 73, 79), dtype="int8")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=79,
            kernel_size=(3, 3),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected_nhwc_int8():
        x = relay.var("x", shape=(1, 56, 56, 73), dtype="int8")
        weight = relay.var("weight1", shape=(3, 3, 73, 79), dtype="int8")
        tile_rows = 4
        tile_cols = 16
        # The kernel is pre-transformed for the GEMM-based conv implementation.
        weight_transformed = relay.nn.contrib_conv2d_gemm_weight_transform(
            weight, tile_rows, tile_cols
        )
        y = relay.nn.contrib_conv2d_gemm_without_weight_transform(
            x,
            weight_transformed,
            channels=79,
            kernel_size=(3, 3),
            data_layout="NHWC",
            kernel_layout="HWIO",
            out_dtype="int32",
        )
        y = relay.Function(analysis.free_vars(y), y)
        return y

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before_nhwc_int8()
        a = run_opt_pass(a, transform.AlterOpLayout())
        b = run_opt_pass(expected_nhwc_int8(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_alter_op_with_global_var():
    """Test directly replacing an operator with a new one"""

    def before():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(x, weight, channels=64, kernel_size=(3, 3), padding=(1, 1))
        y = relay.nn.relu(y)
        mod = tvm.IRModule()
        foo = relay.GlobalVar("foo")
        # The conv lives inside a global function called from main, so the
        # pass must rewrite through the GlobalVar call.
        mod[foo] = relay.Function([x, weight], y)
        mod = transform.InferType()(mod)
        mod["main"] = relay.Function([x, weight], foo(x, weight))
        mod = transform.InferType()(mod)
        return mod

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        # Replace the conv by an equivalent one whose weight is scaled by 2.
        data, weight = inputs
        weight = relay.multiply(weight, relay.const(2.0, "float32"))
        return relay.nn.conv2d(data, weight, **attrs)

    def expected():
        x = relay.var("x", shape=(1, 64, 56, 56))
        weight = relay.var("weight", shape=(64, 64, 3, 3))
        y = relay.nn.conv2d(
            x,
            relay.multiply(weight, relay.const(2.0, "float32")),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.nn.relu(y)
        mod = tvm.IRModule()
        foo = relay.GlobalVar("foo")
        mod[foo] = relay.Function([x, weight], y)
        mod = transform.InferType()(mod)
        mod["main"] = relay.Function([x, weight], foo(x, weight))
        return mod

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = before()
        a = transform.AlterOpLayout()(a)
        b = transform.InferType()(expected())
        assert tvm.ir.structural_equal(a, b, map_free_vars=True), "Actual = \n" + str(a)
def test_alter_op_dense():
    """Check that nn.dense is altered to contrib_dense_pack (NC16n weight
    layout) by the x86 dense alter hook under an AVX2 target."""

    def before():
        x = relay.var("x", shape=(32, 1, 128))
        weight = relay.var("weight", shape=(48, 64))
        avg1d = relay.nn.adaptive_avg_pool1d(x, [64])
        squeeze = relay.squeeze(avg1d, axis=[1])
        y = relay.nn.dense(squeeze, weight)
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(32, 1, 128))
        weight = relay.var("weight", shape=(48, 64))
        target_layout = "NC16n"
        # The weight is packed once; the data input stays unchanged.
        weight_transform = relay.layout_transform(weight, "NC", target_layout)
        avg1d = relay.nn.adaptive_avg_pool1d(x, [64])
        squeeze = relay.squeeze(avg1d, axis=[1])
        y = relay.nn.contrib_dense_pack(
            squeeze, weight_transform, target_layout, units=None, out_dtype="float32"
        )
        y = relay.Function(analysis.free_vars(y), y)
        return y

    target = "llvm -mcpu=core-avx2"
    with tvm.target.Target(target):
        with TempOpAttr(
            "nn.dense", "FTVMAlterOpLayout", topi.x86.dense_alter_op._alter_dense_layout
        ):
            a = before()
            a = run_opt_pass(a, transform.AlterOpLayout())
            b = run_opt_pass(expected(), transform.InferType())
            assert tvm.ir.structural_equal(a, b)
def test_not_inplace_modify():
    """AlterOpLayout must not mutate the input expression in place.

    After running the pass, the original conv2d node must still carry its
    pre-pass "NCHW" data layout.
    """

    def build_graph():
        data = relay.var("x", shape=(1, 64, 56, 56))
        kernel = relay.var("weight", shape=(64, 64, 3, 3))
        conv = relay.nn.conv2d(data, kernel, channels=64, kernel_size=(3, 3), padding=(1, 1))
        pooled = relay.nn.max_pool2d(
            relay.nn.relu(conv), pool_size=[2, 2], strides=[2, 2], padding=[0, 0, 0, 0]
        )
        return relay.Function([data, kernel], pooled)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, kernel = inputs
        updated = dict(attrs)
        updated["data_layout"] = "NCHW16c"
        updated["kernel_layout"] = "OIHW16i"
        return relay.nn.conv2d(data, kernel, **updated)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        original = build_graph()
        run_opt_pass(original, [transform.AlterOpLayout()])
        # The original graph's pool node must keep its default NCHW layout.
        assert original.body.attrs.layout == "NCHW"
def test_alter_op_dense_packed_data():
    """Check dense altering when its input comes from a packed-layout
    (NCHW8c) conv/pool chain followed by a squeeze."""

    def before():
        x = relay.var("x", shape=(1, 32, 8, 8))
        weight = relay.var("conv2d_weight", shape=(32, 32, 3, 3))
        conv = relay.nn.conv2d(x, weight, channels=32, kernel_size=(3, 3), padding=(1, 1))
        pool = relay.nn.avg_pool2d(conv, pool_size=[8, 8], padding=[0, 0, 0, 0])
        squeeze = relay.squeeze(pool, axis=[2, 3])
        dense = relay.nn.dense(squeeze, relay.var("dense_weight", shape=(16, 32)))
        return relay.Function(analysis.free_vars(dense), dense)

    def expected():
        x = relay.var("x", shape=(1, 32, 8, 8))
        conv_weight = relay.var("conv2d_weight", shape=(32, 32, 3, 3))
        dense_weight = relay.var("dense_weight", shape=(16, 32))
        conv = relay.nn.contrib_conv2d_nchwc(
            relay.layout_transform(x, "NCHW", "NCHW8c"),
            relay.layout_transform(conv_weight, "OIHW", "OIHW8i8o"),
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW8c",
            kernel_layout="OIHW8i8o",
            out_layout="NCHW8c",
        )
        pool = relay.nn.avg_pool2d(conv, pool_size=[8, 8], padding=[0, 0, 0, 0], layout="NCHW8c")
        # The squeezed result is still blocked (NC8c) and must be unpacked
        # before feeding the packed dense.
        squeeze = relay.squeeze(pool, axis=[2, 3])
        dense = relay.nn.contrib_dense_pack(
            relay.layout_transform(squeeze, "NC8c", "NC"),
            relay.layout_transform(dense_weight, "NC", "NC16n"),
            "NC16n",
            out_dtype="float32",
        )
        return relay.Function(analysis.free_vars(dense), dense)

    with tvm.target.Target("llvm -mcpu=core-avx2"):
        with TempOpAttr(
            "nn.dense", "FTVMAlterOpLayout", topi.x86.dense_alter_op._alter_dense_layout
        ):
            a = run_opt_pass(before(), transform.AlterOpLayout())
            b = run_opt_pass(expected(), transform.InferType())
            assert tvm.ir.structural_equal(a, b)
def test_conv2d_strided_slice_packed_to_unpacked():
    """We do not support propagating through packed to unpacked layout"""
    x_shape = (1, 1, 1, 1, 4)
    w_shape = (9, 1, 3, 3, 4, 4)

    def before():
        x = relay.var("x", shape=x_shape)
        weight = relay.var("weight", shape=w_shape)
        y = relay.nn.conv2d(
            x,
            weight,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
        )
        y = relay.strided_slice(y, begin=[0, 0], end=[1, -1], strides=[1, 8])
        return relay.Function([x, weight], y)

    def expected():
        x = relay.var("x", shape=x_shape)
        weight = relay.var("weight", shape=w_shape)
        # Inputs are unpacked for the altered NCHW conv ...
        x_nchw = relay.layout_transform(x, src_layout="NCHW4c", dst_layout="NCHW")
        weight_oihw = relay.layout_transform(weight, src_layout="OIHW4i4o", dst_layout="OIHW")
        y = relay.nn.conv2d(
            x_nchw,
            weight_oihw,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        # ... and repacked before the strided_slice, which keeps its
        # original (packed-layout) indices.
        y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NCHW4c")
        y = relay.strided_slice(y, begin=[0, 0], end=[1, -1], strides=[1, 8])
        return relay.Function([x, weight], y)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW"
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b)
def test_conv2d_strided_slice_arbitrary_stride():
    """Test rewriting strided_slice with arbitrary stride.

    Exercises a non-unit stride on the channel axis downstream of a conv
    that is altered to a blocked (NCHW3c) layout; only checks that the
    pass runs without raising.
    """

    def make_func():
        inp = relay.var("x", shape=(4, 12, 1, 1))
        kern = relay.var("weight", shape=(9, 12, 1, 1))
        conv = relay.nn.conv2d(inp, kern, channels=9, kernel_size=(1, 1), padding=(0, 0))
        sliced = relay.strided_slice(conv, begin=[3], end=[6], strides=[3], axes=[1])
        return relay.Function(analysis.free_vars(sliced), sliced)

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, kern = inputs
        updated = dict(attrs)
        updated["data_layout"] = "NCHW3c"
        return relay.nn.conv2d(data, kern, **updated)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        run_opt_pass(make_func(), transform.AlterOpLayout())
def test_conv2d_reduce_channels():
    """Compile a conv2d whose output feeds a channel-axis argmin.

    Only checks that the full build pipeline succeeds at opt_level=3.
    """
    data = relay.var("data", shape=(1, 8, 48, 48))
    conv = relay.nn.conv2d(
        data=data,
        weight=relay.var("weight"),
        kernel_size=(1, 1),
        channels=8,
        dilation=1,
        strides=(47, 47),
    )
    reduced = relay.argmin(conv, axis=1)
    mod, params = testing.create_workload(reduced)
    with tvm.transform.PassContext(opt_level=3):
        relay.build(mod, params=params, target="llvm")
def test_alter_layout_nonscalar_broadcast():
    """Test broadcast operators"""

    def before():
        x = relay.var("x", shape=(1, 16, 3, 3))
        weight = relay.var("weight", shape=(16, 16, 1, 1))
        y = relay.nn.conv2d(
            x, weight, channels=16, kernel_size=(1, 1), padding=(0, 0), data_layout="NCHW"
        )
        # z broadcasts against the (1, 16, 3, 3) conv output.
        z = relay.var("z", shape=(1, 3, 3))
        y = y + z
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 16, 3, 3))
        weight = relay.var("weight", shape=(16, 16, 1, 1))
        x = relay.layout_transform(x, src_layout="NCHW", dst_layout="NCHW4c")
        weight = relay.layout_transform(weight, src_layout="OIHW", dst_layout="OIHW4i4o")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=16,
            kernel_size=(1, 1),
            padding=(0, 0),
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
        )
        z = relay.var("z", shape=(1, 3, 3))
        # The broadcast operand is expanded to rank 4 and then blocked with
        # a size-1 inner axis so it can add against the NCHW4c conv output.
        z = relay.expand_dims(z, 0)
        z = relay.layout_transform(z, src_layout="NCHW", dst_layout="NCHW1c")
        y = y + z
        y = relay.layout_transform(y, src_layout="NCHW4c", dst_layout="NCHW")
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW4c"
        new_attrs["kernel_layout"] = "OIHW4i4o"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)

    # Numerical check: optimized (graph, opt_level=4) vs reference
    # (debug, opt_level=0) executors must agree.
    inp = np.random.uniform(size=(1, 16, 3, 3)).astype(np.float32)
    weight = np.random.uniform(size=(16, 16, 1, 1)).astype(np.float32)
    z = np.random.uniform(size=(1, 3, 3)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
    np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_blocked_no_broadcast():
    """Test broadcast operators working on already blocked layout"""

    def before():
        dtype = "float32"
        input_shape = (1, 8, 16, 16, 4)
        filter_shape = (1, 8, 4, 4, 4, 4)
        # Bias already matches the blocked layout's inner axis (4c).
        bias_shape = (1, 1, 1, 1, 4)
        A = relay.var("data", shape=input_shape, dtype=dtype)
        B = relay.var("weight", shape=filter_shape, dtype=dtype)
        C = relay.var("bias", shape=bias_shape, dtype=dtype)
        conv = relay.nn.conv2d(
            A,
            B,
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
            padding=[3, 3, 0, 0],
            strides=[2, 2],
            out_dtype=dtype,
            channels=4,
            kernel_size=(4, 4),
        )
        bias = relay.op.add(conv, C)
        bias = relay.Function(analysis.free_vars(bias), bias)
        return bias

    def expected():
        # The alter hook keeps the layout that is already in use, so the
        # pass must leave the graph unchanged.
        return before()

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW4c"
        new_attrs["kernel_layout"] = "OIHW4i4o"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)

    # Numerical check: optimized vs reference executors must agree.
    inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
    weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
    z = np.random.uniform(size=(1, 1, 1, 1, 4)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
    np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_blocked_broadcast():
    """Test broadcast operators working on already blocked layout"""

    def before():
        dtype = "float32"
        input_shape = (1, 8, 16, 16, 4)
        filter_shape = (1, 8, 4, 4, 4, 4)
        # Scalar-like bias: broadcasts over every axis of the blocked output.
        bias_shape = (1, 1, 1, 1, 1)
        A = relay.var("data", shape=input_shape, dtype=dtype)
        B = relay.var("weight", shape=filter_shape, dtype=dtype)
        C = relay.var("bias", shape=bias_shape, dtype=dtype)
        conv = relay.nn.conv2d(
            A,
            B,
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
            padding=[3, 3, 0, 0],
            strides=[2, 2],
            out_dtype=dtype,
            channels=4,
            kernel_size=(4, 4),
        )
        bias = relay.op.add(conv, C)
        bias = relay.Function(analysis.free_vars(bias), bias)
        return bias

    def expected():
        # Layout already matches the alter hook's choice: pass is a no-op.
        return before()

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW4c"
        new_attrs["kernel_layout"] = "OIHW4i4o"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)

    # Numerical check: optimized vs reference executors must agree.
    inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
    weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
    z = np.random.uniform(size=(1, 1, 1, 1, 1)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
    np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_alter_layout_re_blocking_broadcast():
    """Test of re-blocking shapes with broadcast operators"""

    def before():
        dtype = "float32"
        input_shape = (1, 8, 16, 16, 4)
        filter_shape = (1, 8, 4, 4, 4, 4)
        bias_shape = (1, 1, 1, 1, 4)
        A = relay.var("data", shape=input_shape, dtype=dtype)
        B = relay.var("weight", shape=filter_shape, dtype=dtype)
        C = relay.var("bias", shape=bias_shape, dtype=dtype)
        conv = relay.nn.conv2d(
            A,
            B,
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
            padding=[3, 3, 0, 0],
            strides=[2, 2],
            out_dtype=dtype,
            channels=4,
            kernel_size=(4, 4),
        )
        bias = relay.op.add(conv, C)
        bias = relay.Function(analysis.free_vars(bias), bias)
        return bias

    def expected():
        dtype = "float32"
        input_shape = (1, 8, 16, 16, 4)
        filter_shape = (1, 8, 4, 4, 4, 4)
        bias_shape = (1, 1, 1, 1, 4)
        A = relay.var("data", shape=input_shape, dtype=dtype)
        B = relay.var("weight", shape=filter_shape, dtype=dtype)
        C = relay.var("bias", shape=bias_shape, dtype=dtype)
        # The alter hook asks for 2c/2i2o blocking, so every operand is
        # re-blocked from the 4c/4i4o layout it came in with.
        A = relay.layout_transform(A, src_layout="NCHW4c", dst_layout="NCHW2c")
        B = relay.layout_transform(B, src_layout="OIHW4i4o", dst_layout="OIHW2i2o")
        conv = relay.nn.conv2d(
            A,
            B,
            data_layout="NCHW2c",
            kernel_layout="OIHW2i2o",
            padding=[3, 3, 0, 0],
            strides=[2, 2],
            out_dtype=dtype,
            channels=4,
            kernel_size=(4, 4),
        )
        C = relay.layout_transform(C, src_layout="NCHW4c", dst_layout="NCHW2c")
        bias = relay.op.add(conv, C)
        # Final result is converted back to the original blocking.
        bias = relay.layout_transform(bias, src_layout="NCHW2c", dst_layout="NCHW4c")
        bias = relay.Function(analysis.free_vars(bias), bias)
        return bias

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW2c"
        new_attrs["kernel_layout"] = "OIHW2i2o"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)

    # Numerical check: optimized vs reference executors must agree.
    inp = np.random.uniform(size=(1, 8, 16, 16, 4)).astype(np.float32)
    weight = np.random.uniform(size=(1, 8, 4, 4, 4, 4)).astype(np.float32)
    z = np.random.uniform(size=(1, 1, 1, 1, 4)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
    np.testing.assert_allclose(res.numpy(), res1.numpy(), rtol=1e-5, atol=1e-5)
def test_broadcast_non_adaptable():
    """NCHW4c + [x, x, 4] and NCHW4c is being altered to NCHW"""

    def before():
        x = relay.var("x", shape=(1, 4, 3, 3, 4))
        weight = relay.var("weight", shape=(4, 4, 1, 1, 4, 4))
        y = relay.nn.conv2d(
            x,
            weight,
            channels=16,
            kernel_size=(1, 1),
            padding=(0, 0),
            data_layout="NCHW4c",
            kernel_layout="OIHW4i4o",
        )
        # z's trailing axis lines up with the 4c inner axis, which cannot be
        # re-expressed once the conv output is unpacked.
        z = relay.var("z", shape=(3, 3, 4))
        y = y + z
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def expected():
        x = relay.var("x", shape=(1, 4, 3, 3, 4))
        weight = relay.var("weight", shape=(4, 4, 1, 1, 4, 4))
        x = relay.layout_transform(x, src_layout="NCHW4c", dst_layout="NCHW")
        weight = relay.layout_transform(weight, src_layout="OIHW4i4o", dst_layout="OIHW")
        y = relay.nn.conv2d(
            x,
            weight,
            channels=16,
            kernel_size=(1, 1),
            padding=(0, 0),
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        z = relay.var("z", shape=(3, 3, 4))
        # The conv result is packed back to NCHW4c so the original add is
        # left untouched.
        y = relay.layout_transform(y, src_layout="NCHW", dst_layout="NCHW4c")
        y = y + z
        y = relay.Function(analysis.free_vars(y), y)
        return y

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW"
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d(data, weight, **new_attrs)

    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        a = run_opt_pass(before(), transform.AlterOpLayout())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "\nExpected = \n" + str(b)

    # Numerical check: optimized vs reference executors must agree.
    inp = np.random.uniform(size=(1, 4, 3, 3, 4)).astype(np.float32)
    weight = np.random.uniform(size=(4, 4, 1, 1, 4, 4)).astype(np.float32)
    z = np.random.uniform(size=(3, 3, 4)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight, z)
    np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_broadcast_respect_input_layouts():
    """An altered conv feeding two reductions over different axes: the
    optimized and reference executors must produce the same result."""

    def before():
        x = relay.var("x", shape=(1, 16, 1, 1))
        w = relay.var("w", shape=(16, 16, 1, 1))
        x = relay.nn.conv2d(
            x,
            w,
            kernel_size=(1, 1),
            padding=(0, 0),
            channels=16,
        )
        # Two mins over different axes feed the same broadcast add.
        y1 = relay.min(x, axis=[2])
        y2 = relay.min(x, axis=[3])
        z = y1 + y2
        z = relay.Function(analysis.free_vars(z), z)
        return z

    def alter_conv2d(attrs, inputs, tinfos, out_type):
        data, weight = inputs
        new_attrs = dict(attrs)
        new_attrs["data_layout"] = "NCHW4c"
        new_attrs["kernel_layout"] = "OIHW4i4o"
        return relay.nn.conv2d(data, weight, **new_attrs)

    inp = np.random.uniform(size=(1, 16, 1, 1)).astype(np.float32)
    weight = np.random.uniform(size=(16, 16, 1, 1)).astype(np.float32)
    mod = tvm.IRModule.from_expr(before())
    with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", alter_conv2d):
        with tvm.transform.PassContext(opt_level=4):
            res = relay.build_module.create_executor(
                "graph", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight)
        with tvm.transform.PassContext(opt_level=0):
            res1 = relay.build_module.create_executor(
                "debug", mod, target="llvm", device=tvm.cpu()
            ).evaluate()(inp, weight)
    np.testing.assert_allclose(res.numpy(), res1.numpy())
def test_axis_semantic_change():
    """Build conv2d -> transpose -> conv2d and check it compiles.

    The transpose between the two convs changes which input axis each conv
    attribute refers to; only checks that the build succeeds.
    """
    data = relay.var("x", shape=(1, 1, 24, 48))
    kernel_a = relay.const(np.random.uniform(size=(1, 1, 1, 1)))
    kernel_b = relay.const(np.random.uniform(size=(1, 1, 1, 1)))
    conv_a = relay.nn.conv2d(data, kernel_a, kernel_size=(1, 1), padding=(0, 0), channels=1)
    transposed = relay.transpose(conv_a, (0, 1, 3, 2))
    conv_b = relay.nn.conv2d(transposed, kernel_b, kernel_size=(1, 1), padding=(0, 0), channels=1)
    mod = tvm.IRModule.from_expr(relay.Function([data], conv_b))
    with tvm.transform.PassContext(opt_level=3):
        relay.build(mod, target="llvm")
def test_alter_with_subfunc():
    """AlterOpLayout must leave a module containing a sub-function call
    structurally unchanged (no conv ops to alter here)."""
    inner_param = relay.var("v", shape=[1, 256, 10, 10], dtype="float32")
    inner_body = relay.image.resize2d(
        inner_param, size=[16, 16], roi=[0.0, 0.0, 0.0, 0.0], rounding_method=""
    )
    sub_func = relay.Function([inner_param], inner_body)
    outer_param = relay.var("x", shape=[1, 256, 10, 10], dtype="float32")
    outer_body = relay.image.resize2d(
        sub_func(outer_param), size=[8, 8], roi=[0.0, 0.0, 0.0, 0.0], rounding_method=""
    )
    mod = tvm.IRModule.from_expr(relay.Function([outer_param], outer_body))
    mod = relay.transform.InferType()(mod)
    # Running the pass must not change the module.
    assert tvm.ir.structural_equal(relay.transform.AlterOpLayout()(mod), mod)
def test_alter_with_reduce():
    """Build resize2d -> mean -> resize1d and check it compiles at
    opt_level=4 (which includes AlterOpLayout)."""
    inp = relay.var("x", shape=(1, 1, 1, 1))
    resized = relay.image.resize2d(inp, (2, 4))
    reduced = relay.mean(resized, axis=0)
    out = relay.image.resize1d(reduced, (1,))
    mod = tvm.IRModule.from_expr(relay.Function((inp,), out))
    mod = relay.transform.InferType()(mod)
    with tvm.transform.PassContext(opt_level=4):
        relay.build(mod, target="llvm")
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 75,255 | 37.553279 | 100 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.