repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/relay/test_pass_unmatched_cases.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay.analysis import unmatched_cases
import pytest
def test_empty_match_block():
    """An empty match block matches nothing, so the single unmatched case is a wildcard."""
    scrutinee = relay.Var("v")
    empty_match = relay.Match(scrutinee, [])

    missing = unmatched_cases(empty_match)
    assert len(missing) == 1
    assert isinstance(missing[0], relay.PatternWildcard)
def test_trivial_matches():
    """A single wildcard clause — or a bare pattern variable — is already exhaustive."""
    v = relay.Var("v")

    wildcard_match = relay.Match(v, [relay.Clause(relay.PatternWildcard(), v)])
    assert len(unmatched_cases(wildcard_match)) == 0

    # A pattern variable binds anything, so it behaves like a wildcard.
    w = relay.Var("w")
    var_match = relay.Match(v, [relay.Clause(relay.PatternVar(w), w)])
    assert len(unmatched_cases(var_match)) == 0
def test_single_constructor_adt():
    """For an ADT with one constructor, a single constructor clause is exhaustive."""
    mod = tvm.IRModule()
    box = relay.GlobalTypeVar("box")
    tv = relay.TypeVar("a")
    box_ctor = relay.Constructor("box", [tv], box)
    mod[box] = relay.TypeData(box, [tv], [box_ctor])

    v = relay.Var("v")
    flat_match = relay.Match(
        v, [relay.Clause(relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]), v)]
    )
    # One constructor, one constructor clause: nothing can slip through.
    assert len(unmatched_cases(flat_match, mod)) == 0

    # The same holds when the constructor pattern is nested several levels deep.
    triple_box = relay.PatternConstructor(
        box_ctor,
        [
            relay.PatternConstructor(
                box_ctor,
                [relay.PatternConstructor(box_ctor, [relay.PatternWildcard()])],
            )
        ],
    )
    nested_match = relay.Match(v, [relay.Clause(triple_box, v)])
    assert len(unmatched_cases(nested_match, mod)) == 0
def test_too_specific_match():
    """Matching only lists of length >= 2 misses nil and the single-element list."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    # Pattern: cons(_, cons(_, _)) — only matches lists with at least two elements.
    two_or_more = relay.PatternConstructor(
        cons,
        [
            relay.PatternWildcard(),
            relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
        ],
    )
    match = relay.Match(v, [relay.Clause(two_or_more, v)])

    missing = unmatched_cases(match, mod)
    assert len(missing) == 2

    # Expect exactly two gaps: nil, and a one-element list cons(_, nil).
    saw_nil = False
    saw_singleton = False
    for case in missing:
        assert isinstance(case, relay.PatternConstructor)
        if case.constructor == nil:
            saw_nil = True
        if case.constructor == cons:
            assert isinstance(case.patterns[1], relay.PatternConstructor)
            assert case.patterns[1].constructor == nil
            saw_singleton = True
    assert saw_nil and saw_singleton

    # Appending a wildcard clause closes both gaps.
    covered_match = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternWildcard(),
                        relay.PatternConstructor(
                            cons, [relay.PatternWildcard(), relay.PatternWildcard()]
                        ),
                    ],
                ),
                v,
            ),
            relay.Clause(relay.PatternWildcard(), v),
        ],
    )
    assert len(unmatched_cases(covered_match, mod)) == 0
def test_multiple_constructor_clauses():
    """Clauses for lengths {0, 1, exactly 2, >=2} together cover every list."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    # list of length exactly 1: cons(_, nil)
    exactly_one = relay.PatternConstructor(
        cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
    )
    # list of length exactly 2: cons(_, cons(_, nil))
    exactly_two = relay.PatternConstructor(
        cons,
        [
            relay.PatternWildcard(),
            relay.PatternConstructor(
                cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
            ),
        ],
    )
    # empty list
    empty = relay.PatternConstructor(nil, [])
    # list of length 2 or more: cons(_, cons(_, _))
    two_plus = relay.PatternConstructor(
        cons,
        [
            relay.PatternWildcard(),
            relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
        ],
    )

    match = relay.Match(
        v,
        [
            relay.Clause(exactly_one, v),
            relay.Clause(exactly_two, v),
            relay.Clause(empty, v),
            relay.Clause(two_plus, v),
        ],
    )
    assert len(unmatched_cases(match, mod)) == 0
def test_missing_in_the_middle():
    """Covering lengths {0, 1, >=3} leaves exactly the length-2 list unmatched."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    # list of length exactly 1
    exactly_one = relay.PatternConstructor(
        cons, [relay.PatternWildcard(), relay.PatternConstructor(nil, [])]
    )
    # list of length 3 or more: cons(_, cons(_, cons(_, _)))
    three_plus = relay.PatternConstructor(
        cons,
        [
            relay.PatternWildcard(),
            relay.PatternConstructor(
                cons,
                [
                    relay.PatternWildcard(),
                    relay.PatternConstructor(
                        cons, [relay.PatternWildcard(), relay.PatternWildcard()]
                    ),
                ],
            ),
        ],
    )
    match = relay.Match(
        v,
        [
            relay.Clause(exactly_one, v),
            relay.Clause(relay.PatternConstructor(nil, []), v),  # empty list
            relay.Clause(three_plus, v),
        ],
    )

    # The only hole is a list of length exactly two: cons(_, cons(_, nil)).
    missing = unmatched_cases(match, mod)
    assert len(missing) == 1
    hole = missing[0]
    assert isinstance(hole, relay.PatternConstructor)
    assert hole.constructor == cons
    assert isinstance(hole.patterns[1], relay.PatternConstructor)
    assert hole.patterns[1].constructor == cons
    assert isinstance(hole.patterns[1].patterns[1], relay.PatternConstructor)
    assert hole.patterns[1].patterns[1].constructor == nil
def test_mixed_adt_constructors():
    """Exhaustiveness checking across a user ADT (box) nested with the prelude List.

    Checks both directions of nesting: a box containing a list, and a list of
    boxes, each in an incomplete and a complete variant.
    """
    mod = tvm.IRModule()
    box = relay.GlobalTypeVar("box")
    a = relay.TypeVar("a")
    box_ctor = relay.Constructor("box", [a], box)
    box_data = relay.TypeData(box, [a], [box_ctor])
    mod[box] = box_data
    # Prelude(mod) registers List (and friends) into mod as a side effect.
    Prelude(mod)
    # Consistency fix: sibling tests query the module directly; p.mod is the
    # same object, so use mod.get_type like everywhere else.
    _, cons, nil = mod.get_type("List")

    v = relay.Var("v")
    box_of_lists_inc = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(
                    box_ctor,
                    [
                        relay.PatternConstructor(
                            cons, [relay.PatternWildcard(), relay.PatternWildcard()]
                        )
                    ],
                ),
                v,
            )
        ],
    )
    # will fail to match a box containing an empty list
    unmatched = unmatched_cases(box_of_lists_inc, mod)
    assert len(unmatched) == 1
    assert isinstance(unmatched[0], relay.PatternConstructor)
    assert unmatched[0].constructor == box_ctor
    assert len(unmatched[0].patterns) == 1 and unmatched[0].patterns[0].constructor == nil

    # Adding a box(nil) clause makes it exhaustive.
    box_of_lists_comp = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(box_ctor, [relay.PatternConstructor(nil, [])]), v
            ),
            relay.Clause(
                relay.PatternConstructor(
                    box_ctor,
                    [
                        relay.PatternConstructor(
                            cons, [relay.PatternWildcard(), relay.PatternWildcard()]
                        )
                    ],
                ),
                v,
            ),
        ],
    )
    assert len(unmatched_cases(box_of_lists_comp, mod)) == 0

    list_of_boxes_inc = relay.Match(
        v,
        [
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                        relay.PatternWildcard(),
                    ],
                ),
                v,
            )
        ],
    )
    # fails to match empty list of boxes
    unmatched = unmatched_cases(list_of_boxes_inc, mod)
    assert len(unmatched) == 1
    assert isinstance(unmatched[0], relay.PatternConstructor)
    assert unmatched[0].constructor == nil

    list_of_boxes_comp = relay.Match(
        v,
        [
            # exactly one box
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                        relay.PatternConstructor(nil, []),
                    ],
                ),
                v,
            ),
            # exactly two boxes
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                        relay.PatternConstructor(
                            cons,
                            [
                                relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                                relay.PatternConstructor(nil, []),
                            ],
                        ),
                    ],
                ),
                v,
            ),
            # exactly three boxes
            relay.Clause(
                relay.PatternConstructor(
                    cons,
                    [
                        relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                        relay.PatternConstructor(
                            cons,
                            [
                                relay.PatternConstructor(box_ctor, [relay.PatternWildcard()]),
                                relay.PatternConstructor(
                                    cons,
                                    [
                                        relay.PatternConstructor(
                                            box_ctor, [relay.PatternWildcard()]
                                        ),
                                        relay.PatternConstructor(nil, []),
                                    ],
                                ),
                            ],
                        ),
                    ],
                ),
                v,
            ),
            # one or more boxes
            relay.Clause(
                relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
                v,
            ),
            # no boxes
            relay.Clause(relay.PatternConstructor(nil, []), v),
        ],
    )
    assert len(unmatched_cases(list_of_boxes_comp, mod)) == 0
def test_tuple_match():
    """A tuple pattern that binds every element is exhaustive."""
    a = relay.Var("a")
    b = relay.Var("b")
    pair_clause = relay.Clause(
        relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b
    )
    match = relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [pair_clause])
    assert len(unmatched_cases(match)) == 0
def test_inf_loop_case():
    """Regression test: exhaustiveness checking must terminate on this program."""
    code = """
#[version = "0.0.5"]
type Arith[A] {
    Zero,
    Const(A),
    Plus(Arith[A], Arith[A])
}
def @shallow_opt[A](%a: Arith[A]) -> Arith[A] {
    match (%a) {
        Plus(Zero, %r) => %r,
        Plus(%l, Zero) => %l,
        _ => %a
    }
}
"""
    # fromtext parses the module, then checks it (which includes strictness checking).
    tvm.relay.fromtext(code)
def test_expanding_ctor_with_no_args():
    """Regression test: repeatedly expanding a zero-argument constructor must terminate."""
    code = """
#[version = "0.0.5"]
type List[A] {
    Cons(A, List[A]),
    Nil,
}
def @expand_on_nil_match(%a: List[(List[()],)]) -> int {
    match (%a) {
        Cons((Nil), Nil) => 1,
        _ => 2,
    }
}
"""
    # exhaustion checks:
    # * hits Cons((Nil), Nil), expands to Cons(*, *), Nil()
    #   Nil() fails Cons((Nil), Nil), passes _
    #   Cons(*, *) hits Cons((Nil), Nil), expands to Cons((*), Cons(*, *)), Cons((*), Nil())
    #   Cons((*), Cons(*, *)) fails Cons((Nil), Nil), passes _
    #   Cons((*), Nil()) hits Cons((Nil), Nil), expands to Cons((Nil), Nil), Cons((Cons(*, *)), Nil)
    #   Cons((Nil), Nil) passes the first pattern
    #   Cons((Cons(*, *)), Nil) fails the first pattern, passes _
    # Note Nil() is passed to ExpandWildcardsConstructor many times in the above!
    tvm.relay.fromtext(code)
def test_expanding_empty_tuple():
    """Same termination principle as the nil-expansion test, but with an empty tuple."""
    code = """
#[version = "0.0.5"]
type List[A] {
    Cons(A, List[A]),
    Nil,
}
def @expand_on_empty_tuple_match(%a: (List[()], ())) -> int {
    match (%a) {
        (Cons((), Nil), ()) => 1,
        _ => 2,
    }
}
"""
    tvm.relay.fromtext(code)
if __name__ == "__main__":
    tvm.testing.main()
| 14,616 | 30.03397 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_merge_composite.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for merge composite."""
import pytest
import tvm
from tvm import relay, tir
from tvm.relay.dataflow_pattern import TuplePattern, TupleGetItemPattern, is_op, wildcard
from tvm.relay.testing import run_opt_pass
"""
The merge composite pass is designed to merge multiple relay operators, that
match a given pattern, and combine them into a single relay function.
For example suppose we have the graph:
conv2d
| (merge composite pass)
bias_add ====> conv2d_bias_relu
| (our target)
relu
Our Relay IR before the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%data, %kernel, kernel_size=[1, 1])
/* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
Our Relay IR after the pass:
fn (%data: Tensor[(1, 512, 28, 28), float32], %kernel: Tensor[(256, 512, 1, 1), float32],
%bias: Tensor[(256), float32]) -> Tensor[(1, 256, 28, 28), float32] {
%2 = fn (%x: Tensor[(1, 512, 28, 28), float32], %y: Tensor[(256, 512, 1, 1), float32],
%z: Tensor[(256), float32], Primitive=1, Composite="conv2d_bias_relu") ->
Tensor[(1, 256, 28, 28), float32] {
%0 = nn.conv2d(%x, %y, kernel_size=[1, 1]) /* ty=Tensor[(1, 256, 28, 28), float32] */;
%1 = nn.bias_add(%0, %z) /* ty=Tensor[(1, 256, 28, 28), float32] */;
nn.relu(%1) /* ty=Tensor[(1, 256, 28, 28), float32] */
};
%2(%data, %kernel, %bias) /* ty=Tensor[(1, 256, 28, 28), float32] */
}
As you can see in the second relay example, the pattern we specified has been wrapped
in a function. The function is then called, producing the same result as the first relay
example.
One convenient use for this pass is to offload multiple operators to a single external
codegen function.
"""
def make_add_sub_mul_pattern():
    r"""Create a pattern to match the following graph.

       add  sub
        \   /
         \ /
         mul
    """
    lhs = wildcard()
    rhs = wildcard()
    return (lhs + rhs) * (lhs - rhs)
def make_add_relu_pattern():
    r"""Create a pattern to match the following graph.

        add
         |
        relu
    """
    return is_op("nn.relu")(wildcard() + wildcard())
def make_conv_bias_relu_pattern():
    r"""Create a pattern to match the following graph.

       conv2d
         |
      bias_add
         |
        relu
    """
    data = wildcard()
    weight = wildcard()
    bias = wildcard()
    conv = is_op("nn.conv2d")(data, weight)
    biased = is_op("nn.bias_add")(conv, bias)
    return is_op("nn.relu")(biased)
def make_pattern_with_optional():
    r"""Create a pattern to match the following graph. Note that relu is optional.

       conv2d
         |
      bias_add
         |
       (relu)
    """
    data = wildcard()
    weight = wildcard()
    bias = wildcard()
    conv = is_op("nn.conv2d")(data, weight)
    biased = is_op("nn.bias_add")(conv, bias)
    # The relu may or may not be present; both shapes match this pattern.
    return biased.optional(lambda node: is_op("nn.relu")(node))
def make_add_add_add_pattern():
    r"""Create a pattern to match the following graph.

    Useful for testing re-using a call node.

        x    y
     /   \  /
     |    add
      \   |  \
       add |  |
        |  | /
        |  add
         \ /
         add  (shared `add` feeds two consumers)
    """
    x = wildcard()
    y = wildcard()
    shared_add = is_op("add")(x, y)
    second_add = is_op("add")(x, shared_add)
    # The first add is consumed twice: by second_add and by the final add.
    return is_op("add")(second_add, shared_add)
def make_bn_relu_pattern():
    r"""Create a pattern to match the following graph.

      batch_norm
          |
      TupleGetItem(0)
          |
         relu
    """
    data = wildcard()
    gamma = wildcard()
    beta = wildcard()
    mean = wildcard()
    var = wildcard()
    bn = is_op("nn.batch_norm")(data, gamma, beta, mean, var)
    # batch_norm returns a tuple; the pattern grabs its first element.
    normed = TupleGetItemPattern(bn, 0)
    return is_op("nn.relu")(normed)
def check_result(pattern_table, graph, expected_graph, import_prelude=False):
    """Run MergeComposite on `graph` and assert structural equality with `expected_graph`."""
    result = run_opt_pass(
        graph, relay.transform.MergeComposite(pattern_table), import_prelude=import_prelude
    )
    # A correct merge never leaves dangling free variables behind.
    assert not relay.analysis.free_vars(result), "Found free vars in the result graph: {0}".format(
        str(result)
    )
    expected = run_opt_pass(expected_graph, relay.transform.InferType())
    assert tvm.ir.structural_equal(
        result, expected, map_free_vars=True
    ), "Graph mismatch: output vs. expected\n{0}\n=====\n{1}".format(str(result), str(expected))
def test_simple_merge():
    r"""Test composite function is correctly produced from a simple graph.

    The pattern `make_add_relu_pattern` should be merged into a single op `add_relu`.

        a  b
        \ /               a  b
        add     ====>     \ /
         |              add_relu
        relu
    """
    pattern_table = [("add_relu", make_add_relu_pattern())]

    def before():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        summed = relay.add(a, b)
        return relay.Function([a, b], relay.nn.relu(summed))

    def expected():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))

        # Composite add_relu function wrapping the matched subgraph.
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        composite_body = relay.nn.relu(relay.add(in_1, in_2))
        add_relu = relay.Function([in_1, in_2], composite_body)
        add_relu = add_relu.with_attr("Composite", "add_relu")
        add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")

        # Main function just calls the composite.
        return relay.Function([a, b], relay.Call(add_relu, [a, b]))

    check_result(pattern_table, before(), expected())
def test_branch_merge():
    r"""Test composite function is correctly produced from a branching graph.

    The pattern `make_add_sub_mul_pattern` should be merged into a single op
    `add_sub_mul`, twice: once for (a, b) and once for (c, first result).

        a  b  a  b
        \ /   \ /            a  b
        add   sub             \ /
          \   /           add_sub_mul
           mul      c          |
          /   \      \         |
        c |  c |      ====> add_sub_mul
        \ /   \ /              |
        add   sub             relu
          \   /
           mul
            |
           relu
    """
    pattern_table = [("add_sub_mul", make_add_sub_mul_pattern())]

    def before():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        c = relay.var("c", shape=(10, 10))
        first = relay.multiply(relay.add(a, b), relay.subtract(a, b))
        second = relay.multiply(relay.add(c, first), relay.subtract(c, first))
        return relay.Function([a, b, c], relay.nn.relu(second))

    def expected():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        c = relay.var("c", shape=(10, 10))

        # First add_sub_mul composite.
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        body = relay.multiply(relay.add(in_1, in_2), relay.subtract(in_1, in_2))
        add_sub_mul = relay.Function([in_1, in_2], body)
        add_sub_mul = add_sub_mul.with_attr("Composite", "add_sub_mul")
        add_sub_mul = add_sub_mul.with_attr("PartitionedFromPattern", "add_subtract_multiply_")

        # Second (structurally identical) add_sub_mul composite.
        in_3 = relay.var("in_3", shape=(10, 10))
        in_4 = relay.var("in_4", shape=(10, 10))
        body_1 = relay.multiply(relay.add(in_3, in_4), relay.subtract(in_3, in_4))
        add_sub_mul_1 = relay.Function([in_3, in_4], body_1)
        add_sub_mul_1 = add_sub_mul_1.with_attr("Composite", "add_sub_mul")
        add_sub_mul_1 = add_sub_mul_1.with_attr("PartitionedFromPattern", "add_subtract_multiply_")

        # Main function chains the two composites, then relu.
        call_1 = relay.Call(add_sub_mul, [a, b])
        call_2 = relay.Call(add_sub_mul_1, [c, call_1])
        return relay.Function([a, b, c], relay.nn.relu(call_2))

    check_result(pattern_table, before(), expected())
def test_reuse_call_merge():
    r"""Test composite function is correctly produced from a graph that re-uses call nodes.

    The pattern `make_add_add_add_pattern` should be merged into a single op
    `add_add_add`; the shared `add` inside the pattern is consumed twice.

      x  y
       \/  \
      sub  |               x  y
      / |  /          \  /  |
     |  add    ====>   sub  |
      \ |  \            |  /
       add  |       add_add_add
        |  /
       add
    """
    pattern_table = [("add_add_add", make_add_add_add_pattern())]

    def before():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        diff = relay.subtract(a, b)
        # The pattern: shared add consumed by both subsequent adds.
        shared = relay.add(diff, b)
        chained = relay.add(diff, shared)
        return relay.Function([a, b], relay.add(chained, shared))

    def expected():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))

        # Composite add_add_add function.
        in_1 = relay.var("in_1", shape=(10, 10))
        in_2 = relay.var("in_2", shape=(10, 10))
        shared = relay.add(in_1, in_2)
        chained = relay.add(in_1, shared)
        body = relay.add(chained, shared)
        add_add_add = relay.Function([in_1, in_2], body)
        add_add_add = add_add_add.with_attr("Composite", "add_add_add")
        add_add_add = add_add_add.with_attr("PartitionedFromPattern", "add_add_add_")

        # Main function: the subtract stays outside the composite.
        diff = relay.subtract(a, b)
        return relay.Function([a, b], relay.Call(add_add_add, [diff, b]))

    check_result(pattern_table, before(), expected())
def test_multiple_patterns():
    r"""Test different patterns are merged correctly in the same graph.

    `make_conv_bias_relu_pattern` should merge into `conv2d_bias_relu` and
    `make_add_relu_pattern` into `add_relu`; the trailing multiply stays as-is.

        data   kernel
          \      /
           conv2d          data kernel bias
             |                \   |   /
             |   bias       conv2d_bias_relu
             |   /                |
          bias_add   ====>        |    a
             |                    |   /
            relu      a        add_relu
              \      /            |
               add                |    b
                |                 |   /
               relu    b         mul
                |     /
               mul
    """
    pattern_table = [
        ("conv2d_bias_relu", make_conv_bias_relu_pattern()),
        ("add_relu", make_add_relu_pattern()),
    ]

    def before():
        data = relay.var("data", shape=(1, 512, 28, 28))
        kernel = relay.var("kernel", shape=(256, 512, 1, 1))
        bias = relay.var("bias", shape=(256,))
        a = relay.var("a", shape=(1, 256, 28, 28))
        b = relay.var("b", shape=(1, 256, 28, 28))

        conv = relay.nn.conv2d(data, kernel, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1))
        biased = relay.nn.bias_add(conv, bias)
        activated = relay.nn.relu(biased)
        summed = relay.add(activated, a)
        activated_2 = relay.nn.relu(summed)
        return relay.Function([data, kernel, bias, a, b], relay.multiply(activated_2, b))

    def expected():
        data = relay.var("data", shape=(1, 512, 28, 28))
        kernel = relay.var("kernel", shape=(256, 512, 1, 1))
        bias = relay.var("bias", shape=(256,))
        a = relay.var("a", shape=(1, 256, 28, 28))
        b = relay.var("b", shape=(1, 256, 28, 28))

        # conv_bias_relu composite.
        in_1 = relay.var("in_1", shape=(1, 512, 28, 28))
        in_2 = relay.var("in_2", shape=(256, 512, 1, 1))
        in_3 = relay.var("in_3", shape=(256,))
        conv = relay.nn.conv2d(in_1, in_2, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1))
        biased = relay.nn.bias_add(conv, in_3)
        conv_bias_add_relu = relay.Function([in_1, in_2, in_3], relay.nn.relu(biased))
        conv_bias_add_relu = conv_bias_add_relu.with_attr("Composite", "conv2d_bias_relu")
        conv_bias_add_relu = conv_bias_add_relu.with_attr(
            "PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_"
        )

        # add_relu composite.
        in_4 = relay.var("in_4", shape=(1, 256, 28, 28))
        in_5 = relay.var("in_5", shape=(1, 256, 28, 28))
        add_relu = relay.Function([in_4, in_5], relay.nn.relu(relay.add(in_4, in_5)))
        add_relu = add_relu.with_attr("Composite", "add_relu")
        add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")

        # Main function chains both composites, then multiplies by b.
        conv_call = relay.Call(conv_bias_add_relu, [data, kernel, bias])
        add_relu_call = relay.Call(add_relu, [conv_call, a])
        return relay.Function([data, kernel, bias, a, b], relay.multiply(add_relu_call, b))

    check_result(pattern_table, before(), expected())
def test_optional_pattern():
    r"""Test a pattern with optional operators.

    The merge composite pass creates composite functions for all matched
    variants of the pattern, distinguished by "PartitionedFromPattern":

      Pattern:        Matched Case A:    Matched Case B:
       conv2d             conv2d            conv2d
         |                  |                 |
      bias_add           bias_add          bias_add
         |                  |
       (relu)              relu

    Case A yields PartitionedFromPattern="nn.conv2d_nn.bias_add_nn.relu_",
    case B yields "nn.conv2d_nn.bias_add_".
    """
    pattern_table = [("layer", make_pattern_with_optional())]

    def before():
        x = relay.var("x", shape=(1, 3, 7, 7))
        w1 = relay.var("w", shape=(3, 3, 1, 1))
        b1 = relay.var("b", shape=(3,))
        w2 = relay.var("w", shape=(3, 3, 1, 1))
        b2 = relay.var("b", shape=(3,))
        # First layer includes the optional relu; second layer does not.
        conv = relay.nn.conv2d(x, w1, kernel_size=(1, 1))
        bias = relay.nn.bias_add(conv, b1)
        relu = relay.nn.relu(bias)
        conv = relay.nn.conv2d(relu, w2, kernel_size=(1, 1))
        bias = relay.nn.bias_add(conv, b2)
        return relay.Function([x, w1, w2, b1, b2], bias)

    def expected():
        # Matched composite function A (with relu).
        x = relay.var("x")
        w = relay.var("w")
        b = relay.var("b")
        conv = relay.nn.conv2d(x, w, kernel_size=(1, 1))
        bias = relay.nn.bias_add(conv, b)
        func1 = relay.Function([x, w, b], relay.nn.relu(bias))
        func1 = func1.with_attr("Composite", "layer")
        func1 = func1.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")

        # Matched composite function B (without relu).
        x = relay.var("x")
        w = relay.var("w")
        b = relay.var("b")
        conv = relay.nn.conv2d(x, w, kernel_size=(1, 1))
        bias = relay.nn.bias_add(conv, b)
        func2 = relay.Function([x, w, b], bias)
        func2 = func2.with_attr("Composite", "layer")
        func2 = func2.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_")

        # Main function chains the two composites.
        x = relay.var("x", shape=(1, 3, 7, 7))
        w1 = relay.var("w", shape=(3, 3, 1, 1))
        b1 = relay.var("b", shape=(3,))
        w2 = relay.var("w", shape=(3, 3, 1, 1))
        b2 = relay.var("b", shape=(3,))
        out1 = func1(x, w1, b1)
        out2 = func2(out1, w2, b2)
        return relay.Function([x, w1, w2, b1, b2], out2)

    check_result(pattern_table, before(), expected())
def test_merge_order():
    r"""Test that patterns are merged in the order they appear in the pattern table.

    When one pattern is a subgraph of another, match priority comes from table
    order: the first entry wins, the last has lowest priority.

      A:        B:       C:
      add       add      abs
       |         |        |
      abs       abs      relu
       |
      relu
    """

    def pattern_A():
        x = wildcard()
        y = wildcard()
        out = is_op("add")(x, y)
        out = is_op("abs")(out)
        return is_op("nn.relu")(out)

    def pattern_B():
        x = wildcard()
        y = wildcard()
        out = is_op("add")(x, y)
        return is_op("abs")(out)

    def pattern_C():
        x = wildcard()
        out = is_op("abs")(x)
        return is_op("nn.relu")(out)

    def before():
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))
        out = relay.add(input_1, input_2)
        out = relay.abs(out)
        out = relay.nn.relu(out)
        return relay.Function([input_1, input_2], out)

    def after_A_priority():
        # A matches the whole add-abs-relu chain.
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))
        x = relay.var("x")
        y = relay.var("y")
        out = relay.nn.relu(relay.abs(relay.add(x, y)))
        merged_func = relay.Function([x, y], out)
        merged_func = merged_func.with_attr("Composite", "A")
        merged_func = merged_func.with_attr("PartitionedFromPattern", "add_abs_nn.relu_")
        ret = relay.Call(merged_func, [input_1, input_2])
        return relay.Function([input_1, input_2], ret)

    def after_B_priority():
        # B takes add-abs; the relu is left outside the composite.
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))
        x = relay.var("x")
        y = relay.var("y")
        merged_func = relay.Function([x, y], relay.abs(relay.add(x, y)))
        merged_func = merged_func.with_attr("Composite", "B")
        merged_func = merged_func.with_attr("PartitionedFromPattern", "add_abs_")
        out = relay.Call(merged_func, [input_1, input_2])
        return relay.Function([input_1, input_2], relay.nn.relu(out))

    def after_C_priority():
        # C takes abs-relu; the add is left outside the composite.
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))
        x = relay.var("x")
        merged_func = relay.Function([x], relay.nn.relu(relay.abs(x)))
        merged_func = merged_func.with_attr("Composite", "C")
        merged_func = merged_func.with_attr("PartitionedFromPattern", "abs_nn.relu_")
        out = relay.add(input_1, input_2)
        ret = relay.Call(merged_func, [out])
        return relay.Function([input_1, input_2], ret)

    # check A highest priority
    pattern_table = [
        ("A", pattern_A()),
        ("B", pattern_B()),
        ("C", pattern_C()),
    ]
    check_result(pattern_table, before(), after_A_priority())

    # check B highest priority
    pattern_table = [
        ("B", pattern_B()),
        ("C", pattern_C()),
        ("A", pattern_A()),
    ]
    check_result(pattern_table, before(), after_B_priority())

    # check C highest priority
    pattern_table = [
        ("C", pattern_C()),
        ("A", pattern_A()),
        ("B", pattern_B()),
    ]
    check_result(pattern_table, before(), after_C_priority())
def test_parallel_merge():
    r"""Tests that parallel patterns relying on the same inputs are correctly merged.

    The graph is two parallel add-sub-mul units, both consuming input_1 and
    input_2, with their results multiplied together. Both branches should be
    merged, and both composites should still consume the same input variables.
    """

    def before():
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))
        branch_1 = relay.multiply(
            relay.add(input_1, input_2), relay.subtract(input_1, input_2)
        )
        branch_2 = relay.multiply(
            relay.add(input_1, input_2), relay.subtract(input_1, input_2)
        )
        return relay.Function([input_1, input_2], relay.multiply(branch_1, branch_2))

    def expected():
        input_1 = relay.var("input_1", shape=(10, 10))
        input_2 = relay.var("input_2", shape=(10, 10))

        # First add_sub_mul composite.
        x = relay.var("x")
        y = relay.var("y")
        func_1 = relay.Function([x, y], relay.multiply(relay.add(x, y), relay.subtract(x, y)))
        func_1 = func_1.with_attr("Composite", "add_sub_mul")
        func_1 = func_1.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
        call_1 = relay.Call(func_1, [input_1, input_2])

        # Second (structurally identical) composite.
        x1 = relay.var("x1")
        y1 = relay.var("y1")
        func_2 = relay.Function(
            [x1, y1], relay.multiply(relay.add(x1, y1), relay.subtract(x1, y1))
        )
        func_2 = func_2.with_attr("Composite", "add_sub_mul")
        func_2 = func_2.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
        call_2 = relay.Call(func_2, [input_1, input_2])

        return relay.Function([input_1, input_2], relay.multiply(call_1, call_2))

    pattern_table = [("add_sub_mul", make_add_sub_mul_pattern())]
    check_result(pattern_table, before(), expected())
def test_multiple_input_subgraphs():
    r"""Test the case when multiple input subgraphs feed into another subgraph.

      (1)   (2)   (3)   (4)
      add   add   add   add
       |     |     |     |
      relu  relu  relu  relu
        \   /       \   /
         add         sub
           \         /
            \       /
              mul

    ----> When 1=3 and 2=4 (Case 'A'):

      add_relu  add_relu
          \      /
        add_sub_mul

    ----> When 1!=3 and 2!=4 (Case 'B'): the add_sub_mul pattern must NOT
    merge, because it requires the inputs of add and sub to be the *same*
    relay expressions; with four independent inputs only the add_relu
    composites are produced.
    """

    def before():
        before_funcs = {}
        inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(8)]
        add_relu_1 = relay.add(inputs[0], inputs[1])
        add_relu_1 = relay.nn.relu(add_relu_1)
        add_relu_2 = relay.add(inputs[2], inputs[3])
        add_relu_2 = relay.nn.relu(add_relu_2)
        add_relu_3 = relay.add(inputs[4], inputs[5])
        add_relu_3 = relay.nn.relu(add_relu_3)
        add_relu_4 = relay.add(inputs[6], inputs[7])
        add_relu_4 = relay.nn.relu(add_relu_4)
        add = relay.add(add_relu_1, add_relu_2)
        sub = relay.subtract(add_relu_3, add_relu_4)
        out = relay.multiply(add, sub)
        before_funcs["B"] = relay.Function(inputs, out)
        # Case A re-uses add_relu_1/add_relu_2 for both the add and the sub.
        sub = relay.subtract(add_relu_1, add_relu_2)
        out = relay.multiply(add, sub)
        before_funcs["A"] = relay.Function(inputs[:4], out)
        return before_funcs

    def after_A():
        inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(4)]

        x = relay.var("x")
        y = relay.var("y")
        add_relu_1 = relay.Function([x, y], relay.nn.relu(relay.add(x, y)))
        add_relu_1 = add_relu_1.with_attr("Composite", "add_relu")
        add_relu_1 = add_relu_1.with_attr("PartitionedFromPattern", "add_nn.relu_")
        add_relu_call_1 = relay.Call(add_relu_1, [inputs[0], inputs[1]])

        x1 = relay.var("x1")
        y1 = relay.var("y1")
        add_relu_2 = relay.Function([x1, y1], relay.nn.relu(relay.add(x1, y1)))
        add_relu_2 = add_relu_2.with_attr("Composite", "add_relu")
        add_relu_2 = add_relu_2.with_attr("PartitionedFromPattern", "add_nn.relu_")
        add_relu_call_2 = relay.Call(add_relu_2, [inputs[2], inputs[3]])

        # The add_sub_mul composite takes both add_relu results as its two inputs.
        x2 = relay.var("x2")
        y2 = relay.var("y2")
        add_sub_mul = relay.Function(
            [x2, y2], relay.multiply(relay.add(x2, y2), relay.subtract(x2, y2))
        )
        add_sub_mul = add_sub_mul.with_attr("Composite", "add_sub_mul")
        add_sub_mul = add_sub_mul.with_attr("PartitionedFromPattern", "add_subtract_multiply_")
        add_sub_mul_call = relay.Call(add_sub_mul, [add_relu_call_1, add_relu_call_2])
        return relay.Function(inputs, add_sub_mul_call)

    def after_B():
        inputs = [relay.var("input_" + str(i), shape=(10, 10)) for i in range(8)]
        add_relu_calls = []
        for i in range(4):
            x = relay.var("x" + str(i))
            y = relay.var("x" + str(i))
            add_relu = relay.Function([x, y], relay.nn.relu(relay.add(x, y)))
            add_relu = add_relu.with_attr("Composite", "add_relu")
            add_relu = add_relu.with_attr("PartitionedFromPattern", "add_nn.relu_")
            add_relu_calls.append(relay.Call(add_relu, [inputs[i * 2], inputs[i * 2 + 1]]))

        # add_sub_mul cannot merge here: add and sub have different operands.
        add = relay.add(add_relu_calls[0], add_relu_calls[1])
        sub = relay.subtract(add_relu_calls[2], add_relu_calls[3])
        return relay.Function(inputs, relay.multiply(add, sub))

    pattern_table = [
        ("add_sub_mul", make_add_sub_mul_pattern()),
        ("add_relu", make_add_relu_pattern()),
    ]
    check_result(pattern_table, before()["A"], after_A())
    check_result(pattern_table, before()["B"], after_B())
def test_tuple_get_item_merge():
    """Test composite function can be merged from pattern containing TupleGetItem nodes."""
    pattern_table = [("bn_relu", make_bn_relu_pattern())]
    def before():
        # batch_norm returns a tuple; the pattern must match through TupleGetItem.
        x = relay.var("x", shape=(1, 8))
        gamma = relay.var("gamma", shape=(8,))
        beta = relay.var("beta", shape=(8,))
        moving_mean = relay.var("moving_mean", shape=(8,))
        moving_var = relay.var("moving_var", shape=(8,))
        bn_node = relay.nn.batch_norm(x, gamma, beta, moving_mean, moving_var)
        tuple_get_item_node = bn_node[0]
        r = relay.nn.relu(tuple_get_item_node)
        return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
    def expected():
        x = relay.var("x", shape=(1, 8))
        beta = relay.var("beta", shape=(8,))
        gamma = relay.var("gamma", shape=(8,))
        moving_mean = relay.var("moving_mean", shape=(8,))
        moving_var = relay.var("moving_var", shape=(8,))
        # bn_relu function
        in_1 = relay.var("x1", shape=(1, 8))
        in_2 = relay.var("gamma1", shape=(8,))
        in_3 = relay.var("beta1", shape=(8,))
        in_4 = relay.var("moving_mean1", shape=(8,))
        in_5 = relay.var("moving_var1", shape=(8,))
        bn_node = relay.nn.batch_norm(in_1, in_2, in_3, in_4, in_5)
        tuple_get_item_node = bn_node[0]
        relu_node = relay.nn.relu(tuple_get_item_node)
        bn_relu = relay.Function([in_1, in_2, in_3, in_4, in_5], relu_node)
        bn_relu = bn_relu.with_attr("Composite", "bn_relu")
        bn_relu = bn_relu.with_attr(
            "PartitionedFromPattern", "nn.batch_norm_TupleGetItem0_nn.relu_"
        )
        # merged function
        r = relay.Call(bn_relu, [x, gamma, beta, moving_mean, moving_var])
        return relay.Function([x, gamma, beta, moving_mean, moving_var], r)
    check_result(pattern_table, before(), expected())
def test_pattern_with_check():
    """Test that a pattern's optional check function gates whether partitioning happens."""
    def before():
        x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
        b = relay.var("b", shape=(8,))
        conv = relay.nn.conv2d(x, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
        bias = relay.nn.bias_add(conv, b)
        relu = relay.nn.relu(bias)
        return relay.Function([x, w, b], relu)
    def _check_true(extract):
        # The conv2d call is two levels below the relu at the extract's root.
        conv = extract.args[0].args[0]
        return conv.attrs.data_layout == "NHWC"
    def _check_false(extract):
        conv = extract.args[0].args[0]
        return conv.attrs.data_layout == "NCHW"
    def expected():
        x = relay.var("x")
        w = relay.var("w")
        b = relay.var("b")
        conv = relay.nn.conv2d(x, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
        bias = relay.nn.bias_add(conv, b)
        relu = relay.nn.relu(bias)
        func = relay.Function([x, w, b], relu)
        func = func.with_attr("Composite", "conv_bias_relu")
        func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
        x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
        b = relay.var("b", shape=(8,))
        return relay.Function([x, w, b], func(x, w, b))
    # A failing check must leave the graph untouched; a passing one partitions it.
    pattern_table_false = [("conv_bias_relu", make_conv_bias_relu_pattern(), _check_false)]
    check_result(pattern_table_false, before(), before())
    pattern_table_true = [("conv_bias_relu", make_conv_bias_relu_pattern(), _check_true)]
    check_result(pattern_table_true, before(), expected())
def test_diamond_not_merge():
    r"""
    The pattern on the left shouldn't match the structure on the right

        relu            relu
        |  \            |  \
        |  clip         |  add
        |  /            |   |
        mul             |  clip
                        |  /
                        mul
    """
    def get_pattern():
        conv = make_conv_bias_relu_pattern()
        clip = is_op("clip")(conv, wildcard(), wildcard())
        return is_op("multiply")(conv, clip)
    def get_net():
        data = relay.var("data", shape=(1, 512, 28, 28))
        kernel = relay.var("kernel", shape=(256, 512, 1, 1))
        conv = relay.nn.conv2d(data, kernel, kernel_size=(1, 1), padding=(0, 0), strides=(1, 1))
        bias = relay.nn.bias_add(conv, relay.var("bias", shape=(256,)))
        relu = relay.nn.relu(bias)
        # The extra `add` between relu and clip breaks the diamond pattern.
        add = relay.op.add(relu, relay.const(1.0))
        clip2 = relay.op.clip(add, 0, 255)
        mul = relay.op.multiply(relu, clip2)
        return relay.Function(relay.analysis.free_vars(mul), mul)
    pattern_table = [("pat", get_pattern())]
    net = get_net()
    # Expect the network to come through MergeComposite unchanged.
    check_result(pattern_table, net, net)
def test_type_check():
    """Test that we can query tensor types in the 'check' function."""
    def before():
        x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
        b = relay.var("b", shape=(8,))
        add = relay.op.add(x, x)
        relu = relay.nn.relu(add)
        conv = relay.nn.conv2d(
            relu, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC"
        )
        bias = relay.nn.bias_add(conv, b)
        relu2 = relay.nn.relu(bias)
        # Run InferType so `checked_type` is populated for the check functions.
        return run_opt_pass(relay.Function([x, w, b], relu2), relay.transform.InferType())
    def expected_false():
        # Only add_relu is partitioned; the conv_bias_relu check fails.
        x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
        b = relay.var("b", shape=(8,))
        x0 = relay.var("x")
        add = relay.op.add(x0, x0)
        relu = relay.nn.relu(add)
        func = relay.Function([x0], relu)
        func = func.with_attr("PartitionedFromPattern", "add_nn.relu_")
        func = func.with_attr("Composite", "add_relu")
        call = relay.Call(func, [x])
        conv = relay.nn.conv2d(
            call, w, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC"
        )
        bias = relay.nn.bias_add(conv, b)
        relu2 = relay.nn.relu(bias)
        return relay.Function([x, w, b], relu2)
    def expected_true():
        # Both patterns are partitioned when the conv_bias_relu check passes.
        x = relay.var("x", shape=(1, 10, 10, 10))
        w = relay.var("w", shape=(10, 10, 3, 3))
        b = relay.var("b", shape=(8,))
        x0 = relay.var("x")
        add = relay.op.add(x0, x0)
        relu = relay.nn.relu(add)
        func = relay.Function([x0], relu)
        func = func.with_attr("PartitionedFromPattern", "add_nn.relu_")
        func = func.with_attr("Composite", "add_relu")
        call = relay.Call(func, [x])
        x2 = relay.var("x")
        w1 = relay.var("w")
        b1 = relay.var("b")
        conv = relay.nn.conv2d(x2, w1, kernel_size=(3, 3), kernel_layout="OIHW", data_layout="NHWC")
        bias = relay.nn.bias_add(conv, b1)
        relu2 = relay.nn.relu(bias)
        func = relay.Function([x2, w1, b1], relu2)
        func = func.with_attr("Composite", "conv_bias_relu")
        func = func.with_attr("PartitionedFromPattern", "nn.conv2d_nn.bias_add_nn.relu_")
        call = relay.Call(func, [call, w, b])
        return relay.Function([x, w, b], call)
    def _check_type_true(extract):
        conv = extract.args[0].args[0]
        typ = conv.checked_type
        return bool(typ.shape[0] == 1)
    def _check_type_false(extract):
        conv = extract.args[0].args[0]
        typ = conv.checked_type
        return bool(typ.shape[0] != 1)
    pattern_table_false = [
        ("add_relu", make_add_relu_pattern()),
        ("conv_bias_relu", make_conv_bias_relu_pattern(), _check_type_false),
    ]
    check_result(pattern_table_false, before(), expected_false())
    pattern_table_true = [
        ("add_relu", make_add_relu_pattern()),
        ("conv_bias_relu", make_conv_bias_relu_pattern(), _check_type_true),
    ]
    check_result(pattern_table_true, before(), expected_true())
def test_einsum_reshape_pattern():
    """Test MergeComposite does not cause error with einsum operator."""
    def make_einsum_reshape_pattern():
        # Optional reshapes around an einsum over a 2-tuple of inputs.
        x = wildcard()
        x = is_op("reshape")(x) | x
        y = wildcard()
        y = is_op("reshape")(y) | y
        z = is_op("einsum")(TuplePattern([x, y]))
        r = is_op("reshape")(z) | z
        return r
    pattern_table = [
        (
            "einsum_reshape",
            make_einsum_reshape_pattern(),
        )
    ]
    def before():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        c = relay.reshape(a, [20, 5])
        d = relay.reshape(b, [20, 5])
        r = relay.einsum([c, d], "...ab,...cb->...ac")
        return relay.Function([a, b], r)
    def expected():
        a = relay.var("a", shape=(10, 10))
        b = relay.var("b", shape=(10, 10))
        c = relay.reshape(a, [20, 5])
        d = relay.reshape(b, [20, 5])
        r = relay.einsum([c, d], "...ab,...cb->...ac")
        func = relay.Function([a, b], r)
        func = func.with_attr("Composite", "einsum_reshape")
        func = func.with_attr("PartitionedFromPattern", "reshape_reshape_Tuple_einsum_")
        input0 = relay.var("a", shape=(10, 10))
        input1 = relay.var("b", shape=(10, 10))
        output = func(input0, input1)
        return relay.Function([input0, input1], output)
    check_result(pattern_table, before(), expected())
if __name__ == "__main__":
    # Run this file's tests directly via TVM's pytest wrapper.
    tvm.testing.main()
| 37,324 | 35.273081 | 100 | py |
tvm | tvm-main/tests/python/relay/test_used_memory_annotator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""
Testing for the pass that annotates used memory for each primitive
Relay function.
"""
import pytest
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprVisitor
def AnnotateUsedMemory():
    """Construct the AnnotateUsedMemory pass via the transform FFI."""
    make_pass = relay.transform._ffi_api.AnnotateUsedMemory
    return make_pass()
class CheckUsedMemoryAnnotation(ExprVisitor):
    """
    Check that the annotations on each function in the graph match
    what is expected.
    """

    def __init__(self, expected_annotations, expected_io_annotation):
        # Consumed front-to-back as primitive functions are visited.
        self.expected_annotations = expected_annotations
        self.expected_io_annotation = expected_io_annotation
        super().__init__()

    def visit_function(self, fn):
        # `attrs` may be None for a plain function; guard before the membership test.
        if fn.attrs and "Primitive" in fn.attrs:
            assert (
                "used_memory" in fn.attrs
            ), "Primitive function does not have used_memory annotation."
            # Fixed misleading message: this fires when the graph contains MORE
            # primitive functions than the test supplied expectations for.
            assert (
                len(self.expected_annotations) > 0
            ), "Encountered more used_memory annotations than expected."
            expected_mem = self.expected_annotations.pop(0)
            actual_mem = [int(x) for x in fn.attrs["used_memory"]]
            assert expected_mem == actual_mem, (
                f"Expected used memory annotation {expected_mem} "
                f"did not match actual annotation {actual_mem}"
            )
        super().visit_function(fn)

    def __call__(self, fn):
        assert (
            fn.attrs["io_used_memory"] == self.expected_io_annotation
        ), "Expected IO annotation did not match."
        self.visit(fn.body)
        # New completeness check: every supplied expectation must have been consumed.
        assert not self.expected_annotations, "Not all expected annotations were compared."
def _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation):
    """Run the annotation pipeline on `mod` and verify the resulting annotations."""
    pipeline = (
        relay.transform.InferType(),
        relay.transform.ToANormalForm(),
        relay.transform.InferType(),
        AnnotateUsedMemory(),
    )
    for transform_pass in pipeline:
        mod = transform_pass(mod)
    checker = CheckUsedMemoryAnnotation(expected_annotations, expected_io_annotation)
    checker(mod["main"])
def _create_primitive_function(expr):
    """Wrap `expr` in a relay.Function carrying the Primitive attribute."""
    params = relay.analysis.free_vars(expr)
    return relay.Function(params, expr).with_attr("Primitive", 1)
def test_simple():
    """
    Test simple graph with one primitive function.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    call = relay.Call(get_inner_func(), [ifm])
    mod = tvm.IRModule.from_expr(call)
    expected_annotations = [
        # Input and output tensors are the same size, hence 2x one tensor.
        [2 * (1 * 2 * 2 * 4)],
    ]
    expected_io_annotation = 2 * (1 * 2 * 2 * 4)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_multiple_functions():
    """
    Test a graph with multiple primitive functions.
    """
    def get_inner_func(ifm_shape):
        x = relay.var("x", shape=ifm_shape, dtype="int8")
        x = relay.nn.max_pool2d(x, pool_size=(2, 2), layout="NHWC")
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 8, 8, 2), dtype="int8")
    x = get_inner_func((1, 8, 8, 2))
    x = relay.Call(x, [ifm])
    y = get_inner_func((1, 7, 7, 2))
    y = relay.Call(y, [x])
    z = get_inner_func((1, 6, 6, 2))
    z = relay.Call(z, [y])
    mod = tvm.IRModule.from_expr(z)
    # Each 2x2 pool shrinks spatial dims by one; each entry = input + output bytes.
    expected_annotations = [
        [(1 * 8 * 8 * 2) + (1 * 7 * 7 * 2)],
        [(1 * 7 * 7 * 2) + (1 * 6 * 6 * 2)],
        [(1 * 6 * 6 * 2) + (1 * 5 * 5 * 2)],
    ]
    expected_io_annotation = (1 * 8 * 8 * 2) + (1 * 5 * 5 * 2)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_mixed_data_types():
    """
    Test a graph with a primitive function that has mixed datatypes.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 2), dtype="int16")
        x = relay.cast(x, dtype="uint32")
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 2, 2, 2), dtype="int16")
    x = get_inner_func()
    x = relay.Call(x, [ifm])
    mod = tvm.IRModule.from_expr(x)
    # int16 input is 2 bytes/element; the uint32 output is 4 bytes/element.
    expected_annotations = [
        [(1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4],
    ]
    expected_io_annotation = (1 * 2 * 2 * 2) * 2 + (1 * 2 * 2 * 2) * 4
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_function_call():
    """
    Test a graph when the results of two functions are concatenated
    into a single result. The second function will also have the result
    of the first function alive so will be annotated with a larger
    "used memory" value.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.reshape(x, newshape=(1, 4, 30))
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
    x = relay.Call(get_inner_func(), [ifm])
    y = relay.Call(get_inner_func(), [ifm])
    z = relay.concatenate([x, y], axis=0)
    mod = tvm.IRModule.from_expr(z)
    expected_annotations = [
        [(1 * 4 * 5 * 6) + (1 * 4 * 30)],
        # the output tensor from the previous function is also alive
        [(1 * 4 * 5 * 6) + (1 * 4 * 30) + (1 * 4 * 30)],
    ]
    # IO: the shared input plus the concatenated output.
    expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 60)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_many_different_parallel_calls():
    """
    Test a graph that calls many different functions in parallel.

                       input
                /        |        \
      prim_func_1   prim_func_2   prim_func_3
                \        |        /
                    prim_func_4
    """
    def get_inner_func_1():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.tanh(x)
        x = _create_primitive_function(x)
        return x
    def get_inner_func_2():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.nn.max_pool2d(x, pool_size=(1, 1), layout="NHWC")
        x = _create_primitive_function(x)
        return x
    def get_inner_func_3():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.abs(x)
        x = relay.nn.relu(x)
        x = relay.exp(x)
        x = _create_primitive_function(x)
        return x
    def get_inner_func_4():
        # Concatenates the three branch results along the channel axis.
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        y = relay.var("y", shape=(1, 4, 5, 6), dtype="int8")
        z = relay.var("z", shape=(1, 4, 5, 6), dtype="int8")
        out = relay.concatenate([x, y, z], axis=3)
        out = _create_primitive_function(out)
        return out
    ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
    x = relay.Call(get_inner_func_1(), [ifm])
    y = relay.Call(get_inner_func_2(), [ifm])
    z = relay.Call(get_inner_func_3(), [ifm])
    a = relay.Call(get_inner_func_4(), [x, y, z])
    mod = tvm.IRModule.from_expr(a)
    expected_annotations = [
        [(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
        # output from prim_func_1 is also still alive
        [(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
        # outputs from prim_func_1 and prim_func_2 are also still alive
        [(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
        [(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 6) + (1 * 4 * 5 * 18)],
    ]
    expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 5 * 18)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_nested_branches():
    """
    Tests a graph with branches that also branch.

              input
             /     \
    prim_func_1   prim_func_2
                   /     \
          prim_func_3   prim_func_4
    """
    def get_generic_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.relu(x)
        return _create_primitive_function(x)
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    a = relay.Call(get_generic_inner_func(), [ifm])
    b = relay.Call(get_generic_inner_func(), [ifm])
    c = relay.Call(get_generic_inner_func(), [b])
    d = relay.Call(get_generic_inner_func(), [b])
    # All three leaf results are concatenated into the single output.
    out = relay.concatenate([a, c, d], axis=3)
    mod = tvm.IRModule.from_expr(out)
    expected_annotations = [
        [(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
        # output from prim_func_1 is also still alive
        [(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
        # output from prim_func_1 is also still alive
        [(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
        # outputs from prim_func_1 and prim_func_3 are also still alive
        [(1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4) + (1 * 2 * 2 * 4)],
    ]
    expected_io_annotation = (1 * 2 * 2 * 4) + (1 * 2 * 2 * 12)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_composite_inner_function():
    """
    Tests the typical BYOC use case where a primitive function
    contains a composite function.
    """
    def get_inner_func():
        # Composite function wrapped inside the primitive function.
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x, pool_size=(2, 2), layout="NHWC")
        x = relay.Function(relay.analysis.free_vars(x), x)
        x = x.with_attr("Composite", "my_composite_func")
        y = relay.var("y", shape=(1, 2, 2, 4), dtype="int8")
        z = relay.Call(x, [y])
        return _create_primitive_function(z)
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    x = relay.Call(get_inner_func(), [ifm])
    mod = tvm.IRModule.from_expr(x)
    expected_annotations = [
        [(1 * 2 * 2 * 4) + (1 * 1 * 1 * 4)],
    ]
    expected_io_annotation = (1 * 2 * 2 * 4) + (1 * 1 * 1 * 4)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_multiple_calls_to_same_function():
    """
    Tests the case when there are multiple calls to the same function.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
        x = _create_primitive_function(x)
        return x
    inner_func = get_inner_func()
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    call1 = relay.Call(inner_func, [ifm])
    call2 = relay.Call(inner_func, [call1])
    mod = tvm.IRModule.from_expr(call2)
    # One expected entry with one value per call site of the shared function.
    expected_annotations = [[2 * (1 * 2 * 2 * 4), 2 * (1 * 2 * 2 * 4)]]
    expected_io_annotation = 2 * (1 * 2 * 2 * 4)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_calls_to_same_function():
    """
    Test parallel calls to the same function.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
        x = _create_primitive_function(x)
        return x
    inner_func = get_inner_func()
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    call1 = relay.Call(inner_func, [ifm])
    call2 = relay.Call(inner_func, [ifm])
    concat = relay.concatenate([call1, call2], axis=0)
    mod = tvm.IRModule.from_expr(concat)
    # Second call site also has the first call's output alive, hence 3x.
    expected_annotations = [[2 * (1 * 2 * 2 * 4), 3 * (1 * 2 * 2 * 4)]]
    expected_io_annotation = 3 * (1 * 2 * 2 * 4)
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_parallel_calls_with_non_ifm_input():
    """
    Test a graph that calls many different functions in parallel where
    the input is not the input to the function.

                  y = f(x)
                /    |     \
      z0 = g0(y)    ...    zi = gi(y)
                \    |     /
                   concat
    """
    def get_inner_func_1():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.tanh(x)
        x = _create_primitive_function(x)
        return x
    def get_inner_func_2():
        x = relay.var("x", shape=(1, 4, 5, 6), dtype="int8")
        x = relay.nn.max_pool2d(x, pool_size=(2, 2))
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 4, 5, 6), dtype="int8")
    y = relay.Call(get_inner_func_1(), [ifm])
    g = get_inner_func_2()
    no_calls = 20
    z = [relay.Call(g, [y]) for _ in range(0, no_calls)]
    out = relay.concatenate(z, axis=3)
    mod = tvm.IRModule.from_expr(out)
    expected_annotations = [
        [(1 * 4 * 5 * 6) + (1 * 4 * 5 * 6)],
        # Each successive call site also keeps all earlier outputs alive.
        [(1 * 4 * 5 * 6) + (1 * 4 * 4 * 5) * i for i in range(1, no_calls + 1)],
    ]
    expected_io_annotation = (1 * 4 * 5 * 6) + (1 * 4 * 4 * (5 * no_calls))
    _check_used_memory_annotations(mod, expected_annotations, expected_io_annotation)
def test_dynamic_io_tensor_not_supported():
    """
    Test to check dynamic IO tensor error.
    """
    def get_inner_func():
        x = relay.var("x", shape=(1, 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
        x = _create_primitive_function(x)
        return x
    # The module's input has a dynamic (Any) dimension, so the pass must raise.
    ifm = relay.var("input", shape=(1, 2, 2, relay.Any()), dtype="int8")
    call = relay.Call(get_inner_func(), [ifm])
    mod = tvm.IRModule.from_expr(call)
    err_rgx = r"AnnotateUsedMemory does not support dynamic shapes"
    with pytest.raises(tvm.TVMError, match=err_rgx):
        _check_used_memory_annotations(mod, [], [])
def test_dynamic_callsite_tensor_not_supported():
    """
    Test to check dynamic callsite tensor error.
    """
    def get_inner_func():
        # The primitive function's own parameter has a dynamic (Any) dimension.
        x = relay.var("x", shape=(relay.Any(), 2, 2, 4), dtype="int8")
        x = relay.nn.max_pool2d(x)
        x = _create_primitive_function(x)
        return x
    ifm = relay.var("input", shape=(1, 2, 2, 4), dtype="int8")
    call = relay.Call(get_inner_func(), [ifm])
    mod = tvm.IRModule.from_expr(call)
    err_rgx = r"AnnotateUsedMemory does not support dynamic shapes"
    with pytest.raises(tvm.TVMError, match=err_rgx):
        _check_used_memory_annotations(mod, [], [])
| 14,738 | 32.882759 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_partial_eval.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.prelude import Prelude
from tvm.relay import op, create_executor, transform
from tvm.relay import Var, TypeVar, TupleGetItem, Let, Function, const, RefRead, RefWrite, RefCreate
from tvm.relay import TensorType, Tuple, If, Clause, PatternConstructor, PatternVar, Match
from tvm.relay import GlobalVar, Call
from tvm.relay.transform import gradient
from tvm.relay.testing import make_nat_expr, run_infer_type
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
    """Evaluate `expr` on the LLVM target and compare against `expected_result`."""
    executor = create_executor(mod=mod, device=tvm.device("llvm", 0), target="llvm")
    actual = executor.evaluate(expr)
    np.testing.assert_allclose(actual.numpy(), expected_result, rtol=rtol)
def run_opt_pass(expr, passes):
    """Apply `passes` (a single pass or a list) to `expr` at opt level 3.

    Returns the optimized function when `expr` is a Function, otherwise its body.
    """
    if not isinstance(passes, list):
        passes = [passes]
    mod = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        mod = tvm.transform.Sequential(passes)(mod)
    main = mod["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def tipe(expr):
    """Partially evaluate `expr`, then re-infer types."""
    pipeline = [transform.PartialEvaluate(), transform.InferType()]
    return run_opt_pass(expr, pipeline)
def dcpe(expr, mod=None, grad=False, ignore_impurity=False):
    """Run PartialEvaluate followed by DeadCodeElimination on `expr`.

    If `grad` is true, `expr` is differentiated first. If `mod` is given,
    `expr` is installed as its "main" and the optimized "main" is returned;
    otherwise the passes are applied to `expr` standalone via run_opt_pass.
    """
    passes = [
        transform.PartialEvaluate(),
        transform.InferType(),
        transform.DeadCodeElimination(inline_once=True, ignore_impurity=ignore_impurity),
        transform.InferType(),
    ]
    if grad:
        expr = gradient(run_infer_type(expr))
    if mod:
        assert isinstance(expr, Function)
        mod["main"] = expr
        seq = tvm.transform.Sequential(passes)
        mod = seq(mod)
        return mod["main"]
    return run_opt_pass(expr, passes)
def test_tuple():
    """PE reduces TupleGetItem on a literal tuple to the selected field."""
    t = TypeVar("t")
    x = Var("x", t)
    body = TupleGetItem(relay.Tuple([relay.const(4.0), x]), 1)
    f = Function([x], body, None, [t])
    expected = relay.Function([x], x, None, [t])
    expected = run_opt_pass(expected, transform.InferType())
    assert tvm.ir.structural_equal(dcpe(f), expected)
def test_const_inline():
    """Applying a known function to a constant folds to a constant."""
    t = relay.TensorType([], "float32")
    d = Var("d", t)
    double = Function([d], d + d)
    orig = double(const(4.0))
    assert tvm.ir.structural_equal(dcpe(orig), const(8.0))
def test_ref():
    """PE eliminates ref create/read/write, leaving pure arithmetic."""
    t = relay.TensorType([], "float32")
    d = relay.Var("d", t)
    r = relay.Var("r", relay.RefType(t))
    x = relay.Var("x")
    body = relay.RefRead(r)
    body = Let(x, RefWrite(r, RefRead(r) * RefRead(r)), body)
    body = Let(r, RefCreate(d), body)
    square = Function([d], body)
    expected = run_opt_pass(Function([d], d * d), transform.InferType())
    # TODO(mbs): Revisit once DCE eliminates dead writes.
    actual = dcpe(square, ignore_impurity=True)
    assert tvm.ir.structural_equal(actual, expected)
def test_empty_ad():
    """Gradient of the identity simplifies to (d, (ones_like(d),))."""
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d)
    # TODO(mbs): Revisit once DCE eliminates dead writes.
    g = dcpe(f, grad=True, ignore_impurity=True)
    expected = Function([d], Tuple([d, Tuple([op.ones_like(d)])]))
    expected = run_opt_pass(expected, transform.InferType())
    assert tvm.ir.structural_equal(g, expected)
def test_ad():
    """Gradient of d * d partially evaluates to the expected closed form."""
    shape = (10, 10)
    dtype = "float32"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    f = Function([d], d * d)
    # TODO(mbs): Revisit once DCE eliminates dead writes.
    g = dcpe(f, grad=True, ignore_impurity=True)
    m = d * d
    x = relay.Var("x")
    o = op.ones_like(x)
    x1 = relay.Var("x1")
    grad = op.zeros_like(d) + op.collapse_sum_like(x1 * d, d) + op.collapse_sum_like(x1 * d, d)
    body = Tuple([x, Tuple([grad])])
    body = relay.Let(x1, o, body)
    expected = Function([d], relay.Let(x, m, body))
    expected = run_opt_pass(expected, transform.InferType())
    tvm.ir.assert_structural_equal(g, expected)
def test_if_ref():
    """PE of a ref doubled through both branches of an `if` preserves semantics."""
    shape = ()
    dtype = "bool"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    r = Var("r")
    update = Function([], RefWrite(r, RefRead(r) + RefRead(r)))
    u = Var("u")
    body = If(d, u(), u())
    eff = Var("eff")
    body = Let(eff, body, RefRead(r))
    f = Function([d], Let(r, RefCreate(const(1)), Let(u, update, body)))
    pe_f = tipe(f)
    # Both the original and the partially evaluated function must read 2.
    f_res = create_executor().evaluate(f)(const(True))
    pe_f_res = create_executor().evaluate(pe_f)(const(True))
    np.testing.assert_allclose(f_res.numpy(), 2 * np.ones_like(f_res.numpy()))
    np.testing.assert_allclose(pe_f_res.numpy(), 2 * np.ones_like(pe_f_res.numpy()))
def test_function_invalidate():
    """A ref write between fetches must invalidate PE's knowledge of the ref."""
    shape = ()
    dtype = "bool"
    t = TensorType(shape, dtype)
    d = Var("d", t)
    r = Var("r")
    fetch = Function([], RefRead(r))
    fet = Var("fetch")
    fet_obscured = Var("fetch_obscured")
    u = Var("u")
    body = If(d, fet_obscured(), fet_obscured())
    # The write happens after the fetch closure is bound but before it is called.
    body = Let(u, RefWrite(r, const(1)), body)
    body = Let(fet_obscured, If(d, fet, fet), body)
    body = Let(fet, fetch, body)
    body = Let(r, RefCreate(const(0)), body)
    f = Function([d], body)
    pe_f = tipe(f)
    f_res = create_executor().evaluate(f)(const(True))
    pe_f_res = create_executor().evaluate(pe_f)(const(True))
    np.testing.assert_allclose(f_res.numpy(), np.ones_like(f_res.numpy()))
    np.testing.assert_allclose(pe_f_res.numpy(), np.ones_like(pe_f_res.numpy()))
def test_head_cons():
    """hd(cons(x, nil)) partially evaluates to x."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    t = TypeVar("t")
    x = Var("x", t)
    rlist, cons, nil = p.mod.get_type("List")
    hd = p.mod.get_global_var("hd")
    body = hd(cons(x, nil()))
    f = Function([x], body, None, [t])
    res = dcpe(f, mod)
    expected_mod = tvm.IRModule.from_expr(Function([x], x, t, [t]))
    assert tvm.ir.structural_equal(res, expected_mod["main"])
def test_map():
    """Mapping the identity over a literal list evaluates the map away."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    rlist, cons, nil = p.mod.get_type("List")
    rmap = p.mod.get_global_var("map")
    f = GlobalVar("f")
    t = TypeVar("t")
    a = Var("a", t)
    mod[f] = Function([a], a, t, [t])
    orig = rmap(f, cons(const(1), cons(const(2), cons(const(3), nil()))))
    expected = cons((const(1)), cons((const(2)), cons((const(3)), nil())))
    expected = Function([], expected)
    mod["main"] = expected
    mod = transform.InferType()(mod)
    expected = mod["main"]
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, expected.body)
def test_loop():
    """A non-terminating loop is left as a call rather than unrolled forever."""
    mod = tvm.IRModule()
    t = TypeVar("t")
    x = Var("x", t)
    loop = GlobalVar("loop")
    mod[loop] = Function([x], loop(x), t, [t])
    expected = Call(loop, [const(1)])
    mod["main"] = Function([], expected)
    mod = transform.InferType()(mod)
    expected = mod["main"].body
    call = Function([], loop(const(1)))
    res = dcpe(call, mod=mod)
    assert tvm.ir.structural_equal(res.body, expected)
def test_swap_loop():
    """An argument-swapping infinite loop is preserved unchanged by PE."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, _, _ = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    loop = GlobalVar("loop")
    mod[loop] = Function([x, y], loop(y, x), nat())
    prog = loop(make_nat_expr(p, 1), make_nat_expr(p, 2))
    res = Function([], prog)
    res = dcpe(res, mod=mod)
    assert tvm.ir.structural_equal(prog, res.body)
def test_abs_diff():
    """|7 - 3| over Peano nats partially evaluates to the literal 4."""
    # TODO(@M.K.): refactor using tuple pattern (not yet implemented)
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    xp = Var("x'", nat())
    yp = Var("y'", nat())
    diff = GlobalVar("diff")
    y_z_case = Clause(PatternConstructor(z, []), x)
    y_s_case = Clause(PatternConstructor(s, [PatternVar(yp)]), diff(yp, xp))
    x_z_case = Clause(PatternConstructor(z, []), y)
    x_s_case = Clause(PatternConstructor(s, [PatternVar(xp)]), Match(y, [y_z_case, y_s_case]))
    mod[diff] = Function([x, y], Match(x, [x_z_case, x_s_case]))
    orig = diff(make_nat_expr(p, 7), make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 4))
def test_match_nat_id():
    """A match-based nat identity applied to 3 evaluates to the literal 3."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    nat_id = GlobalVar("nat_id")
    z_case = Clause(PatternConstructor(z, []), z())
    s_case = Clause(PatternConstructor(s, [PatternVar(y)]), s(y))
    mod[nat_id] = Function([x], Match(x, [z_case, s_case]))
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_nat_id():
    """The plain identity on nats applied to 3 evaluates to the literal 3."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, _, _ = p.mod.get_type("nat")
    x = Var("x", nat())
    y = Var("y", nat())
    nat_id = GlobalVar("nat_id")
    mod[nat_id] = Function([x], x)
    orig = nat_id(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_global_match_nat_id():
    """A top-level match on a nat literal reduces to that literal."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    x = Var("x", nat())
    z_case = Clause(PatternConstructor(z, []), z())
    s_case = Clause(PatternConstructor(s, [PatternVar(x)]), s(x))
    orig = Match(make_nat_expr(p, 3), [z_case, s_case])
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 3))
def test_double():
    """nat_double(3) partially evaluates to the literal 6."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    double = p.mod.get_global_var("nat_double")
    orig = double(make_nat_expr(p, 3))
    orig = Function([], orig)
    res = dcpe(orig, mod=mod)
    assert tvm.ir.structural_equal(res.body, make_nat_expr(p, 6))
def test_concat():
    """A concat of opaque inputs has nothing to evaluate; PE is a no-op."""
    t = relay.TensorType([10], "float32")
    x = Var("x", t)
    y = Var("x", t)
    orig = run_infer_type(Function([x, y], op.concatenate([x, y], axis=0)))
    tvm.ir.assert_structural_equal(dcpe(orig), orig)
def test_triangle_number():
    """The recursive sum 10 + 9 + ... + 0 fully evaluates to the constant 55."""
    t = relay.TensorType([], "int32")
    x = Var("x", t)
    f_var = Var("f")
    f = Function([x], If(op.equal(x, const(0)), const(0), x + f_var(x - const(1))))
    orig = run_infer_type(Let(f_var, f, f_var(const(10))))
    tvm.ir.assert_structural_equal(dcpe(orig), const(55))
def test_nat_update():
    """PE must not crash on the prelude's nat module in A-normal form."""
    m = tvm.IRModule()
    p = Prelude(m)
    p.mod.import_from_std("nat.rly")
    m = transform.ToANormalForm()(m)
    transform.PartialEvaluate()(m)
def test_tuple_match():
    """dcpe should evaluate a tuple-pattern match into a constant."""
    lhs = relay.Var("a")
    rhs = relay.Var("b")
    pattern = relay.PatternTuple([relay.PatternVar(lhs), relay.PatternVar(rhs)])
    match = relay.Match(
        relay.Tuple([relay.const(1), relay.const(1)]),
        [relay.Clause(pattern, lhs + rhs)],
    )
    tvm.ir.assert_structural_equal(dcpe(match), const(2))
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 11,823 | 32.307042 | 100 | py |
tvm | tvm-main/tests/python/relay/test_pass_flatten_atrous_conv.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-wildcard-import
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor
def compare_expected_fac(expr, expected_expr, args):
    """Check FlattenAtrousConv against an expected expression.

    Verifies that the pass rewrites ``expr`` into a module structurally equal
    to ``expected_expr`` (and that it actually changed the module, unless the
    expected result is ``expr`` itself), and that the original, flattened, and
    expected modules all evaluate to identical values on ``args``.
    """
    mod_def = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
    mod_flat = tvm.relay.transform.FlattenAtrousConv()(mod_def)
    mod_exp = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expected_expr))

    # The pass must produce exactly the expected module, and must have done
    # something unless this is an expected no-op (expr is expected_expr).
    assert expr is expected_expr or not tvm.ir.structural_equal(mod_def, mod_flat)
    assert tvm.ir.structural_equal(mod_flat, mod_exp)

    def evaluate(mod):
        # Run one module on the VM executor and return its numpy output.
        return (
            relay.create_executor("vm", mod=mod, device=tvm.cpu(), target="llvm")
            .evaluate()(*args)
            .numpy()
        )

    result_def, result_flat, result_exp = (
        evaluate(m) for m in (mod_def, mod_flat, mod_exp)
    )
    assert np.array_equal(result_def, result_flat)
    assert np.array_equal(result_flat, result_exp)
def test_fac_block_shape_2():
    """FlattenAtrousConv positive case with block_shape=[2, 2].

    space_to_batch_nd -> conv2d -> batch_to_space_nd should collapse into a
    single conv2d with dilation=[2, 2] and the equivalent explicit padding.
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    # The three-op pattern the pass is expected to recognize.
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # The flattened equivalent: one dilated convolution.
    expected_expr = relay.nn.conv2d(
        data,
        weight,
        padding=[2, 2, 2, 2],
        dilation=[2, 2],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_block_shape_4():
    """FlattenAtrousConv positive case with block_shape=[4, 4].

    Same pattern as test_fac_block_shape_2, but the larger block shape must
    become dilation=[4, 4] with padding=[4, 4, 4, 4].
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    # Pattern under test.
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[4, 4], paddings=[[4, 7], [4, 7]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op2, block_shape=[4, 4], crops=[[0, 3], [0, 3]])
    # Expected flattened form.
    expected_expr = relay.nn.conv2d(
        data,
        weight,
        padding=[4, 4, 4, 4],
        dilation=[4, 4],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_quantize():
    """FlattenAtrousConv positive case on a quantized (qnn.conv2d) pattern.

    The pass must also match qnn convolutions; the qnn scale/zero-point
    arguments are carried over unchanged to the flattened conv.
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    # int8 inputs, no float cast — this exercises the quantized path.
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="int8")
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.qnn.op.conv2d(
        op1,
        weight,
        input_zero_point=relay.const(0),
        kernel_zero_point=relay.const(0),
        input_scale=relay.const(2.0),
        kernel_scale=relay.const(1.0),
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Expected: a single dilated qnn.conv2d with identical quantization params.
    expected_expr = relay.qnn.op.conv2d(
        data,
        weight,
        input_zero_point=relay.const(0),
        kernel_zero_point=relay.const(0),
        input_scale=relay.const(2.0),
        kernel_scale=relay.const(1.0),
        padding=[2, 2, 2, 2],
        dilation=[2, 2],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_surrounding():
    """FlattenAtrousConv with unrelated ops around the pattern.

    The s2b/conv/b2s triple sits between two `add` ops; only the triple is
    flattened, the surrounding adds must survive untouched.
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    # add -> [pattern] -> add
    op0 = relay.op.add(data, relay.const(1.0))
    op1 = relay.nn.space_to_batch_nd(op0, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    op3 = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    expr = relay.op.add(op3, relay.const(-1.0))
    # Expected: add -> dilated conv2d -> add (names op0/op1 reused on purpose).
    op0 = relay.op.add(data, relay.const(1.0))
    op1 = relay.nn.conv2d(
        op0,
        weight,
        padding=[2, 2, 2, 2],
        dilation=[2, 2],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expected_expr = relay.op.add(op1, relay.const(-1.0))
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_several():
    """FlattenAtrousConv with two chained pattern instances.

    A block-2 pattern feeds a block-4 pattern; each must be flattened
    independently, yielding two stacked dilated convolutions.
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    # First pattern instance (block_shape=[2, 2]).
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    op3 = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Second pattern instance (block_shape=[4, 4]) consuming the first.
    op4 = relay.nn.space_to_batch_nd(op3, block_shape=[4, 4], paddings=[[4, 7], [4, 7]])
    op5 = relay.nn.conv2d(
        op4,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op5, block_shape=[4, 4], crops=[[0, 3], [0, 3]])
    # Expected: dilation-2 conv feeding a dilation-4 conv (op1 reused).
    op1 = relay.nn.conv2d(
        data,
        weight,
        padding=[2, 2, 2, 2],
        dilation=[2, 2],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expected_expr = relay.nn.conv2d(
        op1,
        weight,
        padding=[4, 4, 4, 4],
        dilation=[4, 4],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    compare_expected_fac(expr, expected_expr, [x_np])
# NOTE(review): the function name has a doubled underscore (`test__fac...`);
# pytest still collects it (matches `test_*`), but it is almost certainly a
# typo for `test_fac_only_s2b_conv`. Left as-is to avoid renaming a public
# identifier; consider fixing alongside any callers/reporting tooling.
def test__fac_only_s2b_conv():
    """Negative case: space_to_batch_nd + conv2d without the closing
    batch_to_space_nd must be left unchanged by FlattenAtrousConv."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    expr = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    # Expected output is the input itself (a no-op for the pass).
    expected_expr = expr
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_s2b():
    """Negative case: a lone space_to_batch_nd must be left unchanged."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)  # unused; kept to mirror the positive-case setup
    data = relay.var("data", shape=shape_x, dtype="float32")
    expr = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    # Passing expr as its own expected result marks this as an expected no-op.
    compare_expected_fac(expr, expr, [x_np])
def test_fac_only_conv_b2s():
    """Negative case: conv2d + batch_to_space_nd without the opening
    space_to_batch_nd must be left unchanged by FlattenAtrousConv."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    op1 = relay.nn.conv2d(
        data,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op1, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Expected output is the input itself (a no-op for the pass).
    expected_expr = expr
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_only_b2s():
    """Negative case: a lone batch_to_space_nd must be left unchanged."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)  # unused; kept to mirror the positive-case setup
    data = relay.var("data", shape=shape_x, dtype="float32")
    expr = relay.nn.batch_to_space_nd(data, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Passing expr as its own expected result marks this as an expected no-op.
    compare_expected_fac(expr, expr, [x_np])
def test_fac_op_btwn_s2b_conv():
    """Negative case: an `add` interposed between space_to_batch_nd and the
    conv2d breaks the pattern, so the pass must not fire."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    # The interloper that should disable the rewrite.
    op_1_5 = relay.op.add(op1, relay.const(1.0))
    op2 = relay.nn.conv2d(
        op_1_5,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Expected output is the input itself (a no-op for the pass).
    expected_expr = expr
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_op_btwn_conv_b2s():
    """Negative case: an `add` interposed between conv2d and
    batch_to_space_nd breaks the pattern, so the pass must not fire."""
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    # The interloper that should disable the rewrite.
    op_2_5 = relay.op.add(op2, relay.const(1.0))
    expr = relay.nn.batch_to_space_nd(op_2_5, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    # Expected output is the input itself (a no-op for the pass).
    expected_expr = expr
    compare_expected_fac(expr, expected_expr, [x_np])
def test_fac_relay_build():
    """FlattenAtrousConv must run as part of the default relay.build pipeline.

    After building, the graph JSON must contain a conv2d but neither
    space_to_batch_nd nor batch_to_space_nd, and the compiled output must match
    the reference VM evaluation of the unflattened module.
    """
    shape_x = [1, 5, 5, 4]
    shape_w = [3, 3, 4, 1]
    x_np = np.random.randint(-128, 127, size=shape_x, dtype="int8").astype("float32")
    w_np = np.random.randint(-128, 127, size=shape_w, dtype="int8").astype("float32")
    weight = relay.const(w_np)
    data = relay.var("data", shape=shape_x, dtype="float32")
    # Build the canonical s2b -> conv2d -> b2s pattern.
    op1 = relay.nn.space_to_batch_nd(data, block_shape=[2, 2], paddings=[[2, 3], [2, 3]])
    op2 = relay.nn.conv2d(
        op1,
        weight,
        padding=[0, 0, 0, 0],
        groups=4,
        channels=4,
        kernel_size=[3, 3],
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    expr = relay.nn.batch_to_space_nd(op2, block_shape=[2, 2], crops=[[0, 1], [0, 1]])
    mod_def = tvm.relay.transform.InferType()(tvm.IRModule.from_expr(expr))
    # Reference result from the VM (no graph-level optimization pipeline).
    result_def = (
        relay.create_executor("vm", mod=mod_def, device=tvm.cpu(), target="llvm")
        .evaluate()(x_np)
        .numpy()
    )
    # Build and run through the graph executor, which applies the default passes.
    graph, lib, params = relay.build(mod_def, "llvm", params=None)
    rt_mod = graph_executor.create(graph, lib, device=tvm.cpu())
    rt_mod.set_input("data", x_np)
    rt_mod.set_input(**params)
    rt_mod.run()
    result_flat = rt_mod.get_output(0).numpy()
    # The graph JSON should show the pattern was flattened into one conv2d.
    assert "space_to_batch_nd" not in graph
    assert "conv2d" in graph
    assert "batch_to_space_nd" not in graph
    assert np.array_equal(result_def, result_flat)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 14,627 | 29.991525 | 90 | py |
tvm | tvm-main/tests/python/relay/test_pass_to_basic_block_normal_form.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay import op, create_executor, transform
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count, create_workload
from tvm.relay.analysis import Feature
from tvm.relay.analysis import check_basic_block_normal_form
def run_opt_pass(expr, passes):
    """Apply one pass (or a list of passes) to ``expr`` at opt_level 3.

    Returns the transformed function when ``expr`` was a Function, otherwise
    the body of the resulting "main".
    """
    if not isinstance(passes, list):
        passes = [passes]
    mod = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        mod = tvm.transform.Sequential(passes)(mod)
    main = mod["main"]
    return main if isinstance(expr, relay.Function) else main.body
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
    """Evaluate ``expr`` on the LLVM target and compare numerically."""
    device = tvm.device("llvm", 0)
    actual = create_executor(mod=mod, device=device, target="llvm").evaluate(expr)
    np.testing.assert_allclose(actual.numpy(), expected_result, rtol=rtol)
def test_no_explicit_bind():
    """A pure dataflow function needs no lets after BBNF conversion.

    fn () {
      %0 = add(1, 1);
      %1 = add(%0, %0);
      add(%1, %1)
    }
    """
    one = relay.const(1)
    two = op.add(one, one)
    four = op.add(two, two)
    f = relay.Function([], op.add(four, four))
    assert Feature.fLet not in detect_feature(f)
    bblock = run_opt_pass(f, transform.ToBasicBlockNormalForm())
    # The pass must not introduce any let bindings here...
    assert Feature.fLet not in detect_feature(bblock)
    # ...and evaluation must be unchanged: ((1+1)*2)*2 == 8.
    check_eval(f(), 8.0)
    check_eval(bblock(), 8.0)
    check_basic_block_normal_form(bblock)
def test_top_level_nested_if():
    """BBNF on nested Ifs with two shared subexpressions (y2 and z2).

    z2 is used in both top-level branches, so it must be let-bound above the
    outer If; y2 is used only in the true branch, so it is bound there.
    """
    x = relay.var("x", shape=(), dtype="bool")
    y = relay.var("y", shape=(), dtype="float32")
    z = relay.var("z", shape=(), dtype="float32")
    cond_t = relay.const(True)
    cond_f = relay.const(False)
    one = relay.const(1, dtype="float32")
    three = relay.const(3, dtype="float32")
    y2 = relay.add(y, y)
    z2 = relay.add(z, z)
    true_branch = relay.If(cond_t, relay.add(z2, y2), relay.add(three, y2))
    false_branch = relay.If(cond_f, z2, one)
    body = relay.If(x, true_branch, false_branch)
    """
    free_var %x: bool
    if (%x) {
      if (True) {
        free_var %z: float32
        %0 = add(%z, %z);
        free_var %y: float32
        %1 = add(%y, %y);
        add(%0, %1)
      } else {
        add(3f, %1)
      }
    } else {
      if (False) {
        %0
      } else {
        1f
      }
    }
    """

    def expected():
        # Hand-built expected output: z2 bound outermost, y2 bound inside the
        # true branch (each at the scope of its least common ancestor use).
        x = relay.var("x", shape=(), dtype="bool")
        y = relay.var("y", shape=(), dtype="float32")
        z = relay.var("z", shape=(), dtype="float32")
        cond_t = relay.const(True)
        cond_f = relay.const(False)
        one = relay.const(1, dtype="float32")
        three = relay.const(3, dtype="float32")
        y2 = relay.var("y2")
        z2 = relay.var("z2")
        true_branch = relay.If(cond_t, relay.add(z2, y2), relay.add(three, y2))
        true_branch = relay.Let(y2, relay.add(y, y), true_branch)
        false_branch = relay.If(cond_f, z2, one)
        body = relay.If(x, true_branch, false_branch)
        body = relay.Let(z2, relay.add(z, z), body)
        return body

    bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
    """
    free_var %z: float32
    let %x: float32 = add(%z, %z) /* ty=float32 */;
    free_var %x1: bool
    if (%x1) {
      free_var %y: float32
      let %x2: float32 = add(%y, %y) /* ty=float32 */;
      if (True /* ty=bool */) {
        add(%x, %x2) /* ty=float32 */
      } else {
        add(3f /* ty=float32 */, %x2) /* ty=float32 */
      }
    } else {
      if (False /* ty=bool */) {
        %x
      } else {
        1f /* ty=float32 */
      }
    }
    """
    expected_output = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(bblock, expected_output, map_free_vars=True)
def test_nested_if():
    """BBNF on a nested If where only the true branch shares a subexpression.

    y2 is used twice within the true branch only, so it must be let-bound at
    the head of that branch rather than hoisted above the outer If.
    """
    x = relay.var("x", shape=(), dtype="bool")
    y = relay.var("y", shape=(), dtype="float32")
    cond_t = relay.const(True)
    cond_f = relay.const(False)
    one = relay.const(1, dtype="float32")
    two = relay.const(2, dtype="float32")
    three = relay.const(3, dtype="float32")
    y2 = relay.add(y, y)
    true_branch = relay.If(cond_t, y2, relay.add(three, y2))
    false_branch = relay.If(cond_f, two, one)
    body = relay.If(x, true_branch, false_branch)
    """
    free_var %x: bool
    if (%x) {
      if (True) {
        free_var %y: float32
        %0 = add(%y, %y);
        %0
      } else {
        add(3f, %0)
      }
    } else {
      if (False) {
        2f
      } else {
        1f
      }
    }
    """

    def expected():
        # Hand-built expected output with y2 bound inside the true branch.
        x = relay.var("x", shape=(), dtype="bool")
        y = relay.var("y", shape=(), dtype="float32")
        cond_t = relay.const(True)
        cond_f = relay.const(False)
        one = relay.const(1, dtype="float32")
        two = relay.const(2, dtype="float32")
        three = relay.const(3, dtype="float32")
        y2 = relay.var("y2")
        true_branch = relay.If(cond_t, y2, relay.add(three, y2))
        true_branch = relay.Let(y2, relay.add(y, y), true_branch)
        false_branch = relay.If(cond_f, two, one)
        body = relay.If(x, true_branch, false_branch)
        return body

    bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
    """
    free_var %x: bool
    if (%x) {
      free_var %y: float32
      let %x1: float32 = add(%y, %y) /* ty=float32 */;
      if (True /* ty=bool */) {
        %x1
      } else {
        add(3f /* ty=float32 */, %x1) /* ty=float32 */
      }
    } else {
      if (False /* ty=bool */) {
        2f /* ty=float32 */
      } else {
        1f /* ty=float32 */
      }
    }
    """
    expected_output = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(bblock, expected_output, map_free_vars=True)
    check_basic_block_normal_form(bblock)
def test_recursion():
    """BBNF must terminate on a recursive global and preserve semantics.

    Program:
        let f(n: i64) -> i64 = {
            m = (n * 2)
            if (n == 0) { return m; }
            else { return m + f(n - 1); }
        }
        f(5);   # evaluates to 30

    This is primarily a termination check: the transformed program is too
    large to compare structurally, so we only re-evaluate it and verify the
    normal-form invariant. (The unused ``old_f`` binding from the original
    version has been removed.)
    """
    mod = tvm.IRModule()
    i64 = relay.TensorType((), "int64")
    f = relay.GlobalVar("f")
    n = relay.Var("n", i64)
    m = n * relay.const(2, "int64")
    cond = relay.equal(n, relay.const(0, "int64"))
    false_branch = m + f(n - relay.const(1, "int64"))
    funcbody = relay.If(cond, m, false_branch)
    mod[f] = relay.Function([n], funcbody, i64, [])
    check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
    # Run the pass; evaluation must be unchanged afterwards.
    mod = transform.ToBasicBlockNormalForm()(mod)
    f_new = mod[f]
    check_eval(f_new(relay.const(5, "int64")), 30.0, mod=mod)
    check_basic_block_normal_form(f_new)
def test_ref():
    """BBNF must preserve the semantics of mutable references.

    i = ref(1); iv = !i; i := 2; uv = !i; iv + uv  ==>  1 + 2 == 3
    """
    ref_var = relay.Var("i")
    first_read = relay.Var("iv")
    write_result = relay.Var("u")
    second_read = relay.Var("uv")
    # Build the let chain inside-out, starting from the final sum.
    body = relay.add(first_read, second_read)
    body = relay.Let(second_read, relay.RefRead(ref_var), body)
    body = relay.Let(write_result, relay.RefWrite(ref_var, relay.const(2)), body)
    body = relay.Let(first_read, relay.RefRead(ref_var), body)
    body = relay.Let(ref_var, relay.RefCreate(relay.const(1)), body)
    check_eval(body, 3)
    opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
    check_eval(opt_body, 3)
    check_basic_block_normal_form(opt_body)
def test_nat_add():
    """BBNF on the prelude's nat_add: evaluation preserved, no lets added."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat, z, s = p.mod.get_type("nat")
    add = p.mod.get_global_var("nat_add")
    dev = tvm.device("llvm", 0)
    assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
    # 1 + 1 yields a nat with two successor constructors before the pass...
    assert (
        count(p, create_executor(mod=mod, device=dev, target="llvm").evaluate(add(s(z()), s(z()))))
        == 2
    )
    expr = add(s(z()), s(z()))
    f = relay.GlobalVar("f")
    mod[f] = relay.Function([], expr)
    mod = transform.InferType()(mod)
    mod = transform.ToBasicBlockNormalForm()(mod)
    opt_expr = mod["f"]
    # ...and still two afterwards.
    assert (
        count(p, create_executor(mod=mod, device=dev, target="llvm").evaluate(opt_expr.body)) == 2
    )
    # nat_add itself should not have gained any let bindings.
    assert not Feature.fLet in detect_feature(mod[add])
    check_basic_block_normal_form(opt_expr)
def test_let():
    """BBNF on programs that are already let-bound in various shapes."""

    def test_let1():
        # A single let binding a constant is already in normal form.
        x = relay.Var("x")
        c = relay.const(4.0, "float32")
        body = relay.Let(x, c, x)
        body = run_opt_pass(body, transform.InferType())
        """
        let %x: float32 = 4f /* ty=float32 */;
        %x
        """
        opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
        assert tvm.ir.structural_equal(body, opt_body)
        check_basic_block_normal_form(opt_body)

    def test_let1_1():
        # Bound variable used twice — still unchanged by the pass.
        x = relay.Var("y")
        d = relay.const(4.0, "float32")
        body = relay.Let(x, d, relay.add(x, x))
        body = run_opt_pass(body, transform.InferType())
        opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
        assert tvm.ir.structural_equal(body, opt_body)
        check_basic_block_normal_form(opt_body)

    def test_let2():
        # Aliasing let (y = x; x): the pass rewrites the tail to use the
        # alias, i.e. (y = x; y).
        x = relay.Var("x")
        y = relay.Var("y")
        d = relay.const(4.0, "float32")
        body = relay.Let(y, x, x)
        body = relay.Let(x, d, body)
        body = run_opt_pass(body, transform.InferType())
        check_eval(body, 4)

        def expected():
            x = relay.Var("x")
            y = relay.Var("y")
            d = relay.const(4.0, "float32")
            body = relay.Let(y, x, y)
            body = relay.Let(x, d, body)
            return body

        opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
        expected_body = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(opt_body, expected_body)
        check_basic_block_normal_form(opt_body)

    def test_let3():
        # Nested lets over arithmetic — already normal, unchanged.
        x = relay.Var("x")
        y = relay.Var("y")
        z = relay.Var("z")
        c = relay.const(3.0, "float32")
        d = relay.const(4.0, "float32")
        body = relay.Let(z, x + y, x + z)
        body = relay.Let(x, d, body)
        body = relay.Let(y, c, body)
        body = run_opt_pass(body, transform.InferType())
        opt_body = run_opt_pass(body, transform.ToBasicBlockNormalForm())
        assert tvm.ir.structural_equal(body, opt_body)
        check_basic_block_normal_form(opt_body)

    test_let1()
    test_let1_1()
    test_let2()
    test_let3()
def test_function():
    """A simple function stays a function and evaluates identically."""
    scalar = relay.TensorType((), "float32")
    x = relay.Var("x", scalar)
    double = relay.Function([x], x + x)
    four = relay.const(4.0, "float32")
    bblock = run_opt_pass(double, transform.ToBasicBlockNormalForm())
    assert isinstance(bblock, relay.Function)
    # 4 + 4 == 8 both before and after the pass.
    check_eval(double(four), 8)
    check_eval(bblock(four), 8)
    check_basic_block_normal_form(bblock)
def test_gradient_if():
    """The higher-order gradient of an If program stays in BBNF."""
    x = relay.var("a", shape=(1, 16))
    y = relay.var("y", shape=(1, 16))
    cond = relay.var("cond", shape=(), dtype="uint1")
    branch = relay.If(cond, x, x)
    net = relay.Function([cond, x, y], relay.add(x, branch))
    mod = tvm.IRModule.from_expr(net)
    mod = relay.transform.ToBasicBlockNormalForm()(mod)
    mod = relay.transform.InferType()(mod)
    # Differentiate main, then normalize again; both must be in BBNF.
    mod["main"] = relay.transform.gradient(mod["main"], mode="higher_order")
    mod_grad = relay.transform.ToBasicBlockNormalForm()(mod)
    check_basic_block_normal_form(mod_grad["main"])
    check_basic_block_normal_form(mod["main"])
def test_if():
    """BBNF on an If whose branches share v1 = x + 1: the shared value is
    hoisted into a let above the If, both for a bare body and for a
    Function wrapping it."""

    def if_expr(x):
        """
        free_var %x: float32
        %0 = equal(%x, 2f);
        if (%0) {
          %1 = add(%x, 1f);
          multiply(%1, 2f)
        } else {
          multiply(%1, 1f)
        }
        """
        one = relay.const(1, dtype="float32")
        two = relay.const(2, dtype="float32")
        v1 = relay.add(x, one)
        v2 = relay.equal(x, two)
        true_branch = relay.multiply(v1, two)
        false_branch = relay.multiply(v1, one)
        body = relay.If(v2, true_branch, false_branch)
        return body

    def expected_if_expr(x):
        """
        free_var %x: float32
        let %v1: float32 = add(%x, 1f /* ty=float32 */) /* ty=float32 */;
        %0 = equal(%x, 2f /* ty=float32 */) /* ty=bool */;
        if (%0) {
          multiply(%v1, 2f /* ty=float32 */) /* ty=float32 */
        } else {
          multiply(%v1, 1f /* ty=float32 */) /* ty=float32 */
        }
        """
        one = relay.const(1, dtype="float32")
        two = relay.const(2, dtype="float32")
        v1 = relay.var("v1")
        v2 = relay.equal(x, two)
        true_branch = relay.multiply(v1, two)
        false_branch = relay.multiply(v1, one)
        body = relay.If(v2, true_branch, false_branch)
        body = relay.Let(v1, relay.add(x, one), body)
        return body

    x = relay.var("x", shape=(), dtype="float32")
    body = if_expr(x)
    expected_body = expected_if_expr(x)
    # Case 1: bare expression.
    bblock = run_opt_pass(body, [transform.ToBasicBlockNormalForm(), transform.InferType()])
    expected_bblock = run_opt_pass(expected_body, transform.InferType())
    assert tvm.ir.structural_equal(bblock, expected_bblock, map_free_vars=True)
    check_basic_block_normal_form(bblock)
    # Case 2: same expression wrapped in a Function.
    func = relay.Function([x], body)
    expected_func = relay.Function([x], expected_body)
    bblock = run_opt_pass(func, [transform.ToBasicBlockNormalForm(), transform.InferType()])
    expected_bblock = run_opt_pass(expected_func, transform.InferType())
    assert tvm.ir.structural_equal(bblock, expected_bblock)
    check_basic_block_normal_form(bblock)
def test_higher_order_return():
    """BBNF on a function returning a tuple of closures that both capture
    the shared subexpression x2 = x + x."""
    x = relay.var("x", shape=(1,), dtype="float32")  # , a)
    y = relay.var("y", shape=(1,), dtype="float32")  # , a)
    z = relay.var("z", shape=(1,), dtype="float32")  # , a)
    x2 = relay.add(x, x)
    func_a = relay.Function([y], relay.add(x2, y))  # , a, [a])
    func_b = relay.Function([z], relay.add(x2, z))  # , a, [a])
    body = relay.Tuple([func_a, func_b])
    body = relay.Function([x], body)
    """
    fn (%x: Tensor[(1), float32]) {
      %1 = fn (%y: Tensor[(1), float32]) {
        %0 = add(%x, %x);
        add(%0, %y)
      };
      %2 = fn (%z: Tensor[(1), float32]) {
        add(%0, %z)
      };
      (%1, %2)
    }
    """
    # Only the normal-form invariant is checked, not an exact output program.
    bblock = run_opt_pass(body, transform.ToBasicBlockNormalForm())
    check_basic_block_normal_form(bblock)
def test_higher_order_nested():
    """BBNF with closures in both branches of an If sharing `shared = s + s`."""
    x = relay.var("x", dtype="float32", shape=(1,))
    s = relay.var("s", dtype="float32", shape=(1,))
    shared = relay.add(s, s)
    func_true = relay.Function([x], relay.add(x, shared))
    choice_t = relay.FuncType([], relay.scalar_type("bool"))
    f = relay.Var("f", choice_t)
    z = relay.Var("z")
    body = relay.If(f(), func_true, relay.Function([z], relay.add(z, shared)))
    top = relay.Function([f, s], body)
    """
    fn (%f: fn () -> bool, %s: Tensor[(1), float32]) {
      %0 = %f();
      if (%0) {
        fn (%x: Tensor[(1), float32]) {
          %1 = add(%s, %s);
          add(%x, %1)
        }
      } else {
        fn (%z) {
          add(%z, %1)
        }
      }
    }
    """
    # Only the normal-form invariant is checked, not an exact output program.
    bblock = run_opt_pass(top, transform.ToBasicBlockNormalForm())
    check_basic_block_normal_form(bblock)
def test_immutability():
    """Running a pass must not mutate the input module in place."""
    conv = relay.nn.conv2d(
        data=relay.var("data", relay.TensorType((1, 3, 224, 224), "float32")),
        weight=relay.var("weight"),
        kernel_size=(5, 5),
        channels=3,
        padding=(1, 1),
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    mod, _ = create_workload(func)
    old_mod = mod
    with tvm.transform.PassContext(opt_level=4):
        with tvm.target.Target("llvm"):
            pipeline = tvm.transform.Sequential(
                passes=[transform.ToBasicBlockNormalForm()], opt_level=4
            )
            new_mod = pipeline(mod)
    # The original module is untouched, while the output differs from it.
    assert old_mod.astext() == mod.astext()
    assert old_mod.astext() != new_mod.astext()
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 16,688 | 31.218147 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_grad_level2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from tvm import topi
import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass
from tvm.relay.transform import gradient
import tvm.testing
# Pytest parameter fixture: executor kinds every test in this file runs with.
executor_kind = tvm.testing.parameter("debug")
def verify_max_pool2d_grad(executor_kind, x_shape, pool_size, strides, padding, ceil_mode):
    """Check the autodiff gradient of nn.max_pool2d against topi's NCHW reference.

    ``padding`` is expected to be a (ph, pw) pair, expanded symmetrically for
    the reference computation. The output gradient is all-ones.
    """
    x = relay.var("x", relay.TensorType(x_shape, "float32"))
    y = tvm.relay.nn.max_pool2d(
        x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode
    )
    fwd_func = relay.Function([x], y)
    fwd_func = run_infer_type(fwd_func)
    bwd_func = run_infer_type(gradient(fwd_func))
    data = np.random.rand(*x_shape).astype("float32")
    ph, pw = padding
    y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
    out_grad = np.ones(shape=y_shape)
    # Reference gradient computed by topi's pooling-grad helper.
    ref_grad = tvm.topi.testing.pool_grad_nchw(
        data,
        out_grad,
        pool_size=pool_size,
        strides=strides,
        padding=[ph, pw, ph, pw],
        pool_type="max",
        ceil_mode=ceil_mode,
    )
    for target, dev in tvm.testing.enabled_targets():
        # bwd_func returns (forward value, tuple of input gradients).
        op_res, (op_grad,) = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(bwd_func)(data)
        np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_max_pool2d_grad(executor_kind):
    """Max-pool gradients: strided non-overlapping windows, then a padded
    1x1 window."""
    cases = [
        dict(pool_size=(2, 2), strides=(2, 2), padding=(0, 0)),
        dict(pool_size=(1, 1), strides=(1, 1), padding=(1, 1)),
    ]
    for kwargs in cases:
        verify_max_pool2d_grad(executor_kind, (1, 4, 16, 16), ceil_mode=False, **kwargs)
def verify_avg_pool2d_grad(
    x_shape,
    pool_size,
    strides,
    padding,
    ceil_mode,
    count_include_pad,
    executor_kind,
    dtype="float32",
):
    """Check the autodiff gradient of nn.avg_pool2d against topi's NCHW reference.

    The check is repeated with the input shape expressed as int32 and int64
    IntImms to cover both shape dtypes. ``padding`` is a (ph, pw) pair.
    """
    for shape_dtype in ["int32", "int64"]:
        x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in x_shape], dtype=dtype)
        y = tvm.relay.nn.avg_pool2d(
            x,
            pool_size=pool_size,
            strides=strides,
            padding=padding,
            ceil_mode=ceil_mode,
            count_include_pad=count_include_pad,
        )
        fwd_func = relay.Function([x], y)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))
        data = np.random.rand(*x_shape).astype(dtype)
        ph, pw = padding
        y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
        out_grad = np.ones(shape=y_shape)
        # Reference gradient computed by topi's pooling-grad helper.
        ref_grad = tvm.topi.testing.pool_grad_nchw(
            data,
            out_grad,
            pool_size=pool_size,
            strides=strides,
            padding=[ph, pw, ph, pw],
            pool_type="avg",
            ceil_mode=ceil_mode,
        )
        for target, dev in tvm.testing.enabled_targets():
            # bwd_func returns (forward value, tuple of input gradients).
            op_res, (op_grad,) = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(bwd_func)(data)
            np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_avg_pool2d_grad(executor_kind):
    """Average-pool gradients over stride/padding/count_include_pad/dtype
    combinations."""
    cases = [
        dict(pool_size=(2, 2), strides=(2, 2), padding=(0, 0), count_include_pad=True),
        dict(pool_size=(1, 1), strides=(1, 1), padding=(1, 1), count_include_pad=False),
        dict(
            pool_size=(1, 1),
            strides=(1, 1),
            padding=(1, 1),
            count_include_pad=False,
            dtype="float16",
        ),
    ]
    for kwargs in cases:
        verify_avg_pool2d_grad(
            (1, 4, 16, 16), ceil_mode=False, executor_kind=executor_kind, **kwargs
        )
def verify_global_avg_pool2d_grad(executor_kind, x_shape):
    """Check the gradient of global_avg_pool2d against topi's reference.

    Global average pooling is modeled for the reference as an avg pool whose
    window covers the whole spatial extent (H, W of the NCHW input).
    """
    x = relay.var("x", relay.TensorType(x_shape, "float32"))
    y = tvm.relay.nn.global_avg_pool2d(x)
    fwd_func = relay.Function([x], y)
    fwd_func = run_infer_type(fwd_func)
    bwd_func = run_infer_type(gradient(fwd_func))
    data = np.random.rand(*x_shape).astype("float32")
    y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
    out_grad = np.ones(shape=y_shape)
    ref_grad = tvm.topi.testing.pool_grad_nchw(
        data,
        out_grad,
        pool_size=(x_shape[2], x_shape[3]),
        strides=(1, 1),
        padding=[0, 0, 0, 0],
        pool_type="avg",
        ceil_mode=False,
    )
    for target, dev in tvm.testing.enabled_targets():
        # bwd_func returns (forward value, tuple of input gradients).
        op_res, (op_grad,) = relay.create_executor(
            executor_kind, device=dev, target=target
        ).evaluate(bwd_func)(data)
        np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_global_avg_pool2d_grad(executor_kind):
    """Gradient checks for global_avg_pool2d on two NCHW input shapes."""
    for shape in [(1, 4, 16, 16), (1, 8, 8, 24)]:
        verify_global_avg_pool2d_grad(executor_kind, shape)
def verify_conv2d_grad(
    dshape, wshape, strides, padding, dilation, groups=1, mode="higher_order", executor_kind="vm"
):
    """Build a float32 conv2d and numerically verify its gradient via check_grad."""
    dtype = "float32"
    inp = relay.var("data", shape=dshape, dtype=dtype)
    kernel = relay.var("weight", shape=wshape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        inp,
        kernel,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        out_dtype=dtype,
    )
    check_grad(relay.Function([inp, kernel], conv_out), mode=mode, executor_kind=executor_kind)
@tvm.testing.uses_gpu
def test_conv2d_grad(executor_kind):
    """Gradient checks for conv2d over stride/padding variants and both AD modes."""
    cases = [
        # (dshape, wshape, strides, padding, dilation, mode)
        ((1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1], "higher_order"),
        ((1, 4, 16, 16), (16, 4, 1, 1), [1, 1], [0, 0], [1, 1], "higher_order"),
        ((1, 4, 16, 16), (16, 4, 1, 1), [2, 2], [0, 0], [1, 1], "higher_order"),
        ((1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1], "first_order"),
    ]
    for dshape, wshape, strides, padding, dilation, mode in cases:
        verify_conv2d_grad(
            dshape, wshape, strides, padding, dilation, mode=mode, executor_kind=executor_kind
        )
def verify_dense_grad(d_shape, w_shape, executor_kind):
    """Numerically verify the gradient of nn.dense for the given input/weight shapes."""
    operands = [
        relay.var("data", relay.TensorType(d_shape, "float32")),
        relay.var("weight", relay.TensorType(w_shape, "float32")),
    ]
    fwd_func = relay.Function(operands, relay.nn.dense(*operands))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_dense_grad(executor_kind):
    """Gradient checks for nn.dense over several shape pairs."""
    for d_shape, w_shape in [((1, 8), (16, 8)), ((1, 4), (3, 4)), ((5, 4), (3, 4))]:
        verify_dense_grad(d_shape, w_shape, executor_kind)
def verify_matmul_grad(a_shape, b_shape, transpose_a, transpose_b, executor_kind):
    """Numerically verify the gradient of nn.matmul for the given shapes and flags."""
    lhs = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
    rhs = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
    product = relay.nn.matmul(lhs, rhs, transpose_a=transpose_a, transpose_b=transpose_b)
    check_grad(relay.Function([lhs, rhs], product), executor_kind=executor_kind)
def test_matmul_grad(executor_kind):
    """Gradient checks for nn.matmul with each transpose combination."""
    for a_shape, b_shape, t_a, t_b in [
        ((1, 8), (8, 16), False, False),
        ((4, 1), (4, 3), True, False),
        ((4, 5), (3, 4), True, True),
    ]:
        verify_matmul_grad(a_shape, b_shape, t_a, t_b, executor_kind)
def verify_batch_flatten_grad(d_shape, executor_kind):
    """Numerically verify the gradient of nn.batch_flatten for the given shape."""
    inp = relay.var("data", relay.TensorType(d_shape, "float32"))
    check_grad(relay.Function([inp], relay.nn.batch_flatten(inp)), executor_kind=executor_kind)
def test_batch_flatten_grad(executor_kind):
    """Gradient checks for nn.batch_flatten on 4-D and 2-D inputs."""
    for shape in [(1, 2, 3, 4), (1, 8)]:
        verify_batch_flatten_grad(shape, executor_kind)
def verify_conv2d_backward_weight(
    executor_kind, dy_shape, x_shape, kernel_size, stride, padding, groups=1, out_channels=None
):
    """Check conv2d_backward_weight against the numpy reference implementation.

    The op is evaluated on two paths: legalized into plain relay ops on llvm,
    and as-is on ``cuda -libs=cudnn`` (skipped when cuDNN is unavailable).
    Both results are compared against
    ``tvm.topi.testing.conv2d_backward_weight_python``.
    """
    dtype = "float32"
    dy = relay.var("dy", shape=dy_shape, dtype=dtype)
    x = relay.var("x", shape=x_shape, dtype=dtype)
    dw_func = relay.Function(
        [dy, x],
        relay.nn.conv2d_backward_weight(
            dy,
            x,
            strides=stride,
            padding=padding,
            kernel_size=kernel_size,
            groups=groups,
            channels=out_channels,
            out_dtype=dtype,
        ),
    )
    # Legalize rewrites the op so it can run on targets without a native
    # backward-weight kernel.
    dw_func_legalized = run_opt_pass(dw_func, relay.transform.Legalize())
    for dw, target in [(dw_func_legalized, "llvm"), (dw_func, "cuda -libs=cudnn")]:
        if "cudnn" in target and not tvm.contrib.cudnn.exists():
            continue
        dev = tvm.device(target, 0)
        dy_np = np.random.randn(*dy_shape).astype(dtype)
        x_np = np.random.randn(*x_shape).astype(dtype)
        dw_np = (
            relay.create_executor(executor_kind, device=dev, target=target)
            .evaluate(dw)(dy_np, x_np)
            .numpy()
        )
        ref_dw_np = tvm.topi.testing.conv2d_backward_weight_python(
            dy_np, x_np, kernel_size, stride, padding, groups=groups, channels=out_channels
        )
        np.testing.assert_allclose(dw_np, ref_dw_np, rtol=1e-4, atol=1e-4)
def test_conv2d_backward_weight(executor_kind):
    """Check conv2d_backward_weight against the reference for several configurations."""
    cases = [
        # ((dy_shape, x_shape, kernel_size, stride, padding), extra kwargs)
        (((2, 8, 32, 32), (2, 4, 32, 32), (3, 3), (1, 1), (1, 1)), {}),
        (((2, 16, 15, 15), (2, 3, 32, 32), (3, 3), (2, 2), (0, 0)), {}),
        (
            ((1, 16, 32, 32), (1, 16, 32, 32), (3, 3), (1, 1), (1, 1)),
            {"groups": 16, "out_channels": 16},
        ),
    ]
    for args, kwargs in cases:
        verify_conv2d_backward_weight(executor_kind, *args, **kwargs)
def test_conv2d_backward_weight_infer_type():
    """Type inference of the first-order gradient must succeed for both a
    depthwise and a normal conv2d model (regression test; see linked PR)."""
    # From https://github.com/apache/tvm/pull/10439
    depthwise_conv_code = """
    fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 1, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
    %0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=3, channels=3, kernel_size=[3, 3]);
    nn.bias_add(%0, %v0_bias)
    }
    """
    normal_conv_code = """
    fn (%input0: Tensor[(1, 3, 32, 32), float32], %v0_weight: Tensor[(3, 3, 3, 3), float32], %v0_bias: Tensor[(3), float32]) {
    %0 = nn.conv2d(%input0, %v0_weight, padding=[1, 1, 1, 1], groups=1, channels=3, kernel_size=[3, 3]);
    nn.bias_add(%0, %v0_bias)
    }
    """
    SEMVER = '#[version = "0.0.5"]\n'
    for code in [normal_conv_code, depthwise_conv_code]:
        expr = tvm.relay.parse_expr(SEMVER + code)
        fmod = tvm.IRModule.from_expr(expr)
        mod = relay.transform.InferType()(fmod)
        # Differentiate and re-infer types; the test passes if this does not raise.
        bwd_expr = relay.transform.gradient(mod["main"], mode="first_order")
        bwd_mod = tvm.IRModule.from_expr(bwd_expr)
        bwd_mod = relay.transform.InferType()(bwd_mod)
if __name__ == "__main__":
    # Run all tests in this file through the tvm pytest harness.
    tvm.testing.main()
| 11,840 | 31.17663 | 126 | py |
tvm | tvm-main/tests/python/relay/test_target_hooks.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for target hooks."""
import sys
import numpy as np
import pytest
import logging
import tvm
import tvm.testing
from tvm import relay, IRModule
from utils.external_codegen import (
parametrize_external_codegen_checks,
set_external_func_attr,
check_aot_executor_result,
check_graph_executor_result,
check_vm_result,
)
logging.basicConfig(level=logging.INFO)
@parametrize_external_codegen_checks
def test_tir_external_generation_inline_without_target_instance(check_result):
    """The example_target_hook codegen rewrites an inlined add into a subtract."""
    shape = (8,)
    x_data = np.random.randint(255, size=shape).astype("float32")
    y_data = np.random.randint(255, size=shape).astype("float32")
    # The external function computes x0 + y0, but the hook replaces add with subtract.
    inner_x = relay.var("x0", shape=shape, dtype="float32")
    inner_y = relay.var("y0", shape=shape, dtype="float32")
    extern_fn = relay.Function([inner_x, inner_y], inner_x + inner_y)
    extern_fn = set_external_func_attr(
        extern_fn, "example_target_hook", "replace_add_with_subtract"
    )
    outer_x = relay.var("x", shape=shape, dtype="float32")
    outer_y = relay.var("y", shape=shape, dtype="float32")
    mod = IRModule.from_expr(relay.Call(extern_fn, [outer_x, outer_y]))
    check_result(mod, {"x": x_data, "y": y_data}, shape, x_data - y_data)
# TODO(mbs): The check_aot_executor_result does not support list-of-targets, mostly because
# tvm.testing.aot.compile_and_run requires the target to be a kind name string, and
# tvm.testing.aot.compile_models requires a single Target object. However, code outside of
# tvm.testing.aot is ready for this more general form.
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_tir_external_generation_outline_with_target_instance(check_result):
    """The custom codegen pass sees target attributes when compiling with an
    instance of the hooked target kind: ``-example_attribute=42`` flows into
    the rewrite, so the partitioned add becomes ``x - y - 42``."""
    shape = (8,)
    x_data = np.random.randint(255, size=shape).astype("float32")
    y_data = np.random.randint(255, size=shape).astype("float32")
    inputs = {"x": x_data, "y": y_data}
    # Compile with an instance of the hooked target kind to demonstrate plumbing target attributes
    # into custom passes.
    host_target = tvm.target.Target("llvm")
    generic_target = tvm.target.Target("llvm", host=host_target)
    extern_codegen_target = tvm.target.Target(
        "example_target_hook -example_attribute=42", host=host_target
    )
    mod = tvm.relay.fromtext(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(8), float32], %y: Tensor[(8), float32]) -> Tensor[(8), float32] {
        @replace_add_with_subtract(%x, %y) * 2.0f
        }
        def @replace_add_with_subtract(%x: Tensor[(8), float32], %y: Tensor[(8), float32],
        Inline=1,
        Primitive=1,
        Compiler="example_target_hook",
        global_symbol="replace_add_with_subtract") -> Tensor[(8), float32] {
        %x + %y // will be rewritten to TIR implementing %x - %y - 42.0f by custom pass
        }
        """
    )
    check_result(
        mod,
        inputs,
        (8,),
        (x_data - y_data - 42.0) * 2.0,
        target=[generic_target, extern_codegen_target],
    )
@pytest.mark.parametrize("check_result", [check_aot_executor_result, check_graph_executor_result])
def test_runtime_module_generation(check_result):
    """With the tir_to_runtime attr set, the hook generates a runtime module
    whose behavior is multiplication of the two inputs."""
    shape = (8,)
    x_data = np.random.randint(255, size=shape).astype("float32")
    y_data = np.random.randint(255, size=shape).astype("float32")
    inner_x = relay.var("x0", shape=shape, dtype="float32")
    inner_y = relay.var("y0", shape=shape, dtype="float32")
    extern_fn = relay.Function([inner_x, inner_y], inner_x + inner_y)
    extern_fn = set_external_func_attr(
        extern_fn, "example_target_hook", "replace_add_with_subtract"
    )
    # Test hook to trigger TIRToRuntime code generation
    extern_fn = extern_fn.with_attr("tir_to_runtime", True)
    outer_x = relay.var("x", shape=shape, dtype="float32")
    outer_y = relay.var("y", shape=shape, dtype="float32")
    mod = IRModule.from_expr(relay.Call(extern_fn, [outer_x, outer_y]))
    check_result(mod, {"x": x_data, "y": y_data}, shape, x_data * y_data)
if __name__ == "__main__":
    # Run all tests in this file through the tvm pytest harness.
    tvm.testing.main()
| 4,914 | 37.700787 | 111 | py |
tvm | tvm-main/tests/python/relay/test_analysis_get_calibration_data.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.relay.testing
from tvm import relay
from tvm.relay import transform
from tvm.relay.analysis import get_calibration_data
def check_data_size(mod, data):
    """Verify the calibration data covers every partitioned function with the right arity."""
    # One record per partitioned function; "main" itself is not calibrated.
    assert len(data) == len(mod.functions) - 1
    for gvar, func in mod.functions.items():
        if gvar.name_hint == "main":
            continue
        record = data[gvar]
        assert len(record["inputs"]) == len(func.params)
        # Tuple-returning functions record one output per tuple field.
        num_outputs = len(func.body.fields) if isinstance(func.body, relay.Tuple) else 1
        assert len(record["outputs"]) == num_outputs
def test_simple_graph():
    """Calibration data collection on a module with two external subgraphs.

    ``g0(x, y)`` returns ``(x + y, x - y)`` and ``g1(a, z)`` returns ``a - z``;
    ``main`` feeds g0's first output into g1. The recorded per-subgraph inputs
    and outputs are compared against the equivalent numpy expressions.
    """
    # A module with two subgraphs
    mod = tvm.IRModule()
    # g0(x0, y0) = (x0 + y0, x0 - y0), marked as an external "test_graph" function.
    x0 = relay.var("x0", shape=(8, 8))
    y0 = relay.var("y0", shape=(8, 8))
    z0 = x0 + y0
    z1 = x0 - y0
    z2 = relay.Tuple((z0, z1))
    f0 = relay.Function([x0, y0], z2)
    f0 = f0.with_attr("Compiler", "test_graph")
    g0 = relay.GlobalVar("g0")
    mod[g0] = f0
    mod = relay.transform.InferType()(mod)
    # g1(x1, y1) = x1 - y1, also external.
    x1 = relay.var("x1", shape=(8, 8))
    y1 = relay.var("y1", shape=(8, 8))
    z1 = x1 - y1
    f1 = relay.Function([x1, y1], z1)
    f1 = f1.with_attr("Compiler", "test_graph")
    g1 = relay.GlobalVar("g1")
    mod[g1] = f1
    mod = relay.transform.InferType()(mod)
    # main(x, y, z) = g1(g0(x, y).0, z)
    x = relay.var("x", shape=(8, 8))
    y = relay.var("y", shape=(8, 8))
    z = relay.var("z", shape=(8, 8))
    c0 = relay.Call(g0, [x, y])
    c1 = relay.Call(g1, [relay.TupleGetItem(c0, 0), z])
    fm = relay.Function([x, y, z], c1)
    mod["main"] = fm
    mod = relay.transform.InferType()(mod)
    x_data = np.random.rand(8, 8).astype("float32")
    y_data = np.random.rand(8, 8).astype("float32")
    z_data = np.random.rand(8, 8).astype("float32")
    data = get_calibration_data(mod, {"x": x_data, "y": y_data, "z": z_data})
    # Check the number and orders
    check_data_size(mod, data)
    tvm.testing.assert_allclose(data[g0]["inputs"][0].numpy(), x_data)
    tvm.testing.assert_allclose(data[g0]["inputs"][1].numpy(), y_data)
    tvm.testing.assert_allclose(data[g0]["outputs"][0].numpy(), x_data + y_data)
    tvm.testing.assert_allclose(data[g0]["outputs"][1].numpy(), x_data - y_data)
    tvm.testing.assert_allclose(data[g1]["inputs"][0].numpy(), x_data + y_data)
    tvm.testing.assert_allclose(data[g1]["inputs"][1].numpy(), z_data)
    tvm.testing.assert_allclose(data[g1]["outputs"][0].numpy(), x_data + y_data - z_data)
def test_mobilenet_dnnl():
    """Collect calibration data for a DNNL-partitioned MobileNet (skipped without DNNL)."""
    if not tvm.get_global_func("relay.ext.dnnl", True):
        print("skip because DNNL codegen is not available")
        return
    ishape = (1, 3, 224, 224)
    mod, params = relay.testing.mobilenet.get_workload(batch_size=1, dtype="float32")
    # Partition DNNL-supported ops into external functions.
    for partition_pass in (
        transform.AnnotateTarget(["dnnl"]),
        transform.MergeCompilerRegions(),
        transform.PartitionGraph(),
    ):
        mod = partition_pass(mod)
    i_data = np.random.uniform(0, 1, ishape).astype("float32")
    data = get_calibration_data(mod, {"data": i_data, **params})
    # Check the number and orders
    check_data_size(mod, data)
if __name__ == "__main__":
    # Run the tests directly when invoked as a script.
    test_simple_graph()
    test_mobilenet_dnnl()
| 3,913 | 34.581818 | 89 | py |
tvm | tvm-main/tests/python/relay/test_pass_fold_explicit_padding.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
import numpy as np
def test_simplify_conv_pad():
    """FoldExplicitPadding: a zero-valued constant nn.pad feeding a conv is
    folded into the conv's own ``padding`` attribute; non-zero or non-constant
    pads are left in place."""
    convs = [relay.nn.conv1d, relay.nn.conv2d, relay.nn.conv3d]

    def validate(ndim, pad_width, pad_value, pad_mode, orig_padding, layout, no_fold=False):
        """Build pad+conv, run FoldExplicitPadding, and compare against the
        expected graph both structurally and numerically.

        ``no_fold=True`` means the pass is expected to leave nn.pad in place.
        """
        if layout[1] == "C":
            shape = [1, 3] + [10] * ndim
            wshape = [8, 3] + [3] * ndim
        elif layout[-1] == "C":
            shape = [1] + [10] * ndim + [3]
            wshape = [8] + [3] * ndim + [3]
        else:
            raise ValueError("This test only supports NC* and N*C")
        x = relay.var("x", shape=shape, dtype="float32")
        w = relay.var("w", shape=wshape, dtype="float32")
        pad = relay.nn.pad(x, pad_width, pad_value, pad_mode)
        if layout[1] == "C":
            conv = convs[ndim - 1](pad, w, padding=orig_padding)
        else:
            conv = convs[ndim - 1](
                pad, w, padding=orig_padding, data_layout=layout, kernel_layout="DHWIO"[3 - ndim :]
            )
        if pad_mode == "constant" and pad_value == 0:
            # Folding is legal: the expected graph is the conv with the pad's
            # spatial widths added onto its original padding.
            new_padding = []
            for j in range(2):
                for i in range(len(pad_width)):
                    if layout[i] in ["D", "H", "W"]:
                        new_padding.append(pad_width[i][j])
            for i in range(len(new_padding)):
                new_padding[i] += orig_padding[i]
            if layout[1] == "C":
                after = convs[ndim - 1](x, w, padding=new_padding)
            else:
                after = convs[ndim - 1](
                    x, w, padding=new_padding, data_layout=layout, kernel_layout="DHWIO"[3 - ndim :]
                )
        else:
            # Folding is illegal: the graph should come out unchanged.
            after = conv
        zz = run_opt_pass(conv, transform.FoldExplicitPadding())
        expected = run_opt_pass(after, transform.InferType())
        assert tvm.ir.structural_equal(zz, expected)
        mod1 = tvm.IRModule.from_expr(conv)
        mod2 = tvm.IRModule.from_expr(zz)
        if not no_fold:
            # The folded graph must not contain any nn.pad op.
            op_freqs = relay.analysis.list_op_freqs(mod2)
            assert "nn.pad" not in op_freqs
        # Also check numerical equivalence of the two graphs on random data.
        with tvm.transform.PassContext():
            func1 = relay.create_executor(
                "vm", mod=mod1, device=tvm.cpu(), target="llvm"
            ).evaluate()
            func2 = relay.create_executor("vm", mod=mod2, device=tvm.cpu(), target="llvm").evaluate()
        x_np = np.random.rand(*shape).astype("float32")
        w_np = np.random.rand(*wshape).astype("float32")
        result1 = func1(x_np, w_np)
        result2 = func2(x_np, w_np)
        tvm.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)

    # Test fold cases
    for orig_pad in [[0, 0], [2, 0], [0, 2]]:
        for i_pad in [[0, 0], [1, 1], [1, 0]]:
            for ndim in [1, 2, 3]:
                for channels_last in [0, 1]:
                    if channels_last:
                        layout = "NDHWC"
                        layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]
                        padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]
                    else:
                        layout = "NCDHW"
                        layout = layout[0:2] + layout[5 - ndim :]
                        padding = [[0, 0]] * 2 + [i_pad] * ndim
                    validate(ndim, padding, 0, "constant", orig_pad * ndim, layout)
    # Test no fold cases
    # (These reuse the final i_pad/orig_pad values left over from the loops above.)
    ndim = 2
    # Conv only folds when pad_value=0
    validate(
        ndim, [[0, 0]] * 2 + [i_pad] * ndim, 1, "constant", orig_pad * ndim, "NCHW", no_fold=True
    )
    # Conv only folds when pad's pad_mode="constant"
    validate(ndim, [[0, 0]] * 2 + [i_pad] * ndim, 0, "edge", orig_pad * ndim, "NCHW", no_fold=True)
def get_min_value(dtype):
    """Return the minimum representable value of a NumPy dtype (or dtype name).

    Parameters
    ----------
    dtype : str or np.dtype or type
        The data type, e.g. ``"float32"`` or ``"uint8"``.

    Returns
    -------
    The smallest finite value for floating-point types, or the smallest
    integer for integer types.

    Raises
    ------
    ValueError
        If ``dtype`` is neither a floating nor an integer type (e.g. bool).
    """
    # Normalize so strings, np scalar types, and dtype objects all work.
    np_dtype = np.dtype(dtype)
    if np.issubdtype(np_dtype, np.floating):
        return np.finfo(np_dtype).min
    if np.issubdtype(np_dtype, np.integer):
        return np.iinfo(np_dtype).min
    # The original message said "not integer or not floating", which is true of
    # every dtype; state the actual requirement instead.
    raise ValueError(
        "Cannot get min value for dtype %s: it is neither integer nor floating" % np_dtype
    )
def test_simplify_pool_pad():
    """FoldExplicitPadding for pools: a constant nn.pad folds into a max pool
    when the pad value is the dtype minimum, and into an avg pool when it is
    zero (with count_include_pad adjusted); everything else is left alone."""
    max_pools = [relay.nn.max_pool1d, relay.nn.max_pool2d, relay.nn.max_pool3d]
    avg_pools = [relay.nn.avg_pool1d, relay.nn.avg_pool2d, relay.nn.avg_pool3d]

    def validate(
        pools,
        ndim,
        pad_width,
        pad_value,
        orig_padding,
        layout,
        pool_size,
        pad_mode="constant",
        dtype="float32",
        no_fold=False,
        **kwargs,
    ):
        """Build pad+pool, run FoldExplicitPadding, and compare against the
        expected graph structurally and numerically (``no_fold=True`` means
        nn.pad is expected to survive the pass)."""
        pad_value_const = relay.const(pad_value, dtype=dtype)
        if layout[1] == "C":
            shape = [1, 3] + [10] * ndim
        elif layout[-1] == "C":
            shape = [1] + [10] * ndim + [3]
        else:
            raise ValueError("This test only supports NC* and N*C")
        x = relay.var("x", shape=shape, dtype=dtype)
        pad = relay.nn.pad(x, pad_width, pad_value_const, pad_mode)
        if layout[1] == "C":
            pool = pools[ndim - 1](pad, padding=orig_padding, pool_size=pool_size, **kwargs)
        else:
            pool = pools[ndim - 1](
                pad, padding=orig_padding, layout=layout, pool_size=pool_size, **kwargs
            )
        # Max pools can absorb pads filled with the dtype minimum; avg pools
        # can absorb zero-filled pads.
        if pools == max_pools:
            foldable_pad_value = get_min_value(dtype)
        else:
            foldable_pad_value = 0
        if pad_mode == "constant" and pad_value == foldable_pad_value:
            # Expected graph: the pool with the pad's spatial widths added
            # onto its original padding.
            new_padding = []
            for j in range(2):
                for i in range(len(pad_width)):
                    if layout[i] in ["D", "H", "W"]:
                        new_padding.append(pad_width[i][j])
            for i in range(len(new_padding)):
                new_padding[i] += orig_padding[i]
            if pools == avg_pools and all(v == 0 for v in orig_padding):
                # If the orig padding for AvgPool is all zero and the pad op to fold
                # has non-zero pad width, the resultant folded AvgPool will have
                # count_include_pad=True so AvgPool's divisor is agnostic of pad boundaries
                kwargs["count_include_pad"] = True
            if layout[1] == "C":
                after = pools[ndim - 1](x, padding=new_padding, pool_size=pool_size, **kwargs)
            else:
                after = pools[ndim - 1](
                    x, padding=new_padding, layout=layout, pool_size=pool_size, **kwargs
                )
        else:
            # Folding is illegal: the graph should come out unchanged.
            after = pool
        zz = run_opt_pass(pool, transform.FoldExplicitPadding())
        expected = run_opt_pass(after, transform.InferType())
        assert tvm.ir.structural_equal(zz, expected)
        mod1 = tvm.IRModule.from_expr(pool)
        mod2 = tvm.IRModule.from_expr(zz)
        if not no_fold:
            # The folded graph must not contain any nn.pad op.
            op_freqs = relay.analysis.list_op_freqs(mod2)
            assert "nn.pad" not in op_freqs
        # Also check numerical equivalence of original and folded graphs.
        with tvm.transform.PassContext():
            func1 = relay.create_executor(
                "vm", mod=mod1, device=tvm.cpu(), target="llvm"
            ).evaluate()
            func2 = relay.create_executor("vm", mod=mod2, device=tvm.cpu(), target="llvm").evaluate()
        x_np = np.random.rand(*shape).astype(dtype)
        result1 = func1(x_np)
        result2 = func2(x_np)
        tvm.testing.assert_allclose(result1.numpy(), result2.numpy(), rtol=1e-5, atol=1e-5)

    # Test fold cases
    float_min_val = get_min_value("float32")
    for orig_pad in [[0, 0], [2, 0]]:
        for i_pad in [[1, 1], [1, 0]]:
            for ndim in [1, 2, 3]:
                for channels_last in [0, 1]:
                    if channels_last:
                        layout = "NDHWC"
                        layout = layout[0:1] + layout[4 - ndim : 4] + layout[-1:]
                        padding = [[0, 0]] + [i_pad] * ndim + [[0, 0]]
                    else:
                        layout = "NCDHW"
                        layout = layout[0:2] + layout[5 - ndim :]
                        padding = [[0, 0]] * 2 + [i_pad] * ndim
                    validate(max_pools, ndim, padding, float_min_val, orig_pad * ndim, layout, 2)
    # Check Pool pad folding when pad width on pad op is all zero.
    validate(max_pools, 1, [[0, 0], [0, 0], [0, 0]], float_min_val, [2, 0], "NCW", 2)
    # Check MaxPool pad folding with uint dtype
    int_min_val = get_min_value("uint8")
    validate(
        max_pools,
        2,
        [[0, 0], [0, 0], [0, 2], [2, 0]],
        int_min_val,
        [2, 0, 0, 0],
        "NCHW",
        2,
        dtype="uint8",
    )
    # Fold when original AvgPool has its own padding but count_include_pad=True
    validate(
        avg_pools,
        2,
        [[0, 0], [0, 0], [0, 2], [2, 0]],
        0,
        [0, 0, 1, 0],
        "NCHW",
        2,
        count_include_pad=True,
    )
    # Fold when count_include_pad=False but original AvgPool has no orig padding
    validate(avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], "NCHW", 2)
    # Test no fold cases
    # AvgPool only folds pad when count_include_pad (False by default) is True
    # NOTE(review): this call repeats the fold case just above with no_fold=True,
    # which only skips the op-frequency check — verify the intended arguments.
    validate(
        avg_pools, 2, [[0, 0], [0, 0], [0, 2], [2, 0]], 0, [0, 0, 0, 0], "NCHW", 2, no_fold=True
    )
    # MaxPool only folds pad when pad_value is the min for its dtype
    validate(max_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], "NCHW", 2, no_fold=True)
    # AvgPool only folds pad when pad_value=0
    validate(avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 1, [0, 0], "NCHW", 2, no_fold=True)
    # Pools only fold when pad_mode="constant"
    validate(
        avg_pools, 1, [[0, 0], [0, 0], [0, 2]], 0, [0, 0], "NCHW", 2, pad_mode="edge", no_fold=True
    )
def test_fold_pad_qconv2d():
    """nn.pad whose pad_value equals the input zero point folds into qnn.conv2d padding."""

    def before():
        # Pad with the input zero point (10), then conv with no padding.
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        input_zero_point = 10
        pad = relay.nn.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], pad_value=input_zero_point)
        return relay.qnn.op.conv2d(
            pad,
            weight,
            relay.const(input_zero_point, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(0, 0),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )

    def expected():
        # Same conv, but with the explicit pad absorbed into padding=(1, 1).
        x = relay.var("x", shape=(1, 56, 56, 64), dtype="int8")
        weight = relay.var("weight", shape=(3, 3, 64, 64), dtype="int8")
        input_zero_point = 10
        return relay.qnn.op.conv2d(
            x,
            weight,
            relay.const(input_zero_point, "int32"),
            relay.const(1, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=64,
            kernel_size=(3, 3),
            padding=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )

    a = run_opt_pass(before(), relay.transform.FoldExplicitPadding())
    b = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(a, b, map_free_vars=True), "Actual = \n" + str(a)
def test_pad_qconv2d_no_fold():
    """nn.pad must NOT fold into qnn.conv2d when pad_value differs from the input zero point."""

    def get_expr():
        x = relay.var("x", shape=(1, 1, 2, 2), dtype="int8")
        weight = relay.var("weight", shape=(1, 1, 2, 2), dtype="int8")
        # Pad value and input zp are not equal
        pad_value = 1
        input_zero_point = 0
        pad = relay.nn.pad(x, [[0, 0], [0, 0], [1, 1], [1, 1]], pad_value=pad_value)
        return relay.qnn.op.conv2d(
            pad,
            weight,
            relay.const(input_zero_point, "int32"),
            relay.const(0, "int32"),
            relay.const(1, "float32"),
            relay.const(1, "float32"),
            channels=1,
            kernel_size=(2, 2),
            padding=(0, 0),
        )

    # The pass must leave the expression unchanged (modulo type inference).
    a = run_opt_pass(get_expr(), relay.transform.FoldExplicitPadding())
    b = run_opt_pass(get_expr(), transform.InferType())
    assert tvm.ir.structural_equal(a, b, map_free_vars=True), (
        "\nActual = \n" + str(a) + "\nExpected = \n" + str(b)
    )
if __name__ == "__main__":
    # Run the tests directly when invoked as a script.
    test_simplify_conv_pad()
    test_simplify_pool_pad()
    test_fold_pad_qconv2d()
    test_pad_qconv2d_no_fold()
| 12,994 | 36.234957 | 100 | py |
tvm | tvm-main/tests/python/relay/test_memory_passes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
import tvm
from tvm import te
import numpy as np
from tvm import relay
def check_memory_plan(func, check_fn):
    """Run ``func`` on the VM with and without memory planning and compare both
    results against a numpy reference.

    Parameters
    ----------
    func : relay.Function
        The function under test; its parameters must carry concrete
        TensorType annotations so random inputs can be generated.
    check_fn : callable
        Numpy reference taking the same arguments (as ndarrays).
    """
    # Build Module
    mod = tvm.IRModule().from_expr(func)
    # Generate random inputs from the parameter type annotations.
    args = []
    for param in func.params:
        ty = param.type_annotation
        shape = [int(dim) for dim in ty.shape]
        args.append(tvm.nd.array(np.random.rand(*shape).astype(ty.dtype)))
    # TODO(mbs): Why does the executor need to be shared? Seems wrong.
    ex = relay.create_executor("vm", mod)
    # Compute WITH memory planning: the default pipeline includes MemoryPlan.
    # (The original code labeled these two runs the other way around.)
    plan_result = ex.evaluate()(*args)
    # Compute WITHOUT memory planning by explicitly disabling the pass.
    with tvm.transform.PassContext(opt_level=1, disabled_pass=["MemoryPlan"]):
        no_plan_result = ex.evaluate()(*args)
    # Compute Python reference result.
    py_res = check_fn(*[arg.numpy() for arg in args])
    # First check that the two VM results agree.
    np.testing.assert_allclose(no_plan_result.numpy(), plan_result.numpy())
    # Finally check that the results match the Python result.
    np.testing.assert_allclose(plan_result.numpy(), py_res)
def storage_type(mod):
    """Type of the ``Storage`` ADT as registered in ``mod`` (via core.rly)."""
    storage_gtv = mod.get_global_type_var("Storage")
    return relay.TypeCall(storage_gtv, [])
def test_tyck_alloc_storage():
    """Importing core.rly (which declares the Storage ADT) must succeed."""
    mod = tvm.IRModule()
    mod.import_from_std("core.rly")
def test_tyck_alloc_tensor():
    """memory.alloc_tensor out of a Storage value must type check."""
    mod = tvm.IRModule()
    mod.import_from_std("core.rly")
    storage = relay.Var("x", storage_type(mod))
    shape = relay.const(np.array([1, 2]), dtype="int64")
    tensor = relay.op.memory.alloc_tensor(storage, relay.const(0, dtype="int64"), shape)
    mod["main"] = relay.Function([storage], tensor)
    relay.transform.InferType()(mod)
def check_add(x):
    """Numpy reference for test_add: elementwise x + x."""
    doubled = x + x
    return doubled
def test_add():
    """x + x on a small 1-D tensor survives memory planning."""
    x = relay.var("x", shape=(2,))
    check_memory_plan(relay.Function([x], x + x), check_add)
def check_add_sub(x, y):
    """Numpy reference for test_add_sub: (x + x) - y."""
    return (x + x) - y
def test_add_sub():
    """(x + x) - y survives memory planning."""
    x = relay.var("x", shape=(10,))
    y = relay.var("y", shape=(10,))
    body = (x + x) - y
    check_memory_plan(relay.Function([x, y], body), check_add_sub)
def check_no_fuse(x, y, w):
    """Numpy reference for test_no_fuse: dense(x + y, w), i.e. (x + y) @ w.T."""
    summed = x + y
    return np.matmul(summed, w.T)
def test_no_fuse():
    """dense(x + y, w) — an op pair that does not fuse — survives memory planning."""
    x, y, w = (relay.var(name, shape=(5, 1)) for name in ("x", "y", "w"))
    out = relay.op.nn.dense(x + y, w)
    check_memory_plan(relay.Function([x, y, w], out), check_no_fuse)
if __name__ == "__main__":
    # Run every test in this file when executed directly as a script.
    # The original entry point omitted test_tyck_alloc_storage and
    # test_no_fuse, so they never ran outside pytest.
    test_tyck_alloc_storage()
    test_tyck_alloc_tensor()
    test_add()
    test_add_sub()
    test_no_fuse()
| 3,353 | 26.268293 | 78 | py |
tvm | tvm-main/tests/python/relay/test_pass_to_cps.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay.transform import to_cps, un_cps
from tvm.relay.analysis import Feature
from tvm.relay.prelude import Prelude
from tvm.relay.testing import make_nat_expr, rand, run_infer_type, run_opt_pass
from tvm.relay import create_executor
from tvm.relay import transform
def test_id():
    """CPS conversion of the scalar identity function must type check."""
    scalar = relay.var("x", shape=[])
    identity_fn = run_infer_type(relay.Function([scalar], scalar))
    run_infer_type(to_cps(identity_fn))
def test_double():
    """CPS conversion of the polymorphic double-application combinator must type check."""
    t = relay.TypeVar("t")
    value = relay.var("x", t)
    fn = relay.var("f", relay.FuncType([t], t))
    # double = fun f x -> f (f x), polymorphic in t.
    double = run_infer_type(relay.Function([fn, value], fn(fn(value)), t, [t]))
    run_infer_type(to_cps(double))
# make sure cps work for recursion.
def test_recursion():
    """CPS round-trip (to_cps then un_cps) preserves a recursive program.

    Uses the Prelude's ``nat_iterate`` to apply ``double`` three times, so the
    result must be 2^3 = 8 times the input.
    """
    mod = tvm.IRModule()
    p = Prelude(mod)
    p.mod.import_from_std("nat.rly")
    nat_iterate = p.mod.get_global_var("nat_iterate")
    shape = (10, 10)
    dtype = "float32"
    t = relay.TensorType(shape, dtype)
    x = relay.var("x", t)
    double = relay.Function([x], x + x)
    i = relay.var("i", t)
    # main(i) = nat_iterate(double, 3)(i), i.e. double applied three times.
    func = relay.Function([i], nat_iterate(double, make_nat_expr(p, 3))(i))
    mod["main"] = func
    mod = relay.transform.InferType()(mod)
    # Round-trip through CPS form before executing.
    mod["main"] = to_cps(mod["main"], mod=mod)
    mod = relay.transform.InferType()(mod)
    mod["main"] = un_cps(mod["main"])
    i_nd = rand(dtype, *shape)
    forward = create_executor(mod=mod).evaluate()(i_nd)
    tvm.testing.assert_allclose(forward.numpy(), 8 * i_nd.numpy())
# This serves as an integration test.
# It tests that, given a program with references, CPS conversion plus partial
# evaluation can completely eliminate the reference allocations.
def test_cps_pe():
    def destroy_ref(x):
        """Convert x to CPS, then check PE + DCE remove every RefCreate."""
        x = run_infer_type(x)
        x = to_cps(x)
        x = run_infer_type(x)
        # NOTE(review): y (the un_cps round-trip) is computed but unused —
        # presumably kept to exercise un_cps; verify intent.
        y = un_cps(x)
        y = run_infer_type(y)
        # TODO(mbs): Revisit once DCE can eliminate dead writes.
        x = run_opt_pass(
            x,
            tvm.transform.Sequential(
                [
                    transform.PartialEvaluate(),
                    transform.InferType(),
                    transform.DeadCodeElimination(inline_once=True, ignore_impurity=True),
                ]
            ),
        )
        assert Feature.fRefCreate not in detect_feature(x)

    # F: write one of two closures into a ref depending on cond, then call it.
    unit = relay.Function([], relay.const(0.0, dtype="float32"))
    f_ref = relay.Var("f_ref")
    one = relay.const(1.0, dtype="float32")
    two = relay.const(2.0, dtype="float32")
    cond = relay.var(shape=(), dtype="uint1", name_hint="cond")
    true_branch = relay.RefWrite(f_ref, relay.Function([], one))
    false_branch = relay.RefWrite(f_ref, relay.Function([], two))
    if_expr = relay.If(cond, true_branch, false_branch)
    stmt = relay.Let(
        f_ref,
        relay.RefCreate(unit),
        relay.Let(relay.Var("x"), if_expr, relay.Call(relay.RefRead(f_ref), [])),
    )
    F = relay.Function([cond], stmt)
    destroy_ref(F)
    # G: gradient() introduces references internally; they must be removable too.
    G = relay.Function([cond], relay.If(cond, one, two))
    G = run_infer_type(G)
    G = relay.transform.gradient(G)
    destroy_ref(G)
    # H: gradient of a tensor-valued conditional followed by an add.
    x = relay.var("x", shape=(1, 16))
    y = relay.var("y", shape=(1, 16))
    z = relay.var("z", shape=(1, 16))
    cond = relay.var("cond", shape=(), dtype="uint1")
    H = relay.If(cond, x, y)
    H = relay.add(H, z)
    H = relay.Function([cond, x, y, z], H)
    H = run_infer_type(H)
    H = relay.transform.gradient(H)
    destroy_ref(H)
if __name__ == "__main__":
    # Run all tests in this file through the tvm pytest harness.
    tvm.testing.main()
| 4,313 | 32.703125 | 90 | py |
tvm | tvm-main/tests/python/relay/test_pass_simplify_expr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from math import sqrt
import pytest
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass, run_infer_type
import numpy as np
def test_simplify_reshape():
    """SimplifyExpr collapses a chain of (reverse_)reshape ops into one
    reshape; with a symbolic input shape the chain is left untouched."""

    def before():
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(1, 16, -1))
        y = relay.reshape(y, newshape=(4, 8, -1, 16))
        y = relay.reverse_reshape(y, newshape=(32, 0, -1))
        return relay.Function([x, w], y)

    def expected():
        # The three reshapes above are equivalent to one reshape to (32, 16, 16).
        x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(32, 16, 16))
        return relay.Function([x, w], y)

    def symbolic():
        # A symbolic batch dimension prevents combining the reshapes.
        b = tvm.te.size_var("b")
        x = relay.var("x", shape=(b, 16, 16, 16), dtype="float32")
        w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
        y = relay.nn.conv2d(x, w, padding=(1, 1))
        y = relay.reshape(y, newshape=(1, 16, -1))
        y = relay.reshape(y, newshape=(4, 8, -1, 16))
        y = relay.reverse_reshape(y, newshape=(32, 0, -1))
        return relay.Function([x, w], y)

    z = before()
    zz = run_opt_pass(z, transform.SimplifyExpr())
    after = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
    # The symbolic variant must come out of the pass unchanged.
    z = symbolic()
    zz = run_opt_pass(z, transform.SimplifyExpr())
    after = run_opt_pass(symbolic(), transform.InferType())
    assert tvm.ir.structural_equal(zz, after)
def test_simplify_transpose():
    """SimplifyExpr folds, merges, or cancels chains of transpose and
    layout_transform ops; each before*/expected* pair below is one scenario."""

    # Test a series of transpose and layout_transform ops
    def before1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.layout_transform(y, "NHWC", "HWCN")  # To HWCN
        y = relay.transpose(y, axes=[3, 0, 1, 2])  # To NHWC
        return relay.Function([x], y)

    def expected1():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.transpose(x, axes=[0, 2, 3, 1])  # To NHWC
        return relay.Function([x], y)

    # Test that all transpose ops can be cancelled
    def before2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])  # To NHWC
        y = relay.transpose(y, axes=[1, 2, 3, 0])  # To HWCN
        y = relay.transpose(y, axes=[3, 2, 0, 1])  # To NCHW
        return relay.Function([x], y)

    def expected2():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    # Test default axis (reverse) and negative axis
    def before3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y, axes=[0, 2, -1, 1])
        y = relay.transpose(y)  # Reverse
        y = relay.transpose(y)  # Reverse
        return relay.Function([x], y)

    def expected3():
        x = relay.var("x", shape=(1, 3, 224, 224), dtype="float32")  # NCHW
        y = relay.nn.relu(x)
        y = relay.transpose(y, axes=[0, 2, 3, 1])
        return relay.Function([x], y)

    # Test a series of transpose and rank changing layout_transform
    def before4():
        """
        Simplify transpose->layout_transform and its inverse.
        Input:
        NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC
        Simplified:
        NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
        """
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")
        y = relay.transpose(x, axes=[0, 3, 1, 2])
        y = relay.layout_transform(y, "NCHW", "NCHW4c")
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.transpose(y, axes=[0, 2, 3, 1])
        return relay.Function([x], y)

    def expected4():
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def before5():
        """
        Simplify layout_transform->layout_transform and its inverse.
        Input:
        NHWC -> NCHW -> NCHW4c -> op -> NCHW4c -> NCHW -> NHWC
        Simplified:
        NHWC -> NCHW4c -> op -> NCHW4c -> NHWC
        """
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW")  # To NCHW
        y = relay.layout_transform(y, "NCHW", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NCHW")  # To NCHW
        y = relay.layout_transform(y, "NCHW", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def expected5():
        x = relay.var("x", shape=(1, 56, 56, 128), dtype="float32")  # NHWC
        y = relay.layout_transform(x, "NHWC", "NCHW4c")  # To NCHW4c
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW4c", "NHWC")  # To NHWC
        return relay.Function([x], y)

    def before6():
        """
        Remove trivial layout_transform->layout_transform.
        Input:
        NCHW -> NHWC -> NCHW -> op
        Simplified:
        NHWC -> op
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NHWC")
        y = relay.layout_transform(y, "NHWC", "NCHW")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected6():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before7():
        """
        Remove trivial layout_transform->layout_transform.
        Input:
        NCHW4c -> NCHW8c -> NCHW4c -> op
        Simplified:
        NCHW4c -> op
        """
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
        y = relay.layout_transform(y, "NCHW8c", "NCHW4c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected7():
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before8():
        """
        Simplify layout_transform->layout_transform with rank contraction and expansion
        Input:
        NCHW4c -> NCHW -> NCHW8c -> op
        Simplified:
        NCHW4c -> NCHW8c -> op
        """
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW")
        y = relay.layout_transform(y, "NCHW", "NCHW8c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected8():
        x = relay.var("x", shape=(1, 32, 56, 56, 4), dtype="float32")
        y = relay.layout_transform(x, "NCHW4c", "NCHW8c")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def before9():
        """
        Remove trivial layout_transform->layout_transform.
        Input:
        NCHW -> NCHW4c -> NCHW -> op
        Simplified:
        NCHW -> op
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NCHW4c")
        y = relay.layout_transform(y, "NCHW4c", "NCHW")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected9():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    def before10():
        """
        Simplify layout_transform->layout_transform without rank change to transpose.
        Input:
        NCHW -> NHWC -> CHWN -> op
        Simplified:
        NCHW -> CHWN -> op
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.layout_transform(x, "NCHW", "NHWC")
        y = relay.layout_transform(y, "NHWC", "CHWN")
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def expected10():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.transpose(x, axes=[1, 2, 3, 0])
        y = relay.nn.relu(y)
        return relay.Function([x], y)

    def before11():
        """
        Remove trivial no op transpose ops
        Input:
        op1 -> relay.transpose(x, axes=[0, 1, 2, 3]) -> op2
        Simplified:
        op1 -> op2
        """
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.transpose(x, axes=[0, 1, 2, 3])
        y = relay.nn.relu(y)
        y = relay.layout_transform(y, "NCHW", "NCHW")
        return relay.Function([x], y)

    def expected11():
        x = relay.var("x", shape=(1, 128, 56, 56), dtype="float32")
        y = relay.nn.relu(x)
        return relay.Function([x], y)

    # Run every scenario through the pass and compare structurally.
    for before, expected in [
        [before1(), expected1()],
        [before2(), expected2()],
        [before3(), expected3()],
        [before4(), expected4()],
        [before5(), expected5()],
        [before6(), expected6()],
        [before7(), expected7()],
        [before8(), expected8()],
        [before9(), expected9()],
        [before10(), expected10()],
        [before11(), expected11()],
    ]:
        after = run_opt_pass(before, transform.SimplifyExpr())
        expected = run_opt_pass(expected, transform.InferType())
        assert tvm.ir.structural_equal(after, expected), "\nafter: {} \nexpected: {}".format(
            after, expected
        )
def test_simplify_full_elementwise():
    """Elementwise ops against a constant-filled tensor fold to a scalar const,
    and identities (x+0, x-0, x*1, x/1) disappear entirely."""

    def validate(shape, value, dtype):
        # Constant operand on the left of the elementwise op.
        def before_left(x, elem_op, full):
            return elem_op(full, x)

        def after_left(x, elem_op, value):
            # x + 0 and x * 1 are identities (bool "multiply" by any nonzero too).
            if elem_op == relay.add and value == 0:
                return x
            elif elem_op == relay.multiply and (value == 1 or (value > 1 and dtype == "bool")):
                return x
            return elem_op(relay.const(value, dtype), x)

        # Constant operand on the right of the elementwise op.
        def before_right(x, elem_op, full):
            return elem_op(x, full)

        def after_right(x, elem_op, value):
            if elem_op in [relay.add, relay.subtract] and value == 0:
                return x
            elif elem_op in [relay.multiply, relay.divide] and (
                value == 1 or (value > 1 and dtype == "bool")
            ):
                return x
            return elem_op(x, relay.const(value, dtype))

        x = relay.var("x", shape=shape, dtype=dtype)
        elem_ops = [relay.add, relay.multiply, relay.subtract, relay.divide]
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape, dtype))
            full_ops.append(relay.zeros_like(x))
        if value == 1:
            full_ops.append(relay.ones(shape, dtype))
            full_ops.append(relay.ones_like(x))
        else:
            # NOTE: this branch is also reached for value == 0, adding the
            # generic full/full_like variants on top of zeros/zeros_like.
            full_ops.append(relay.full(relay.const(value, dtype), shape))
            full_ops.append(relay.full_like(x, relay.const(value, dtype)))

        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_left(x, op, value), transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(after_right(x, op, value), transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

        # Test the case in which x is broadcast to full's shape
        # (shape * 2 repeats the dims list, producing a higher-rank tensor,
        # so the simplification must NOT fire and the expr stays unchanged).
        full_ops = []
        if value == 0:
            full_ops.append(relay.zeros(shape * 2, dtype))
        if value == 1:
            full_ops.append(relay.ones(shape * 2, dtype))
        else:
            full_ops.append(relay.full(relay.const(value, dtype), shape * 2))
        for op in elem_ops:
            for full in full_ops:
                z = before_left(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_left(x, op, full), transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

                z = before_right(x, op, full)
                zz = run_opt_pass(z, transform.SimplifyExpr())
                after = run_opt_pass(before_right(x, op, full), transform.InferType())
                assert tvm.ir.structural_equal(zz, after)

    for shape in [[10], [10, 10], [10, 10, 10]]:
        for dtype in ["float32", "int32", "bool"]:
            for value in [0, 1, 2]:
                validate(shape, value, dtype)
def test_eliminate_identity():
    """Identity arithmetic (x+0, x*1, x-0, x/1 and the *_like/filled variants)
    is eliminated, except when it would change the result type via broadcast."""

    def check(x, y=None, do_nothing=False):
        expected = run_infer_type(x)
        if do_nothing:
            # The pass must leave the expression untouched.
            actual = run_opt_pass(x, transform.SimplifyExpr())
            assert tvm.ir.structural_equal(actual, expected)
        else:
            assert y is not None
            actual = run_opt_pass(y, transform.SimplifyExpr())
            assert tvm.ir.structural_equal(actual, expected)

    shape = [2, 3, 4]
    dtype = "float32"
    x = relay.var("x", shape=shape, dtype=dtype)
    x = run_opt_pass(x, transform.InferType())

    # Commutative identities: the neutral element may sit on either side.
    for op, op_like, id_op, const in [
        (relay.zeros, relay.zeros_like, relay.add, relay.const(0, dtype)),
        (relay.ones, relay.ones_like, relay.multiply, relay.const(1, dtype)),
    ]:
        check(x, id_op(op_like(x), x))
        check(x, id_op(op(shape, dtype), x))
        check(x, id_op(const, x))
        check(x, id_op(op(shape[1:], dtype), x))
        check(x, id_op(x, op_like(x)))
        check(x, id_op(x, op(shape, dtype)))
        check(x, id_op(x, const))
        check(x, id_op(x, op(shape[1:], dtype)))
        # A larger filled tensor broadcasts x to a new shape: must not simplify.
        check(id_op(x, op([2] + shape, dtype)), do_nothing=True)
        check(id_op(op([2] + shape, dtype), x), do_nothing=True)

    # Non-commutative identities: only neutral on the right is an identity.
    for op, op_like, id_op, const in [
        (relay.zeros, relay.zeros_like, relay.subtract, relay.const(0, dtype)),
        (relay.ones, relay.ones_like, relay.divide, relay.const(1, dtype)),
    ]:
        check(x, id_op(x, op_like(x)))
        check(x, id_op(x, const))
        check(x, id_op(x, op(shape, dtype)))
        check(x, id_op(x, op(shape[1:], dtype)))
        check(id_op(x, op([2] + shape, dtype)), do_nothing=True)
        # Neutral on the left is not an identity; the filled tensor still
        # folds down to the scalar-constant form.
        check(id_op(const, x), id_op(op(shape, dtype), x))
        check(id_op(const, x), id_op(op_like(x), x))
def test_simplify_same_cast():
    """Casting a tensor to its own dtype (via cast or cast_like) is removed."""
    elem = "int32"
    data = relay.var("data", shape=(3, 4, 5), dtype=elem)
    like = relay.var("dtype_like", shape=(2, 2, 2), dtype=elem)
    reference = run_infer_type(data)
    for redundant in (relay.cast(data, elem), relay.cast_like(data, like)):
        simplified = run_opt_pass(redundant, relay.transform.SimplifyExpr())
        assert tvm.ir.structural_equal(simplified, reference)
def test_simplify_consecutive_cast():
    """Chained casts collapse into a single cast when no precision is lost;
    lossy (narrowing) intermediate casts must be preserved."""
    x = relay.var("x", shape=(3, 4, 5), dtype="int8")
    y = relay.var("y", shape=(3, 4), dtype="int64")
    z = relay.var("z", shape=(3,), dtype="float32")

    # Widening chain: int8 -> int16 -> int32 -> int64 (like y) -> float32 (like z).
    expr1 = relay.cast(x, "int16")
    expr2 = relay.cast(expr1, "int32")
    expr3 = relay.cast_like(expr2, y)
    expr4 = relay.cast_like(expr3, z)

    actual1 = run_opt_pass(expr2, relay.transform.SimplifyExpr())
    expected = run_infer_type(relay.cast(x, "int32"))
    assert tvm.ir.structural_equal(actual1, expected)
    actual2 = run_opt_pass(expr3, relay.transform.SimplifyExpr())
    expected = run_infer_type(relay.cast(x, "int64"))
    assert tvm.ir.structural_equal(actual2, expected)
    actual3 = run_opt_pass(expr4, relay.transform.SimplifyExpr())
    expected = run_infer_type(relay.cast(x, "float32"))
    assert tvm.ir.structural_equal(actual3, expected)

    # cannot simplify the narrow cast
    x = relay.var("x", shape=(3, 4, 5), dtype="float32")
    y = relay.var("y", shape=(3, 4), dtype="float32")
    expr1 = relay.cast(x, "int32")  # float32 -> int32 truncates: must stay.
    expr2 = relay.cast_like(expr1, y)
    actual = run_opt_pass(expr2, relay.transform.SimplifyExpr())
    expected = run_infer_type(relay.cast(expr1, "float32"))
    assert tvm.ir.structural_equal(actual, expected)

    x = relay.var("x", shape=(3, 4), dtype="int64")
    expr1 = relay.cast(x, "bool")  # int64 -> bool is lossy as well: must stay.
    expr2 = relay.cast(expr1, "int32")
    actual = run_opt_pass(expr2, relay.transform.SimplifyExpr())
    expected = run_infer_type(expr2)
    assert tvm.ir.structural_equal(actual, expected)
def test_concretize_reshape_like():
    """reshape_like with a statically-shaped reference becomes a plain reshape."""
    src = relay.var("data", shape=(2, 3, 4), dtype="float32")
    template = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
    simplified = run_opt_pass(relay.reshape_like(src, template), relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.reshape(src, (6, 2, 2)))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_reshape_like_attrs():
    """reshape_like honoring lhs_begin/rhs_begin still concretizes correctly."""
    src = relay.var("data", shape=(2, 3, 4), dtype="float32")
    template = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
    # Only dims from lhs_begin=2 are replaced by template dims from rhs_begin=1.
    expr = relay.reshape_like(src, template, lhs_begin=2, rhs_begin=1)
    simplified = run_opt_pass(expr, relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.reshape(src, (2, 3, 2, 2)))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_zeros_like():
    """zeros_like over a static shape becomes a concrete zeros op."""
    elem = "int32"
    template = relay.var("shape_like", shape=(3, 4, 5), dtype=elem)
    simplified = run_opt_pass(relay.zeros_like(template), relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.zeros((3, 4, 5), elem))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_ones_like():
    """ones_like over a static shape becomes a concrete ones op."""
    elem = "int32"
    template = relay.var("shape_like", shape=(3, 4, 5), dtype=elem)
    simplified = run_opt_pass(relay.ones_like(template), relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.ones((3, 4, 5), elem))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_full_like():
    """full_like with a static reference shape becomes a concrete full op."""
    elem = "int32"
    template = relay.var("shape_like", shape=(3, 4, 5), dtype=elem)
    value = relay.var("fill", relay.TensorType((), "float32"))
    simplified = run_opt_pass(relay.full_like(template, value), relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.full(value, (3, 4, 5), elem))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_collapse_sum_like():
    """collapse_sum_like becomes collapse_sum_to once the target shape is known."""
    src = relay.var("data", shape=(3, 3, 3), dtype="float32")
    template = relay.var("shape_like", shape=(3,), dtype="float32")
    simplified = run_opt_pass(
        relay.collapse_sum_like(src, template), relay.transform.SimplifyExpr()
    )
    reference = run_infer_type(relay.collapse_sum_to(src, (3,)))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_broadcast_to_like():
    """broadcast_to_like becomes broadcast_to once the target shape is known."""
    src = relay.var("data", shape=(3,), dtype="float32")
    template = relay.var("shape_like", shape=(3, 3, 3), dtype="float32")
    simplified = run_opt_pass(
        relay.broadcast_to_like(src, template), relay.transform.SimplifyExpr()
    )
    reference = run_infer_type(relay.broadcast_to(src, (3, 3, 3)))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_cast_like():
    """cast_like only needs the reference dtype, so dynamic dims don't block it."""
    dyn = tvm.tir.Any()
    src = relay.var("data", shape=(3, dyn, 5), dtype="float32")
    template = relay.var("dtype_like", shape=(dyn, 3, 3), dtype="int32")
    simplified = run_opt_pass(relay.cast_like(src, template), relay.transform.SimplifyExpr())
    reference = run_infer_type(relay.cast(src, "int32"))
    assert tvm.ir.structural_equal(simplified, reference)
def test_concretize_multiple():
    """Several *_like ops in one gradient-style graph are all concretized
    (and the resulting identities eliminated) in a single pass run."""
    x = relay.var("x", shape=(2, 3), dtype="float32")
    y = relay.var("y", shape=(3,), dtype="float32")
    l = x + y
    dl = relay.ones_like(l)
    dx = relay.zeros_like(x)
    dy = relay.zeros_like(y)
    dx = dx + relay.collapse_sum_like(dl, dx)
    dy = dy + relay.collapse_sum_like(dl, dy)
    ret = relay.Tuple([dx, dy])

    dl_c = relay.ones((2, 3), "float32")
    # NOTE: these are removed by EliminateIdentity
    # dx_c = relay.zeros((2, 3), "float32")
    # dy_c = relay.zeros((3,), "float32")
    dx_c = relay.collapse_sum_to(dl_c, (2, 3))
    dy_c = relay.collapse_sum_to(dl_c, (3,))
    ret_c = relay.Tuple([dx_c, dy_c])

    expected = run_infer_type(ret_c)
    actual = run_opt_pass(ret, relay.transform.SimplifyExpr())
    assert tvm.ir.structural_equal(actual, expected)
def test_simplify_mul_add():
    """Chains of constant adds/muls around x fold into a single x*A + B form."""

    def check_simple_fold(origin_exprs, expect_expr):
        for origin_expr in origin_exprs:
            simple_expr = run_opt_pass(origin_expr, transform.SimplifyExpr())
            assert tvm.ir.structural_equal(simple_expr, expect_expr)

    n = 32
    c1_val = np.random.uniform(size=n).astype("float32")
    c2_val = np.random.uniform(size=n).astype("float32")
    c3_val = np.random.uniform(size=n).astype("float32")
    x = relay.var("x", shape=(n,), dtype="float32")
    c1 = relay.const(c1_val)
    c2 = relay.const(c2_val)
    c3 = relay.const(c3_val)

    # add-add -> add
    origin_exprs = [
        x + c1 + c2,
        c1 + x + c2,
    ]
    expect_expr = x + relay.const(c1_val + c2_val)
    check_simple_fold(origin_exprs, expect_expr)

    # mul-mul -> mul
    origin_exprs = [
        x * c1 * c2,
        c1 * x * c2,
    ]
    expect_expr = x * relay.const(c1_val * c2_val)
    check_simple_fold(origin_exprs, expect_expr)

    # add-mul -> mul-add
    origin_exprs = [
        (x + c1) * c2,
        (c1 + x) * c2,
        c2 * (x + c1),
        c2 * (c1 + x),
    ]
    expect_expr = x * c2 + relay.const(c1_val * c2_val)
    check_simple_fold(origin_exprs, expect_expr)

    # add-mul-add -> mul-add
    origin_exprs = [
        (x + c1) * c2 + c3,
        (c1 + x) * c2 + c3,
        c2 * (x + c1) + c3,
        c2 * (c1 + x) + c3,
        c3 + (x + c1) * c2,
        c3 + (c1 + x) * c2,
        c3 + c2 * (x + c1),
        c3 + c2 * (c1 + x),
    ]
    expect_expr = x * c2 + relay.const(c1_val * c2_val + c3_val)
    check_simple_fold(origin_exprs, expect_expr)

    # mul-add-mul -> mul-add
    origin_exprs = [
        (x * c1 + c2) * c3,
        (c1 * x + c2) * c3,
        (c2 + x * c1) * c3,
        (c2 + c1 * x) * c3,
        c3 * (x * c1 + c2),
        c3 * (c1 * x + c2),
        c3 * (c2 + x * c1),
        c3 * (c2 + c1 * x),
    ]
    expect_expr = x * relay.const(c1_val * c3_val) + relay.const(c2_val * c3_val)
    check_simple_fold(origin_exprs, expect_expr)
def test_simplify_rsqrt():
    """const / sqrt(x) rewrites to rsqrt(x), scaled by the const when != 1."""
    x = relay.var("x", shape=(32, 1, 1), dtype="float32")

    def reference(scale):
        inv_root = relay.rsqrt(x)
        return inv_root if scale == 1 else relay.const(scale) * inv_root

    for scale in [1.0, 2.0, 2.5]:
        simplified = run_opt_pass(relay.const(scale) / relay.sqrt(x), transform.SimplifyExpr())
        want = run_opt_pass(reference(scale), transform.InferType())
        assert tvm.ir.structural_equal(simplified, want)
def test_simplify_dq_argmax():
    """argmax is order-preserving, so the dequantize before it can be dropped."""
    x = relay.var("x", shape=(4, 32, 1, 1), dtype="int8")
    dq = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
    simplified = run_opt_pass(relay.op.argmax(dq, axis=1), transform.SimplifyExpr())
    want = run_opt_pass(relay.op.argmax(x, axis=1), transform.InferType())
    assert tvm.ir.structural_equal(simplified, want)
def test_simplify_dq_argmin():
    """argmin is order-preserving, so the dequantize before it can be dropped."""
    x = relay.var("x", shape=(4, 32, 1, 1), dtype="int8")
    dq = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
    simplified = run_opt_pass(relay.op.argmin(dq, axis=1), transform.SimplifyExpr())
    want = run_opt_pass(relay.op.argmin(x, axis=1), transform.InferType())
    assert tvm.ir.structural_equal(simplified, want)
def test_simplify_dq_argsort():
    """argsort is order-preserving, so the dequantize before it can be dropped."""
    x = relay.var("x", shape=(4, 32, 1, 1), dtype="int8")
    dq = relay.qnn.op.dequantize(x, relay.const(2.0), relay.const(0))
    simplified = run_opt_pass(relay.op.argsort(dq, axis=1), transform.SimplifyExpr())
    want = run_opt_pass(relay.op.argsort(x, axis=1), transform.InferType())
    assert tvm.ir.structural_equal(simplified, want)
def test_simplify_clip_cast():
    """clip(0, 255) -> cast(uint8) -> cast(int32) collapses to just the clip.

    The clip already constrains values to the uint8 range, so the round-trip
    through uint8 cannot change them and both casts are removed.
    """

    def before():
        x = relay.var("x", shape=(4, 8), dtype="int32")
        clip = relay.clip(x, a_min=0.0, a_max=255.0)
        cast = relay.cast(clip, "uint8")
        cast = relay.cast(cast, "int32")
        return relay.Function([x], cast)

    def expected():
        x = relay.var("x", shape=(4, 8), dtype="int32")
        clip = relay.clip(x, a_min=0.0, a_max=255.0)
        return relay.Function([x], clip)

    # NOTE(review): the original listed this before/expected pair twice
    # (before2/expected2 were byte-for-byte duplicates of before1/expected1),
    # so only the single distinct case is kept.
    after = run_opt_pass(before(), transform.SimplifyExpr())
    ref = run_opt_pass(expected(), transform.InferType())
    assert tvm.ir.structural_equal(after, ref), "\nafter: {} \nexpected: {}".format(after, ref)
def test_simplify_cast_clip():
    """A clip that cannot change the casted values is removed; a clip with
    genuinely narrower bounds must be preserved."""

    def before1():
        x = relay.var("x", shape=(4, 8), dtype="int32")
        cast = relay.cast(x, "uint8")
        # [0, 255] is exactly the uint8 value range: clip is redundant.
        clip = relay.clip(cast, a_min=0.0, a_max=255.0)
        return relay.Function([x], clip)

    def expected1():
        x = relay.var("x", shape=(4, 8), dtype="int32")
        cast = relay.cast(x, "uint8")
        return relay.Function([x], cast)

    def before2():
        x = relay.var("x", shape=(4, 8), dtype="uint8")
        clip = relay.clip(x, a_min=0.0, a_max=255.0)
        return relay.Function([x], clip)

    def expected2():
        x = relay.var("x", shape=(4, 8), dtype="uint8")
        return relay.Function([x], x)

    def before3():
        x = relay.var("x", shape=(4, 8), dtype="float32")
        cast = relay.cast(x, "bfloat16")
        # Bounds narrower than the dtype range: clip must stay.
        clip = relay.clip(cast, a_min=-0.2, a_max=0.4)
        return relay.Function([x], clip)

    def expected3():
        x = relay.var("x", shape=(4, 8), dtype="float32")
        cast = relay.cast(x, "bfloat16")
        clip = relay.clip(cast, a_min=-0.2, a_max=0.4)
        return relay.Function([x], clip)

    for before, expected in [
        [before1(), expected1()],
        [before2(), expected2()],
        [before3(), expected3()],
    ]:
        after = run_opt_pass(before, transform.SimplifyExpr())
        expected = run_opt_pass(expected, transform.InferType())
        assert tvm.ir.structural_equal(after, expected), "\nafter: {} \nexpected: {}".format(
            after, expected
        )
def test_simplify_add():
    """x + x is canonicalized into x * 2."""
    inp = relay.var("x", shape=(1, 3, 100, 100), dtype="float32")
    simplified = run_opt_pass(relay.add(inp, inp), transform.SimplifyExpr())
    reference = run_infer_type(relay.multiply(inp, relay.const(2.0)))
    assert tvm.ir.structural_equal(simplified, reference)
def test_binomials():
    """a*x^2 + b*x*y + c*y^2 with real roots folds to a factored (x+p*y)*(x+q*y)
    form; with negative discriminant the expression stays unfactored."""

    def check_simple_fold(origin_exprs, expect_exprs):
        # The simplifier may produce any structurally equivalent factorization,
        # so accept a match against any of the candidate expected forms.
        for origin_expr in origin_exprs:
            simple_expr = run_opt_pass(origin_expr, transform.SimplifyExpr())
            match = False
            for expected in expect_exprs:
                e = run_opt_pass(expected, transform.EliminateCommonSubexpr())
                match = match or tvm.ir.structural_equal(simple_expr, e)
                if match:
                    break
            assert match

    def gen_expected_expressions(x, y, a, b, c, dtype):
        # Normalize so the leading coefficient is 1 (swap the roles of x and y).
        if c == 1 and a > 1:
            swap = a
            a = c
            c = swap
            swap = x
            x = y
            y = swap
        det = b * b - 4 * a * c
        if det < 0:
            # No real roots: cannot factor, expect the input forms unchanged.
            return gen_expressions(x, y, a, b, c)
        # Roots of the quadratic give the two linear factors.
        p_val = (b + sqrt(det)) / (2 * a)
        q_val = (b - sqrt(det)) / (2 * a)
        p = relay.const(p_val, dtype=dtype)
        q = relay.const(q_val, dtype=dtype)
        first_exp = [x + y, y + x] if p_val == 1 else [x + p * y, p * y + x, x + y * p, y * p + x]
        second_exp = [x + y, y + x] if q_val == 1 else [x + q * y, q * y + x, x + y * q, y * q + x]
        final_exp = []
        for f in first_exp:
            for s in second_exp:
                final_exp.append(f * s)
                if not p_val == q_val:
                    final_exp.append(s * f)
        return final_exp

    def gen_expressions(x, y, a, b, c):
        # Every ordering/association of a*x*x + b*x*y + c*y*y.
        first_exp = [x * x] if a == 1 else [a * x * x, x * a * x, x * x * a]
        second_exp = (
            [x * y, y * x]
            if b == 1
            else [b * x * y, x * b * y, x * y * b, b * y * x, y * b * x, y * x * b]
        )
        third_exp = [y * y] if c == 1 else [c * y * y, y * c * y, y * y * c]
        final_exp = []
        for f in first_exp:
            for s in second_exp:
                for t in third_exp:
                    final_exp.append(f + s + t)
                    final_exp.append(f + t + s)
                    final_exp.append(s + f + t)
                    final_exp.append(s + t + f)
                    final_exp.append(t + f + s)
                    final_exp.append(t + s + f)
        return final_exp

    n = 5
    dtypes = ["int32", "float32", "float64"]
    for dtype in dtypes:
        x = relay.var("x", shape=(n,), dtype=dtype)
        y = relay.var("y", shape=(n,), dtype=dtype)

        # (x + y)^2: a=1, b=2, c=1
        a = relay.const(1, dtype=dtype)
        b = relay.const(2, dtype=dtype)
        c = relay.const(1, dtype=dtype)
        origin_exprs = gen_expressions(x, y, a, b, c)
        expect_expr = gen_expected_expressions(x, y, 1, 2, 1, dtype)
        check_simple_fold(origin_exprs, expect_expr)

        # Distinct roots: a=6, b=5, c=1
        a = relay.const(6, dtype=dtype)
        b = relay.const(5, dtype=dtype)
        c = relay.const(1, dtype=dtype)
        origin_exprs = gen_expressions(x, y, a, b, c)
        expect_expr = gen_expected_expressions(x, y, 6, 5, 1, dtype)
        check_simple_fold(origin_exprs, expect_expr)

        # Negative discriminant: a=1, b=1, c=1 cannot be factored.
        a = relay.const(1, dtype=dtype)
        b = relay.const(1, dtype=dtype)
        c = relay.const(1, dtype=dtype)
        origin_exprs = gen_expressions(x, y, a, b, c)
        expect_expr = gen_expected_expressions(x, y, 1, 1, 1, dtype)
        check_simple_fold(origin_exprs, expect_expr)

        # (x + 2y)^2: a=1, b=4, c=4
        a = relay.const(1, dtype=dtype)
        b = relay.const(4, dtype=dtype)
        c = relay.const(4, dtype=dtype)
        origin_exprs = gen_expressions(x, y, a, b, c)
        expect_expr = gen_expected_expressions(x, y, 1, 4, 4, dtype)
        check_simple_fold(origin_exprs, expect_expr)
# Allow running this test file directly; tvm.testing.main() is the pytest entry point.
if __name__ == "__main__":
    tvm.testing.main()
| 32,529 | 34.5131 | 99 | py |
tvm | tvm-main/tests/python/relay/test_ir_structural_equal_hash.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import run_opt_pass
def consistent_equal(x, y, map_free_vars=False):
    """Return structural equality of ``x`` and ``y``, cross-validated.

    Checks that structural equality is commutative and that it agrees with
    structural hashing: structurally equal objects must hash identically.
    (The converse could fail only on a hash collision, which does not occur
    for these test cases.) Raises ``ValueError`` on any inconsistency.
    """
    struct_equal0 = tvm.ir.structural_equal(x, y, map_free_vars)
    struct_equal1 = tvm.ir.structural_equal(y, x, map_free_vars)

    xhash = tvm.ir.structural_hash(x, map_free_vars)
    yhash = tvm.ir.structural_hash(y, map_free_vars)
    if struct_equal0 != struct_equal1:
        # Fixed typo: was "Non-communicative"; equality must be commutative.
        raise ValueError(
            "Non-commutative {} vs {}, sequal0={}, sequal1={}".format(
                x, y, struct_equal0, struct_equal1
            )
        )

    # NOTE: hash collision can happen but should be rare.
    # We can confirm that hash collision doesn't happen for our testcases.
    if struct_equal0 != (xhash == yhash):
        raise ValueError(
            "Inconsistent {} vs {}, sequal={}, xhash={}, yhash={}".format(
                x, y, struct_equal0, xhash, yhash
            )
        )
    return struct_equal0
def test_tensor_type_sequal():
    """Tensor types compare equal exactly when shape and dtype both match."""
    base = relay.TensorType((3, 4), "float32")
    same = relay.TensorType((3, 4), "float32")
    extra_dim = relay.TensorType((3, 4, 5), "float32")
    assert base == same
    assert base != extra_dim

    scalar_a = relay.TensorType((), "float32")
    scalar_b = relay.TensorType((), "float32")
    assert scalar_a == scalar_b
def test_incomplete_type_sequal():
    """Incomplete (hole) types are equal only by pointer identity."""
    shape_hole = relay.IncompleteType(relay.TypeKind.ShapeVar)
    type_hole_a = relay.IncompleteType(relay.TypeKind.Type)
    type_hole_b = relay.IncompleteType(relay.TypeKind.Type)
    # only equal when there is pointer equality
    assert type_hole_a == type_hole_a
    assert shape_hole == shape_hole
    assert shape_hole != type_hole_a
    assert type_hole_a != type_hole_b
def test_type_param_sequal():
    """TypeVars are equal only via pointer identity or via the equality map
    established when they are bound as FuncType parameters."""
    t1 = relay.TypeVar("v1", relay.TypeKind.Type)
    t2 = relay.TypeVar("v2", relay.TypeKind.ShapeVar)
    t3 = relay.TypeVar("v3", relay.TypeKind.Type)

    # only pointer equality and eq_map allow equal params
    assert t1 == t1
    assert t2 == t2
    assert t1 != t2  # different kind
    assert t1 != t3  # not in eq_map

    # function types are the only way to put type params
    # in eq map
    ft1 = relay.FuncType(
        tvm.runtime.convert([]), t1, tvm.runtime.convert([t1]), tvm.runtime.convert([])
    )
    ft2 = relay.FuncType(
        tvm.runtime.convert([]), t3, tvm.runtime.convert([t3]), tvm.runtime.convert([])
    )
    # actually an invalid type because t2 is wrong kind
    ft3 = relay.FuncType(
        tvm.runtime.convert([]), t2, tvm.runtime.convert([t2]), tvm.runtime.convert([])
    )
    assert ft1 == ft2
    assert ft1 != ft3  # kinds still do not match
def test_func_type_sequal():
    """FuncTypes are equal up to consistent renaming of type parameters;
    argument list, order, type-param set, and relations must all match."""
    t1 = relay.TensorType((1, 2), "float32")
    t2 = relay.TensorType((1, 2, 3), "float32")
    tp1 = relay.TypeVar("v1", relay.TypeKind.Type)
    tp2 = relay.TypeVar("v2", relay.TypeKind.Type)
    tp3 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)
    tp4 = relay.TypeVar("v3", relay.TypeKind.ShapeVar)

    broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
    identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")

    tr1 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp1, tp3]), 1, None)
    tr2 = relay.TypeRelation(broadcast, tvm.runtime.convert([tp2, tp4]), 1, None)
    tr3 = relay.TypeRelation(identity, tvm.runtime.convert([tp1, tp3]), 1, None)

    ft = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp1,
        tvm.runtime.convert([tp1, tp3]),
        tvm.runtime.convert([tr1]),
    )
    translate_vars = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp2,
        tvm.runtime.convert([tp2, tp4]),
        tvm.runtime.convert([tr2]),
    )
    # Alpha-equivalent: tp1/tp3 are consistently renamed to tp2/tp4.
    assert ft == translate_vars

    different_args = relay.FuncType(
        tvm.runtime.convert([t1]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([tr1])
    )
    assert ft != different_args

    different_order = relay.FuncType(
        tvm.runtime.convert([t2, t1]),
        tp1,
        tvm.runtime.convert([tp1, tp3]),
        tvm.runtime.convert([tr1]),
    )
    assert ft != different_order

    no_rel = relay.FuncType(
        tvm.runtime.convert([t1, t2]), tp1, tvm.runtime.convert([tp1, tp3]), tvm.runtime.convert([])
    )
    assert ft != no_rel

    more_vars = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp2,
        tvm.runtime.convert([tp1, tp2, tp3]),
        tvm.runtime.convert([tr1]),
    )
    assert ft != more_vars

    all_the_vars = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp1,
        tvm.runtime.convert([tp1, tp2, tp3, tp4]),
        tvm.runtime.convert([tr1, tr2]),
    )
    assert ft != all_the_vars

    different_rel = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp1,
        tvm.runtime.convert([tp1, tp3]),
        tvm.runtime.convert([tr3]),
    )
    assert ft != different_rel

    more_rels = relay.FuncType(
        tvm.runtime.convert([t1, t2]),
        tp1,
        tvm.runtime.convert([tp1, tp3]),
        tvm.runtime.convert([tr1, tr3]),
    )
    assert ft != more_rels
def test_tuple_type_sequal():
    """Tuple types must agree field-by-field and in the same order."""
    field_a = relay.TensorType((1, 2, 3), "float32")
    field_b = relay.TensorType((1, 2, 3, 4), "float32")
    var_a = relay.TypeVar("v1", relay.TypeKind.Type)
    var_b = relay.TypeVar("v2", relay.TypeKind.Type)

    base = relay.TupleType(tvm.runtime.convert([field_a, field_b, var_a]))
    identical = relay.TupleType(tvm.runtime.convert([field_a, field_b, var_a]))
    swapped = relay.TupleType(tvm.runtime.convert([field_b, field_a, var_a]))
    other_var = relay.TupleType(tvm.runtime.convert([field_a, field_b, var_b]))

    # as long as types are alpha-equal and in same order,
    # tuples should be alpha-equal
    assert base == identical
    assert base != swapped
    assert base != other_var
def test_type_relation_sequal():
    """TypeRelations are equal only when the relation function (by pointer),
    arguments, input count, and attributes all agree."""
    rank2 = relay.TensorType((1, 2), "float32")
    rank3 = relay.TensorType((1, 2, 3), "float32")
    rank4 = relay.TensorType((1, 2, 3, 4), "float32")

    # EnvFuncs compare by pointer equality, so fetch each one once and reuse it.
    broadcast = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Broadcast")
    identity = tvm.ir.EnvFunc.get("tvm.relay.type_relation.Identity")

    attrs = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    attrs_copy = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    attrs_other = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))

    base = relay.TypeRelation(broadcast, tvm.runtime.convert([rank2, rank3]), 1, attrs)
    duplicate = relay.TypeRelation(broadcast, tvm.runtime.convert([rank2, rank3]), 1, attrs)
    other_func = relay.TypeRelation(identity, tvm.runtime.convert([rank2, rank3]), 1, attrs)
    reordered = relay.TypeRelation(broadcast, tvm.runtime.convert([rank3, rank2]), 1, attrs)
    other_args = relay.TypeRelation(broadcast, tvm.runtime.convert([rank3, rank4]), 1, attrs)
    other_attrs = relay.TypeRelation(broadcast, tvm.runtime.convert([rank2, rank3]), 1, attrs_other)
    copied_attrs = relay.TypeRelation(broadcast, tvm.runtime.convert([rank2, rank3]), 1, attrs_copy)
    wider = relay.TypeRelation(identity, tvm.runtime.convert([rank2, rank4, rank3]), 2, attrs)
    other_num_inputs = relay.TypeRelation(
        identity, tvm.runtime.convert([rank2, rank4, rank3]), 1, attrs_other
    )

    # Function, argument list, input count, and attr contents must all match;
    # structurally-equal attr nodes count as equal.
    assert base == duplicate
    assert base != other_func
    assert base != reordered
    assert base != other_args
    assert base != other_attrs
    assert base == copied_attrs
    assert base != wider
    assert wider != other_num_inputs
def test_type_call_sequal():
    """TypeCall equality requires the same type function and structurally equal
    arguments with matching order and arity.

    Fix: the original constructed `different_arg` but never asserted against
    it, so the "one argument differs" case was silently untested.
    """
    h1 = relay.GlobalTypeVar("h1")
    h2 = relay.GlobalTypeVar("h2")
    t1 = relay.TensorType((1, 2), "float32")
    t2 = relay.TensorType((1, 2, 3), "float32")
    t3 = relay.TensorType((1, 2, 3, 4), "float32")
    t4 = relay.TensorType((), "float32")
    tc = relay.TypeCall(h1, [t1, t2, t3])
    same = relay.TypeCall(h1, [t1, t2, t3])
    different_func = relay.TypeCall(h2, [t1, t2, t3])
    different_arg = relay.TypeCall(h1, [t1, t2, t4])
    fewer_args = relay.TypeCall(h1, [t1, t2])
    more_args = relay.TypeCall(h1, [t1, t2, t3, t4])
    different_order_args = relay.TypeCall(h1, [t3, t2, t1])
    assert tc == same
    assert tc != different_func
    # Previously missing: a call whose final argument's type differs.
    assert tc != different_arg
    assert tc != fewer_args
    assert tc != more_args
    assert tc != different_order_args
def test_constant_sequal():
    """Constants compare by value, not by object identity."""
    one = relay.const(1)
    two = relay.const(2)
    assert consistent_equal(one, one)
    assert not consistent_equal(one, two)
    # A freshly built constant with the same value still compares equal.
    assert consistent_equal(one, relay.const(1))
def test_type_node_sequal():
    """Distinct free TypeVars are never equal, whether or not kinds match."""
    lhs = relay.TypeVar("v1", 6)
    rhs = relay.TypeVar("v2", 6)
    assert not consistent_equal(lhs, rhs)

    # Also unequal when the kinds differ.
    lhs = relay.TypeVar("v1", 0)
    rhs = relay.TypeVar("v2", 6)
    assert not consistent_equal(lhs, rhs)
def test_type_node_incompatible_sequal():
    """A type node never compares equal to an expression node."""
    type_var = relay.TypeVar("v1", 6)
    expr_var = relay.Var("v2")
    assert not consistent_equal(type_var, expr_var)
def test_expr_node_incompatible_sequal():
    """An expression node never compares equal to a pattern node."""
    expr_var = relay.Var("v1")
    pattern = relay.PatternVar(relay.Var("v2"))
    assert not consistent_equal(expr_var, pattern)
def test_var_sequal():
    """Free vars compare by pointer; let-bound vars compare via the equality
    mapping, and their type annotations must agree."""
    a = relay.Var("v1")
    b = relay.Var("v2")
    # Free variables: only pointer equality holds.
    assert consistent_equal(a, a)
    assert not consistent_equal(a, b)

    # A Let binding lets the comparator map one bound var onto another.
    bound_a = relay.Let(a, relay.const(1), a)
    bound_b = relay.Let(b, relay.const(1), b)
    mixed = relay.Let(a, relay.const(1), b)
    assert consistent_equal(bound_a, bound_b)
    assert not consistent_equal(bound_a, mixed)

    # Type annotations on the bound vars also participate in equality.
    int32_ty = relay.TensorType([], "int32")
    int32_ty_copy = relay.TensorType([], "int32")
    int64_ty = relay.TensorType([], "int64")
    c = relay.Var("v3", int32_ty)
    d = relay.Var("v4", int32_ty_copy)
    e = relay.Var("v5", int64_ty)
    bound_c = relay.Let(c, relay.const(1), c)
    bound_d = relay.Let(d, relay.const(1), d)
    bound_e = relay.Let(e, relay.const(1), e)
    # Structurally identical annotations compare equal.
    assert consistent_equal(bound_c, bound_d)
    # Different dtypes do not.
    assert not consistent_equal(bound_c, bound_e)
    # An annotated var never equals an unannotated one.
    assert not consistent_equal(bound_a, bound_c)
def test_global_var_sequal():
    """Smoke test: global vars compare only by pointer identity."""
    gv_a = relay.GlobalVar("v1")
    gv_b = relay.GlobalVar("v2")
    assert consistent_equal(gv_a, gv_a)
    assert not consistent_equal(gv_a, gv_b)
def test_tuple_sequal():
    """Tuples compare field-by-field, in order, with let-mapped vars honored."""
    v0 = relay.Var("v0")
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    # unit value is a valid tuple
    assert consistent_equal(relay.Tuple([]), relay.Tuple([]))
    tup = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
    same = relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])])
    assert consistent_equal(tup, same)
    # use the eq_map
    let_tup = relay.Let(v1, tup, v1)
    let_mapped = relay.Let(
        v2, relay.Tuple([v0, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]), v2
    )
    assert consistent_equal(let_tup, let_mapped)
    # NOTE(review): the "different" tuples below start with v1 while `tup`
    # starts with v0, so inequality may already follow from the free-var
    # mismatch rather than from the property each name suggests — confirm
    # whether v0 was intended here.
    more_fields = relay.Tuple(
        [v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)]), v2]
    )
    assert not consistent_equal(tup, more_fields)
    fewer_fields = relay.Tuple([v1, relay.const(2), relay.const(3)])
    assert not consistent_equal(tup, fewer_fields)
    different_end = relay.Tuple([v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(5)])])
    assert not consistent_equal(tup, different_end)
    different_start = relay.Tuple(
        [v2, relay.const(2), relay.const(3), relay.Tuple([relay.const(4)])]
    )
    assert not consistent_equal(tup, different_start)
    longer_at_end = relay.Tuple(
        [v1, relay.const(2), relay.const(3), relay.Tuple([relay.const(4), relay.const(5)])]
    )
    assert not consistent_equal(tup, longer_at_end)
def test_tuple_get_item_sequal():
    """TupleGetItem nodes must agree on both the tuple expr and the index."""
    tup_a = relay.Var("x")
    tup_b = relay.Var("y")
    assert not consistent_equal(relay.TupleGetItem(tup_a, 1), relay.TupleGetItem(tup_b, 1))
    assert not consistent_equal(relay.TupleGetItem(tup_a, 1), relay.TupleGetItem(tup_a, 2))
    assert consistent_equal(relay.TupleGetItem(tup_a, 1), relay.TupleGetItem(tup_a, 1))
def test_function_attr():
    """Two otherwise identical functions with different "FuncName" attribute
    values must not compare equal."""
    x0 = relay.var("x0", shape=(10, 10))
    w00 = relay.var("w00", shape=(10, 10))
    w01 = relay.var("w01", shape=(10, 10))
    w02 = relay.var("w02", shape=(10, 10))
    # (x0 + w00 - w01) * w02, tagged "a"
    func0 = relay.Function(
        [x0, w00, w01, w02],
        relay.multiply(relay.subtract(relay.add(x0, w00), w01), w02),
    ).with_attr("FuncName", "a")

    x1 = relay.var("x1", shape=(10, 10))
    w10 = relay.var("w10", shape=(10, 10))
    w11 = relay.var("w11", shape=(10, 10))
    w12 = relay.var("w12", shape=(10, 10))
    # Same computation, tagged "b"
    func1 = relay.Function(
        [x1, w10, w11, w12],
        relay.multiply(relay.subtract(relay.add(x1, w10), w11), w12),
    ).with_attr("FuncName", "b")

    assert not consistent_equal(func0, func1)
def test_function_sequal():
    """Functions must agree on params (count, order, types), body, return
    type (including both-None), and type params (count, order, kinds).

    Fix: removed the unused local `vret` (and its `tvm.nd.array(np.ones(1))`
    allocation), which was never referenced by any assertion.
    """
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((4, 5, 6), "int8")
    tt3 = relay.TupleType([tt1, tt2])
    v1 = relay.Var("v1", tt1)
    v2 = relay.Var("v2", tt2)
    v3 = relay.Var("v3", tt3)
    v4 = relay.Var("v4", tt2)
    tp1 = relay.TypeVar("tp1", relay.TypeKind.Type)
    tp2 = relay.TypeVar("tp2", relay.TypeKind.Type)
    tp3 = relay.TypeVar("tp3", relay.TypeKind.ShapeVar)
    tp4 = relay.TypeVar("tp4", relay.TypeKind.ShapeVar)
    basic_args = [relay.Var("v3", tt1), relay.Var("v4", tt2)]
    basic_tps = [tp1, tp2]
    func = relay.Function([v1, v2], v1, tt2, basic_tps)
    # Fresh-but-equivalent params map onto the originals.
    mapped = relay.Function(basic_args, basic_args[0], tt2, basic_tps)
    assert consistent_equal(func, mapped)
    fewer_params = relay.Function([relay.Var("v4", tt2)], v4, tt2, basic_tps)
    assert not consistent_equal(func, fewer_params)
    more_params = relay.Function(
        [relay.Var("v3", tt1), relay.Var("v4", tt2), relay.Var("v2", tt2)], v4, tt2, basic_tps
    )
    assert not consistent_equal(func, more_params)
    params_unordered = relay.Function([v2, v1], v1, tt2, basic_tps)
    assert not consistent_equal(func, params_unordered)
    params_mismatch = relay.Function([v1, v3], v1, tt2, basic_tps)
    assert not consistent_equal(func, params_mismatch)
    # also would not typecheck
    ret_type_mismatch = relay.Function(basic_args, v4, tt1, basic_tps)
    assert not consistent_equal(func, ret_type_mismatch)
    # also mis-typed
    different_body = relay.Function(basic_args, v3, tt2, basic_tps)
    assert not consistent_equal(func, different_body)
    fewer_type_params = relay.Function(basic_args, v4, tt2, [tp1])
    assert not consistent_equal(func, fewer_type_params)
    more_type_params = relay.Function(basic_args, v4, tt2, [tp1, tp2, tp3])
    assert not consistent_equal(func, more_type_params)
    type_params_unordered = relay.Function(basic_args, v4, tt2, [tp2, tp1])
    assert not consistent_equal(func, type_params_unordered)
    different_type_params = relay.Function(basic_args, v4, tt2, [tp3, tp4])
    assert not consistent_equal(func, different_type_params)
    # a well-typed example that also differs in body, ret type, and type params
    tupled_example = relay.Function(basic_args, relay.Tuple([v3, v4]), tt3)
    assert not consistent_equal(func, tupled_example)
    # nullable return type
    no_ret_type = relay.Function(basic_args, v4, None, [tp1, tp2])
    # both null
    assert consistent_equal(no_ret_type, no_ret_type)
    # one null (checked in both directions)
    assert not consistent_equal(func, no_ret_type)
    assert not consistent_equal(no_ret_type, func)
def test_call_sequal():
    """Calls must agree on callee, args (count/order/values), attrs (by
    structural content), and explicit type arguments."""
    v1 = relay.Var("v1")
    v2 = relay.Var("v2")
    attr1 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    # Structurally identical attrs node built separately — should still be equal.
    attr1_same = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4))
    attr2 = tvm.ir.make_node("attrs.TestAttrs", name="attr", padding=(3, 4, 4))
    tt1 = relay.TensorType((1, 2, 3), "float32")
    tt2 = relay.TensorType((), "int8")
    basic_args = [relay.const(1), relay.const(2), v2, relay.Tuple([])]
    # manually writing out args to ensure that args does not rely on
    # pointer equality
    call = relay.Call(v1, [relay.const(1), relay.const(2), v2, relay.Tuple([])], attr1, [tt1])
    same = relay.Call(v1, basic_args, attr1, [tt1])
    assert consistent_equal(call, same)
    different_fn = relay.Call(v2, basic_args, attr1, [tt1])
    assert not consistent_equal(call, different_fn)
    fewer_args = relay.Call(v1, [relay.const(1), relay.const(2), v2], attr1, [tt1])
    assert not consistent_equal(call, fewer_args)
    reordered_args = relay.Call(
        v1, [relay.const(2), relay.const(1), relay.Tuple([]), v2], attr1, [tt1]
    )
    assert not consistent_equal(call, reordered_args)
    different_args = relay.Call(v1, [relay.const(1), relay.const(2), relay.const(3)], attr1, [tt1])
    assert not consistent_equal(call, different_args)
    more_args = relay.Call(
        v1,
        [relay.const(1), relay.const(2), v2, relay.Tuple([]), relay.const(3), relay.const(4)],
        attr1,
        [tt1],
    )
    assert not consistent_equal(call, more_args)
    different_attrs = relay.Call(v1, basic_args, attr2, [tt1])
    assert not consistent_equal(call, different_attrs)
    # Attrs compare structurally, not by pointer.
    same_attrs = relay.Call(v1, basic_args, attr1_same, [tt1])
    assert consistent_equal(call, same_attrs)
    # Explicit type args must also match (presence, count, and content).
    no_type_args = relay.Call(v1, basic_args, attr1)
    assert not consistent_equal(call, no_type_args)
    more_type_args = relay.Call(v1, basic_args, attr1, [tt1, tt2])
    assert not consistent_equal(call, more_type_args)
    different_type_arg = relay.Call(v1, basic_args, attr1, [tt2])
    assert not consistent_equal(call, different_type_arg)
def test_let_sequal():
    """Let nodes compare via alpha-equivalence; value, body, and the bound
    var's type annotation must all agree."""
    float_ty = relay.TensorType((), "float32")
    int8_ty = relay.TensorType((), "int8")
    untyped = relay.Var("v1")
    typed = relay.Var("v1", float_ty)
    other = relay.Var("v2")
    stray = relay.Var("v3")

    base = relay.Let(untyped, relay.const(2), untyped)
    # Alpha-renamed binding compares equal.
    renamed = relay.Let(other, relay.const(2), other)
    assert consistent_equal(base, renamed)
    # Body referencing an unrelated free var does not.
    mismatched_var = relay.Let(other, relay.const(2), stray)
    assert not consistent_equal(base, mismatched_var)
    different_value = relay.Let(other, relay.const(3), other)
    assert not consistent_equal(base, different_value)
    different_body = relay.Let(other, relay.const(3), relay.const(12))
    assert not consistent_equal(base, different_body)

    # Annotated bindings: the annotations themselves must match.
    let_with_type = relay.Let(typed, relay.const(2), typed)
    same_type = relay.Let(typed, relay.const(2), typed)
    assert consistent_equal(let_with_type, same_type)
    assert not consistent_equal(base, let_with_type)
    int8_var = relay.Var("v1", int8_ty)
    different_type = relay.Let(int8_var, relay.const(2), int8_var)
    assert not consistent_equal(let_with_type, different_type)
def test_if_sequal():
    """If nodes are equal only when condition, then-branch, and else-branch
    all match."""
    cond_a = relay.Var("v1")
    cond_b = relay.Var("v2")
    base = relay.If(cond_a, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
    duplicate = relay.If(cond_a, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
    assert consistent_equal(base, duplicate)
    other_cond = relay.If(cond_b, relay.const(1), relay.Tuple([relay.const(2), relay.const(3)]))
    assert not consistent_equal(base, other_cond)
    other_true = relay.If(cond_a, relay.const(2), relay.Tuple([relay.const(2), relay.const(3)]))
    assert not consistent_equal(base, other_true)
    other_false = relay.If(cond_a, relay.const(1), relay.Tuple([]))
    assert not consistent_equal(base, other_false)
def test_constructor_sequal():
    """Smoke test: ADT constructors compare by pointer identity."""
    prelude = relay.prelude.Prelude(tvm.IRModule())
    _, cons_ctor, nil_ctor = prelude.mod.get_type("List")
    assert consistent_equal(nil_ctor, nil_ctor)
    assert consistent_equal(cons_ctor, cons_ctor)
    assert not consistent_equal(nil_ctor, cons_ctor)
def test_match_sequal():
    """Match nodes must agree on scrutinee and on every clause (pattern and
    body), in the same order; alpha-renamed pattern vars still compare equal."""
    mod = tvm.IRModule()
    p = relay.prelude.Prelude(mod)
    _, cons, nil = p.mod.get_type("List")
    _, none, some = p.mod.get_type("Option")
    x = relay.Var("x")
    y = relay.Var("y")
    nil_case = relay.Clause(relay.PatternConstructor(nil), nil())
    cons_case = relay.Clause(
        relay.PatternConstructor(cons, [relay.PatternVar(x), relay.PatternVar(y)]), cons(x, y)
    )
    # Same clause as cons_case, but with freshly named pattern vars —
    # should still be equal (alpha-equivalence of pattern bindings).
    z = relay.Var("z")
    a = relay.Var("a")
    equivalent_cons = relay.Clause(
        relay.PatternConstructor(cons, [relay.PatternVar(z), relay.PatternVar(a)]), cons(z, a)
    )
    # Scrutinee: the list [1, 2].
    data = cons(relay.const(1), cons(relay.const(2), nil()))
    match = relay.Match(data, [nil_case, cons_case])
    equivalent = relay.Match(data, [nil_case, equivalent_cons])
    empty = relay.Match(data, [])
    no_cons = relay.Match(data, [nil_case])
    no_nil = relay.Match(data, [cons_case])
    different_data = relay.Match(nil(), [nil_case, cons_case])
    different_order = relay.Match(data, [cons_case, nil_case])
    # Same pattern, different clause body.
    different_nil = relay.Match(
        data, [relay.Clause(relay.PatternConstructor(nil), cons(nil(), nil())), cons_case]
    )
    # Wildcard sub-patterns instead of pattern vars, and a different body.
    different_cons = relay.Match(
        data,
        [
            nil_case,
            relay.Clause(
                relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternWildcard()]),
                nil(),
            ),
        ],
    )
    # Extra trailing clause.
    another_case = relay.Match(
        data, [nil_case, cons_case, relay.Clause(relay.PatternWildcard(), nil())]
    )
    # Clauses built from the Option ADT's constructors instead of List's.
    wrong_constructors = relay.Match(
        data,
        [
            relay.Clause(relay.PatternConstructor(none), nil()),
            relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(x)]), cons(x, nil())),
        ],
    )
    tvm.ir.assert_structural_equal(match, match)
    assert consistent_equal(match, match)
    assert consistent_equal(match, equivalent)
    assert not consistent_equal(match, no_cons)
    assert not consistent_equal(match, no_nil)
    assert not consistent_equal(match, empty)
    assert not consistent_equal(match, different_data)
    assert not consistent_equal(match, different_order)
    assert not consistent_equal(match, different_nil)
    assert not consistent_equal(match, different_cons)
    assert not consistent_equal(match, another_case)
    assert not consistent_equal(match, wrong_constructors)
def test_op_sequal():
    """Operators compare by name only."""
    add_a = relay.op.get("add")
    add_b = relay.op.get("add")
    assert consistent_equal(add_a, add_b)
    take_op = relay.op.get("take")
    assert not consistent_equal(add_a, take_op)
def test_graph_equal():
    """Structural equality distinguishes shared dataflow nodes from
    duplicated-but-algebraically-equal subtrees.

    Fix: the original asserted `consistent_equal(z0, z1)` twice in a row;
    the redundant duplicate assertion was removed.
    """
    x = relay.var("x")
    y0 = relay.add(x, x)
    z0 = relay.add(y0, y0)
    y1 = relay.add(x, x)
    z1 = relay.add(y1, y1)
    # z3 rebuilds add(x, x) twice instead of sharing one node.
    z3 = relay.add(relay.add(x, x), relay.add(x, x))
    assert consistent_equal(z0, z1)
    # z3's dataflow format is different from z0:
    # z0 is computed from a common y0 node, so Relay views them as
    # different programs. Check the difference in the text format.
    assert not consistent_equal(z0, z3)
def test_hash_unequal():
    """Functions with the same structure and shapes compare equal; changing
    only the parameter shapes breaks equality."""
    x1 = relay.var("x1", shape=(10, 10), dtype="float32")
    y1 = relay.var("y1", shape=(10, 10), dtype="float32")
    base_fn = relay.Function([x1, y1], relay.add(x1, y1))

    # Identical structure, fresh variables, same shapes/dtypes.
    x2 = relay.var("x2", shape=(10, 10), dtype="float32")
    y2 = relay.var("y2", shape=(10, 10), dtype="float32")
    assert consistent_equal(base_fn, relay.Function([x2, y2], relay.add(x2, y2)))

    # Identical structure, but different parameter shapes.
    x3 = relay.var("x3", shape=(20, 10), dtype="float32")
    y3 = relay.var("y3", shape=(20, 10), dtype="float32")
    assert not consistent_equal(base_fn, relay.Function([x3, y3], relay.add(x3, y3)))
def test_tuple_match():
    """Two independently constructed, identical tuple-pattern matches are equal."""

    def build_match():
        # fresh vars each time; equality must not rely on shared objects
        a = relay.Var("a")
        b = relay.Var("b")
        clause = relay.Clause(
            relay.PatternTuple([relay.PatternVar(a), relay.PatternVar(b)]), a + b
        )
        return relay.Match(relay.Tuple([relay.const(1), relay.const(1)]), [clause])

    assert consistent_equal(build_match(), build_match())
def test_fn_attribute():
    """A function carrying an extra attribute is unequal to the bare one,
    in both comparison directions."""
    # plain add function
    a = relay.var("a", shape=(10, 10))
    b = relay.var("b", shape=(10, 10))
    plain_fn = relay.Function([a, b], relay.add(a, b))
    plain_fn = run_opt_pass(plain_fn, relay.transform.InferType())

    # same computation, tagged with a test attribute
    c = relay.var("c", shape=(10, 10))
    d = relay.var("d", shape=(10, 10))
    tagged_fn = relay.Function([c, d], relay.add(c, d))
    tagged_fn = tagged_fn.with_attr("TestAttribute", "test")
    tagged_fn = run_opt_pass(tagged_fn, relay.transform.InferType())

    assert not consistent_equal(tagged_fn, plain_fn)
    assert not consistent_equal(plain_fn, tagged_fn)
def test_fn_vid_map():
    """Attribute dicts keyed by a Var's vid (or by the Var itself) compare
    consistently across independently built functions."""

    def build(with_vid):
        x = relay.var("x", shape=(10,), dtype="float32")
        key = x.vid if with_vid else x
        return relay.Function([x], x).with_attr("dict", {key: 1})

    assert consistent_equal(build(True), build(True))
    assert consistent_equal(build(False), build(False))
def test_lets():
    """Structural equality of let-chains: alpha renaming is fine, but the
    order in which bindings appear is significant."""
    shape = (5, 5)

    def func1():
        # Reference: a0 = p0+1; a1 = p1+1; a2 = a0+a1
        sb = relay.ScopeBuilder()
        p0 = relay.var("p0", shape=shape)
        p1 = relay.var("p1", shape=shape)
        a0 = sb.let("a0", relay.add(p0, relay.const(1)))
        a1 = sb.let("a1", relay.add(p1, relay.const(1)))
        a2 = sb.let("a2", relay.add(a0, a1))
        sb.ret(a2)
        return relay.Function([p0, p1], sb.get())

    def func2():
        # Alpha conversion is structurally equal: the binding names differ
        # but each position binds the same computation as in func1.
        sb = relay.ScopeBuilder()
        p0 = relay.var("p0", shape=shape)
        p1 = relay.var("p1", shape=shape)
        a1 = sb.let("a1", relay.add(p0, relay.const(1)))
        a0 = sb.let("a0", relay.add(p1, relay.const(1)))
        a2 = sb.let("a2", relay.add(a1, a0))
        sb.ret(a2)
        return relay.Function([p0, p1], sb.get())

    def func3():
        # But changing the order of bindings is not structurally equal
        # (even though algebraically equal): here the first binding uses
        # p1 and the second p0, the reverse of func1.
        sb = relay.ScopeBuilder()
        p0 = relay.var("p0", shape=shape)
        p1 = relay.var("p1", shape=shape)
        a1 = sb.let("a1", relay.add(p1, relay.const(1)))
        a0 = sb.let("a0", relay.add(p0, relay.const(1)))
        a2 = sb.let("a2", relay.add(a1, a0))
        sb.ret(a2)
        return relay.Function([p0, p1], sb.get())

    assert tvm.ir.structural_equal(func1(), func2())
    assert not tvm.ir.structural_equal(func1(), func3())
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 27,405 | 33.214732 | 100 | py |
tvm | tvm-main/tests/python/relay/test_debug.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay import var, const, create_executor
from tvm.relay.op import debug
# Module-level flag flipped by the debug callbacks below so the tests can
# observe that the callback actually fired during evaluation.
_test_debug_hit = False
def test_debug():
    """The debug op fires its callback during evaluation and passes the
    wrapped value through unchanged."""
    global _test_debug_hit
    x = var("x", shape=(), dtype="int32")
    _test_debug_hit = False

    def record_hit(_):
        # callback invoked by the debug op with the runtime value
        global _test_debug_hit
        _test_debug_hit = True

    prog = debug(x, debug_func=record_hit)
    result = create_executor().evaluate(prog, {x: const(1, "int32")})
    assert _test_debug_hit
    assert result.numpy() == 1
def test_debug_with_expr():
    """The debug op also works when wrapping a compound expression
    (x + x * x at x = 2 evaluates to 6).

    Fix: the original reset `_test_debug_hit = False` twice in a row; the
    redundant duplicate statement was removed.
    """
    global _test_debug_hit
    _test_debug_hit = False
    x = var("x", shape=(), dtype="int32")

    def did_exec(x):
        global _test_debug_hit
        _test_debug_hit = True

    prog = debug(x + x * x, debug_func=did_exec)
    result = create_executor().evaluate(prog, {x: const(2, "int32")})
    assert _test_debug_hit
    assert result.numpy() == 6
| 1,680 | 30.716981 | 69 | py |
tvm | tvm-main/tests/python/relay/test_change_batch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import synthetic
from tvm.relay import transform
def test_change_batch_synthetic():
    """ChangeBatch rewrites the first input's batch dimension to the
    requested size on the synthetic workload."""
    net, _params = synthetic.get_workload()
    rewritten = transform.ChangeBatch({net["main"].params[0]: 0}, batch_size=123)(net)
    assert rewritten["main"].checked_type.ret_type.shape[0] == 123
# Allow running this test file directly.
if __name__ == "__main__":
    test_change_batch_synthetic()
| 1,202 | 36.59375 | 84 | py |
tvm | tvm-main/tests/python/relay/test_analysis_basic_block_normal_form.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay.analysis import check_basic_block_normal_form
def test_one_block():
    """Pure dataflow (no lets or ifs) trivially satisfies basic block
    normal form."""
    v = relay.var("x")
    doubled = relay.add(v, v)
    check_basic_block_normal_form(relay.add(v, doubled))
def test_let():
    """A single let binding is already in basic block normal form."""
    value = relay.var("x")
    bound = relay.var("y")
    check_basic_block_normal_form(relay.Let(bound, value, bound))
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if():
    # `shared` is used in both branches with no dominating binding, so its
    # scope is ambiguous; the checker must reject this program (xfail).
    cond = relay.var("cond", dtype="bool", shape=())
    shared = relay.var("shared")
    true_branch = shared
    false_branch = relay.add(shared, shared)
    body = relay.If(cond, true_branch, false_branch)
    """
    The program below violates basic block normal form, as the scope of %shared
    is ambiguous and should not be in that of true branch.
    free_var %cond: bool
    if (%cond) {
      free_var %shared
      %shared
    } else {
      add(%shared, %shared)
    }
    """
    check_basic_block_normal_form(body)
def test_valid_if():
    # Same shape as test_invalid_if, but a Let dominates both uses of
    # `shared`, pinning its scope — so the checker accepts the program.
    cond = relay.var("cond", dtype="bool", shape=())
    shared = relay.var("shared")
    true_branch = shared
    false_branch = relay.add(shared, shared)
    body = relay.If(cond, true_branch, false_branch)
    shared_bound = relay.var("shared_bound", shape=(1,), dtype="float32")
    body = relay.Let(shared, shared_bound, body)
    """
    The program below uses let binding to control the scope of %shared, which
    follows the basic block normal form.
    free_var %shared_bound: Tensor[(1), float32]
    let %shared = %shared_bound;
    free_var %cond: bool
    if (%cond) {
      %shared
    } else {
      add(%shared, %shared)
    }
    """
    check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_invalid_if2():
    """
    fn (%x: float32) {
      %0 = equal(%x, 2f);
      if (%0) {
        %1 = add(%x, 1f);
        multiply(%1, 2f)
      } else {
        multiply(%1, 1f)
      }
    }
    """
    # v1 (= x + 1) is shared by both branches but never let-bound before the
    # If, so its scope is ambiguous — the checker must reject this (xfail).
    x = relay.var("x", shape=(), dtype="float32")
    one = relay.const(1, dtype="float32")
    two = relay.const(2, dtype="float32")
    v1 = relay.add(x, one)
    v2 = relay.equal(x, two)
    true_branch = relay.multiply(v1, two)
    false_branch = relay.multiply(v1, one)
    body = relay.If(v2, true_branch, false_branch)
    func = relay.Function([x], body)
    check_basic_block_normal_form(func)
def test_valid_if2():
    """
    fn (%x: float32) {
      let %v1 = add(%x, 1f);
      %0 = equal(%x, 2f);
      if (%0) {
        multiply(%v1, 2f)
      } else {
        multiply(%v1, 1f)
      }
    }
    """
    # Fixed version of test_invalid_if2: the shared value is let-bound
    # before the If, so both branch uses are properly scoped.
    x = relay.var("x", shape=(), dtype="float32")
    one = relay.const(1, dtype="float32")
    two = relay.const(2, dtype="float32")
    v1 = relay.var("v1")
    v2 = relay.equal(x, two)
    true_branch = relay.multiply(v1, two)
    false_branch = relay.multiply(v1, one)
    body = relay.If(v2, true_branch, false_branch)
    body = relay.Let(v1, relay.add(x, one), body)
    func = relay.Function([x], body)
    check_basic_block_normal_form(func)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_func():
    # x2 (= x + x) is shared by two nested functions without a dominating
    # binding, so its scope is ambiguous — the checker must reject (xfail).
    x = relay.var("x", shape=(1,), dtype="float32")  # , a)
    y = relay.var("y", shape=(1,), dtype="float32")  # , a)
    z = relay.var("z", shape=(1,), dtype="float32")  # , a)
    x2 = relay.add(x, x)
    func_a = relay.Function([y], relay.add(x2, y))  # , a, [a])
    func_b = relay.Function([z], relay.add(x2, z))  # , a, [a])
    body = relay.Tuple([func_a, func_b])
    body = relay.Function([x], body)
    """
    fn (%x: Tensor[(1), float32]) {
      %1 = fn (%y: Tensor[(1), float32]) {
        %0 = add(%x, %x);
        add(%0, %y)
      };
      %2 = fn (%z: Tensor[(1), float32]) {
        add(%0, %z)
      };
      (%1, %2)
    }
    """
    check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_return():
    # NOTE(review): this body is byte-for-byte identical to test_func above;
    # presumably it was meant to exercise a distinct higher-order-return
    # shape — confirm against upstream intent.
    x = relay.var("x", shape=(1,), dtype="float32")  # , a)
    y = relay.var("y", shape=(1,), dtype="float32")  # , a)
    z = relay.var("z", shape=(1,), dtype="float32")  # , a)
    x2 = relay.add(x, x)
    func_a = relay.Function([y], relay.add(x2, y))  # , a, [a])
    func_b = relay.Function([z], relay.add(x2, z))  # , a, [a])
    body = relay.Tuple([func_a, func_b])
    body = relay.Function([x], body)
    """
    fn (%x: Tensor[(1), float32]) {
      %1 = fn (%y: Tensor[(1), float32]) {
        %0 = add(%x, %x);
        add(%0, %y)
      };
      %2 = fn (%z: Tensor[(1), float32]) {
        add(%0, %z)
      };
      (%1, %2)
    }
    """
    check_basic_block_normal_form(body)
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_higher_order_nested():
    # `shared` (= s + s) is used by functions in both branches of the If
    # without a dominating binding — the checker must reject this (xfail).
    x = relay.var("x", dtype="float32", shape=(1,))
    s = relay.var("s", dtype="float32", shape=(1,))
    shared = relay.add(s, s)
    func_true = relay.Function([x], relay.add(x, shared))
    choice_t = relay.FuncType([], relay.scalar_type("bool"))
    f = relay.Var("f", choice_t)
    z = relay.Var("z")
    body = relay.If(f(), func_true, relay.Function([z], relay.add(z, shared)))
    top = relay.Function([f, s], body)
    """
    fn (%f: fn () -> bool, %s: Tensor[(1), float32]) {
      %0 = %f();
      if (%0) {
        fn (%x: Tensor[(1), float32]) {
          %1 = add(%s, %s);
          add(%x, %1)
        }
      } else {
        fn (%z) {
          add(%z, %1)
        }
      }
    }
    """
    check_basic_block_normal_form(top)
# Allow running this test file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 6,218 | 27.925581 | 79 | py |
tvm | tvm-main/tests/python/relay/test_pass_to_graph_normal_form.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay import op, create_executor, transform
from tvm.relay.analysis import Feature
from tvm.relay.analysis import detect_feature
def run_opt_pass(expr, opt_pass):
    """Apply `opt_pass` to `expr` wrapped in a fresh module and unwrap the
    resulting "main": the whole function if `expr` was a Function, else its body."""
    transformed = opt_pass(tvm.IRModule.from_expr(expr))
    main = transformed["main"]
    if isinstance(expr, relay.Function):
        return main
    return main.body
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    """Evaluate `expr(*args)` on the llvm target and compare against
    `expected_result` within relative tolerance `rtol`."""
    if mod is None:
        mod = tvm.IRModule()
    dev = tvm.device("llvm", 0)
    got = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)(*args)
    np.testing.assert_allclose(got.numpy(), expected_result, rtol=rtol)
def test_implicit_share():
    """ToGraphNormalForm removes let bindings (fLet feature disappears)
    without changing the computed value."""
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    # let x = 1; let y = x + x; let z = y + y; z + z  ==> 8
    inner = relay.Let(z, op.add(y, y), op.add(z, z))
    chained = relay.Let(y, op.add(x, x), inner)
    f = relay.Function([], relay.Let(x, relay.const(1), chained))
    g = run_opt_pass(f, transform.ToGraphNormalForm())
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
def test_round_trip():
    """ToGraphNormalForm followed by ToANormalForm preserves the result."""
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    # let x = 1; let y = x + x; let z = y + y; z + z  ==> 8
    inner = relay.Let(z, op.add(y, y), op.add(z, z))
    chained = relay.Let(y, op.add(x, x), inner)
    f = relay.Function([], relay.Let(x, relay.const(1), chained))
    g = run_opt_pass(f, transform.ToGraphNormalForm())
    h = run_opt_pass(g, transform.ToANormalForm())
    assert Feature.fLet in detect_feature(f)
    assert Feature.fLet not in detect_feature(g)
    # All three forms evaluate to the same value.
    check_eval(f, [], 8.0)
    check_eval(g, [], 8.0)
    check_eval(h, [], 8.0)
# Allow running this test file directly.
if __name__ == "__main__":
    test_implicit_share()
    test_round_trip()
| 2,551 | 33.486486 | 86 | py |
tvm | tvm-main/tests/python/relay/test_pass_legalize_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test legalize pass"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
def run_opt_pass(expr, passes):
    """Run one pass (or a list of passes) over `expr` at opt level 3 and
    unwrap "main": the whole function if `expr` was a Function, else its body."""
    if not isinstance(passes, list):
        passes = [passes]
    mod = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        mod = tvm.transform.Sequential(passes)(mod)
    main = mod["main"]
    return main if isinstance(expr, relay.Function) else main.body
@tvm.testing.uses_gpu
def test_legalize_conv2d_NHWC():
    """test legalize NHWC conv2d to enable tensorcore"""

    def _test_legalize_conv2d(data_shape, kernel_shape, pad_shape, dtype, do_pad=True):
        # pad_shape = (db, di, do): extra padding expected on the batch,
        # input-channel, and output-channel dims respectively.  do_pad=False
        # asserts that legalization leaves the graph unchanged.
        out_channel = kernel_shape[3]
        out_shape = list(data_shape)
        out_shape[3] = out_channel
        db, di, do = pad_shape

        def before():
            # Plain NHWC/HWIO conv2d, prior to legalization.
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
            y = relay.nn.conv2d(
                x,
                weight,
                channels=out_channel,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            y = relay.Function([x, weight], y)
            return y

        def legalize_conv2d(attrs, inputs, types):
            # Run the topi legalization under the cuda target so the
            # tensorcore-specific rules apply.
            with tvm.target.Target("cuda"):
                return topi.nn.conv2d_legalize(attrs, inputs, types)

        def expected():
            # Reference graph: pad data/weight up to tensorcore-friendly
            # sizes, convolve, then slice the result back to out_shape.
            if not do_pad:
                return before()
            x = relay.var("x", shape=data_shape, dtype=dtype)
            if db or di:
                x_pad = relay.nn.pad(x, pad_width=((0, db), (0, 0), (0, 0), (0, di)))
            else:
                x_pad = x
            weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
            if di or do:
                weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, 0), (0, di), (0, do)))
            else:
                weight_pad = weight
            y_pad = relay.nn.conv2d(
                x_pad,
                weight=weight_pad,
                channels=out_channel + do,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="NHWC",
                kernel_layout="HWIO",
            )
            if db or do:
                y = relay.strided_slice(y_pad, begin=[0, 0, 0, 0], end=out_shape)
            else:
                y = y_pad
            y = relay.Function([x, weight], y)
            return y

        with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)

    for dtype in ["float16", "int8", "int4"]:
        # conv2d pad batch
        _test_legalize_conv2d((7, 16, 16, 64), (3, 3, 64, 64), (1, 0, 0), dtype)
        _test_legalize_conv2d((3, 16, 16, 64), (3, 3, 64, 64), (5, 0, 0), dtype)
        _test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), dtype, False)
        # conv2d pad in_channel
        _test_legalize_conv2d((8, 16, 16, 63), (3, 3, 63, 64), (0, 1, 0), dtype)
        _test_legalize_conv2d((8, 16, 16, 33), (3, 3, 33, 64), (0, 15, 0), dtype)
        _test_legalize_conv2d((8, 16, 16, 13), (3, 3, 13, 64), (0, 3, 0), dtype)
        _test_legalize_conv2d((8, 16, 16, 1), (3, 3, 1, 64), (0, 0, 0), dtype, False)
        # conv2d pad out_channel
        _test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 63), (0, 0, 1), dtype)
        _test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 33), (0, 0, 31), dtype)
        _test_legalize_conv2d((8, 16, 16, 64), (3, 3, 64, 1), (0, 0, 0), dtype, False)
@tvm.testing.uses_gpu
def test_legalize_conv2d_HWNC():
    """test legalize HWNC conv2d to enable tensorcore"""
    def _test_legalize_conv2d(data_shape, kernel_shape, pad_shape, dtype, do_pad=True):
        # pad_shape = (db, di, do): expected padding on batch, input-channel and
        # output-channel dimensions; do_pad=False means no change is expected.
        out_channel = kernel_shape[2]
        out_shape = list(data_shape)
        out_shape[3] = out_channel
        db, di, do = pad_shape
        def before():
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
            y = relay.nn.conv2d(
                x,
                weight,
                channels=out_channel,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="HWNC",
                kernel_layout="HWOI",
            )
            y = relay.Function([x, weight], y)
            return y
        def legalize_conv2d(attrs, inputs, types):
            # Use the CUDA legalization rule so tensorcore padding applies.
            with tvm.target.Target("cuda"):
                return topi.nn.conv2d_legalize(attrs, inputs, types)
        def expected():
            if not do_pad:
                return before()
            x = relay.var("x", shape=data_shape, dtype=dtype)
            if db or di:
                # HWNC layout: batch is axis 2, input channel is axis 3.
                x_pad = relay.nn.pad(x, pad_width=((0, 0), (0, 0), (0, db), (0, di)))
            else:
                x_pad = x
            weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
            if di or do:
                # HWOI layout: output channel is axis 2, input channel is axis 3.
                weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, 0), (0, do), (0, di)))
            else:
                weight_pad = weight
            y_pad = relay.nn.conv2d(
                x_pad,
                weight=weight_pad,
                channels=out_channel + do,
                kernel_size=(3, 3),
                padding=(1, 1),
                data_layout="HWNC",
                kernel_layout="HWOI",
            )
            if db or do:
                y = relay.strided_slice(y_pad, begin=[0, 0, 0, 0], end=out_shape)
            else:
                y = y_pad
            y = relay.Function([x, weight], y)
            return y
        with TempOpAttr("nn.conv2d", "FTVMLegalize", legalize_conv2d):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())
            assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
    # conv2d pad batch
    _test_legalize_conv2d((16, 16, 7, 64), (3, 3, 64, 64), (1, 0, 0), "int8")
    _test_legalize_conv2d((16, 16, 3, 64), (3, 3, 64, 64), (5, 0, 0), "int8")
    _test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), "int8", False)
    _test_legalize_conv2d((16, 16, 7, 64), (3, 3, 64, 64), (1, 0, 0), "int4")
    _test_legalize_conv2d((16, 16, 3, 64), (3, 3, 64, 64), (5, 0, 0), "int4")
    _test_legalize_conv2d((2, 16, 16, 64), (3, 3, 64, 64), (0, 0, 0), "int4", False)
    # conv2d pad in_channel
    _test_legalize_conv2d((16, 16, 8, 63), (3, 3, 64, 63), (0, 1, 0), "int8")
    _test_legalize_conv2d((16, 16, 8, 33), (3, 3, 64, 33), (0, 15, 0), "int8")
    _test_legalize_conv2d((16, 16, 8, 13), (3, 3, 64, 13), (0, 3, 0), "int8")
    _test_legalize_conv2d((16, 16, 8, 1), (3, 3, 64, 1), (0, 0, 0), "int8", False)
    _test_legalize_conv2d((16, 16, 8, 63), (3, 3, 64, 63), (0, 1, 0), "int4")
    _test_legalize_conv2d((16, 16, 8, 33), (3, 3, 64, 33), (0, 31, 0), "int4")
    _test_legalize_conv2d((16, 16, 8, 13), (3, 3, 64, 13), (0, 19, 0), "int4")
    _test_legalize_conv2d((16, 16, 8, 1), (3, 3, 64, 1), (0, 0, 0), "int4", False)
    # conv2d pad out_channel
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 63, 64), (0, 0, 1), "int8")
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 33, 64), (0, 0, 31), "int8")
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 1, 64), (0, 0, 0), "int8", False)
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 63, 64), (0, 0, 1), "int4")
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 33, 64), (0, 0, 7), "int4")
    _test_legalize_conv2d((16, 16, 8, 64), (3, 3, 1, 64), (0, 0, 0), "int4", False)
@tvm.testing.uses_gpu
def test_legalize_dense():
    """Check that dense legalization pads M/K/N to tensorcore-friendly sizes."""
    def _test_legalize_dense(data_shape, kernel_shape, pad_shape, dtype, do_pad=True, units=None):
        """test legalize dense to enable tensorcore"""
        # pad_shape = (dm, dk, dn): expected padding on the M, K and N dims.
        M, K = data_shape
        N, _ = kernel_shape
        out_shape = (M, N)
        dm, dk, dn = pad_shape
        def before():
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
            y = relay.nn.dense(x, weight, units)
            y = relay.Function([x, weight], y)
            return y
        def legalize_dense(attrs, inputs, types):
            # Use the CUDA legalization rule so tensorcore padding applies.
            with tvm.target.Target("cuda"):
                return topi.nn.dense_legalize(attrs, inputs, types)
        def expected():
            if not do_pad:
                return before()
            x = relay.var("x", shape=data_shape, dtype=dtype)
            if dm or dk:
                x_pad = relay.nn.pad(x, pad_width=((0, dm), (0, dk)))
            else:
                x_pad = x
            weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
            if dn or dk:
                weight_pad = relay.nn.pad(weight, pad_width=((0, dn), (0, dk)))
            else:
                weight_pad = weight
            # units must track the padded output dimension when it was given.
            y_pad = relay.nn.dense(x_pad, weight_pad, units=N + dn if units else None)
            if dm or dn:
                y = relay.strided_slice(y_pad, begin=[0, 0], end=out_shape)
            else:
                y = y_pad
            y = relay.Function([x, weight], y)
            return y
        with TempOpAttr("nn.dense", "FTVMLegalize", legalize_dense):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())
            assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
    # dense
    for dtype in ["float16", "int8"]:
        _test_legalize_dense((8, 16), (32, 16), (0, 0, 0), dtype, False)
        _test_legalize_dense((7, 16), (32, 16), (1, 0, 0), dtype)
        _test_legalize_dense((8, 15), (32, 15), (0, 1, 0), dtype)
        _test_legalize_dense((8, 16), (31, 16), (0, 0, 1), dtype)
        _test_legalize_dense((7, 15), (31, 15), (1, 1, 1), dtype)
        _test_legalize_dense((3, 16), (32, 16), (5, 0, 0), dtype)
        _test_legalize_dense((1, 16), (32, 16), (0, 0, 0), dtype, False)
    # Test if units parameter is correctly updated
    _test_legalize_dense((8, 16), (30, 16), (0, 0, 2), "float16", units=30)
    _test_legalize_dense((8, 32), (32, 32), (0, 0, 0), "int4", False)
    _test_legalize_dense((7, 32), (32, 32), (1, 0, 0), "int4")
    _test_legalize_dense((8, 31), (32, 31), (0, 1, 0), "int4")
    _test_legalize_dense((8, 32), (31, 32), (0, 0, 1), "int4")
    _test_legalize_dense((7, 31), (31, 31), (1, 1, 1), "int4")
    _test_legalize_dense((3, 32), (32, 32), (5, 0, 0), "int4")
    _test_legalize_dense((8, 16), (32, 16), (0, 16, 0), "int4")
    _test_legalize_dense((2, 16), (32, 16), (0, 0, 0), "int4", False)
@tvm.testing.uses_gpu
def test_legalize_batch_matmul():
    """Check batch_matmul legalization padding for all transpose combinations."""
    def _test_legalize_batch_matmul(
        data_shape, kernel_shape, pad_shape, dtype, do_pad=True, transpose_a=False, transpose_b=True
    ):
        """test legalize dense to enable tensorcore"""
        # pad_shape = (dm, dk, dn): expected padding on the M, K and N dims.
        # The position of M / N within each operand depends on the transpose flags.
        if transpose_a:
            B, _, M = data_shape
        else:
            B, M, _ = data_shape
        if transpose_b:
            _, N, _ = kernel_shape
        else:
            _, _, N = kernel_shape
        out_shape = (B, M, N)
        dm, dk, dn = pad_shape
        def before():
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
            y = relay.nn.batch_matmul(x, weight, transpose_a=transpose_a, transpose_b=transpose_b)
            y = relay.Function([x, weight], y)
            return y
        def legalize_batch_matmul(attrs, inputs, types):
            # Use the CUDA legalization rule so tensorcore padding applies.
            with tvm.target.Target("cuda"):
                return topi.nn.batch_matmul_legalize(attrs, inputs, types)
        def expected():
            if not do_pad:
                return before()
            x = relay.var("x", shape=data_shape, dtype=dtype)
            weight = relay.var("weight", shape=(kernel_shape), dtype=dtype)
            if dm or dk:
                if transpose_a:
                    # data is (B, K, M): pad K then M.
                    x_pad = relay.nn.pad(x, pad_width=((0, 0), (0, dk), (0, dm)))
                else:
                    x_pad = relay.nn.pad(x, pad_width=((0, 0), (0, dm), (0, dk)))
            else:
                x_pad = x
            if dn or dk:
                if transpose_b:
                    # weight is (B, N, K): pad N then K.
                    weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, dn), (0, dk)))
                else:
                    weight_pad = relay.nn.pad(weight, pad_width=((0, 0), (0, dk), (0, dn)))
            else:
                weight_pad = weight
            y_pad = relay.nn.batch_matmul(
                x_pad,
                weight_pad,
                transpose_a=transpose_a,
                transpose_b=transpose_b,
            )
            if dm or dn:
                y = relay.strided_slice(y_pad, begin=[0, 0, 0], end=out_shape)
            else:
                y = y_pad
            y = relay.Function([x, weight], y)
            return y
        with TempOpAttr("nn.batch_matmul", "FTVMLegalize", legalize_batch_matmul):
            a = before()
            a = run_opt_pass(a, transform.Legalize())
            b = run_opt_pass(expected(), transform.InferType())
            assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) + "Expected = \n" + str(b)
    for dtype in ["float16", "int8"]:
        _test_legalize_batch_matmul((16, 8, 16), (16, 32, 16), (0, 0, 0), dtype, False)
        _test_legalize_batch_matmul((16, 7, 16), (16, 32, 16), (1, 0, 0), dtype)
        _test_legalize_batch_matmul((16, 8, 15), (16, 32, 15), (0, 1, 0), dtype)
        _test_legalize_batch_matmul((16, 8, 16), (16, 31, 16), (0, 0, 1), dtype)
        _test_legalize_batch_matmul((16, 7, 15), (16, 31, 15), (1, 1, 1), dtype)
        _test_legalize_batch_matmul((16, 3, 16), (16, 32, 16), (5, 0, 0), dtype)
        _test_legalize_batch_matmul((16, 2, 16), (16, 32, 16), (0, 0, 0), dtype, False)
    _test_legalize_batch_matmul((16, 8, 32), (16, 32, 32), (0, 0, 0), "int4", False)
    _test_legalize_batch_matmul((16, 7, 32), (16, 32, 32), (1, 0, 0), "int4")
    _test_legalize_batch_matmul((16, 8, 31), (16, 32, 31), (0, 1, 0), "int4")
    _test_legalize_batch_matmul((16, 8, 32), (16, 31, 32), (0, 0, 1), "int4")
    _test_legalize_batch_matmul((16, 7, 31), (16, 31, 31), (1, 1, 1), "int4")
    _test_legalize_batch_matmul((16, 3, 32), (16, 32, 32), (5, 0, 0), "int4")
    _test_legalize_batch_matmul((16, 8, 16), (16, 32, 16), (0, 16, 0), "int4")
    _test_legalize_batch_matmul((16, 2, 16), (16, 32, 16), (0, 0, 0), "int4", False)
    _test_legalize_batch_matmul(
        (16, 8, 16), (16, 16, 32), (0, 0, 0), "float16", False, transpose_b=False
    )
    _test_legalize_batch_matmul(
        (16, 16, 8), (16, 32, 16), (0, 0, 0), "float16", False, transpose_a=True
    )
if __name__ == "__main__":
    # Run each legalize test in order when executed as a script.
    for _case in (
        test_legalize_conv2d_NHWC,
        test_legalize_conv2d_HWNC,
        test_legalize_dense,
        test_legalize_batch_matmul,
    ):
        _case()
| 16,104 | 41.270341 | 100 | py |
tvm | tvm-main/tests/python/relay/test_pass_combine_parallel_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import relay
from tvm.relay import transform
def run_combine_parallel(expr, min_num_branches=3, to_batch=True):
    """Run CombineParallelDense on *expr* and return the resulting main function."""
    module = tvm.IRModule.from_expr(expr)
    combined = transform.CombineParallelDense(min_num_branches, to_batch)(module)
    return combined["main"]
def run_opt_pass(expr, opt_pass):
    """Infer types, apply *opt_pass*, and return the optimized main function."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    module = tvm.IRModule.from_expr(expr)
    typed = tvm.relay.transform.InferType()(module)
    return opt_pass(typed)["main"]
def test_combine_parallel_dense():
    """Simple testcase. One dense cannot be combined due to shape mismatch"""
    def before(x, w1, w2, w3, w4):
        # Four parallel dense ops sharing x; w3 has a different output dim.
        args = [x, w1, w2, w3, w4]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        # y3 cannot be combined
        y3 = relay.nn.dense(x, w3)
        y4 = relay.nn.dense(x, w4)
        y = relay.Tuple((y1, y2, y3, y4))
        return relay.Function(args, y)
    def expected(x, w1, w2, w3, w4):
        # use a fixed order of args so alpha equal check can pass
        args = [x, w1, w2, w3, w4]
        # The three same-shape branches are fused into one batch_matmul,
        # then split back and squeezed to the original 2-D shapes.
        x_stacked = relay.stack((x, x, x), axis=0)
        w = relay.stack((w1, w2, w4), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        (y1, y2, y4) = relay.split(y, 3)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y4 = relay.squeeze(y4, [0])
        # y3 cannot be combined
        y3 = relay.nn.dense(x, w3)
        y = relay.Tuple((y1, y2, y3, y4))
        return relay.Function(args, y)
    def check(i, j, k):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        w3 = relay.var("w3", shape=(j + 1, k))
        w4 = relay.var("w4", shape=(j, k))
        y_before = before(x, w1, w2, w3, w4)
        y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, w3, w4)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4)
    check(100, 200, 300)
def test_combine_parallel_dense_biasadd():
    """Testcase of combining dense + 1d biasadd"""
    def before(x, w1, w2, b1, b2):
        args = [x, w1, w2, b1, b2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, is_2d_bias):
        args = [x, w1, w2, b1, b2]
        # Combined form: one batch_matmul plus one batched bias add.
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        if not is_2d_bias:
            # 1-D biases need an extra leading dim before stacking.
            b1 = relay.expand_dims(b1, 0)
            b2 = relay.expand_dims(b2, 0)
        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(i, j, k, is_2d_bias):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        if is_2d_bias:
            b1 = relay.var("b1", shape=(i, j))
            b2 = relay.var("b2", shape=(i, j))
        else:
            b1 = relay.var("b1", shape=(j,))
            b2 = relay.var("b2", shape=(j,))
        y_before = before(x, w1, w2, b1, b2)
        y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, b1, b2, is_2d_bias)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4, False)
    check(100, 200, 300, False)
    check(3, 5, 4, True)
    check(100, 200, 300, True)
def test_combine_parallel_dense_biasadd_scale_reshape():
    """Testcase of combining dense + 1d biasadd + multiply with non-fused reshape"""
    def before(x, w1, w2, b1, b2, scale1, scale2, newshape):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y1 = relay.multiply(y1, scale1)
        y2 = relay.multiply(y2, scale2)
        y1 = relay.reshape(y1, newshape=newshape)
        y2 = relay.reshape(y2, newshape=newshape)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, scale1, scale2, newshape):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        # dense, bias add and scale are all combined into batched ops;
        # the trailing reshape stays per-branch (it is not fused).
        x_stacked = relay.stack((x, x), axis=0)
        w = relay.stack((w1, w2), axis=0)
        y = relay.nn.batch_matmul(x_stacked, w)
        b1 = relay.expand_dims(b1, 0)
        b2 = relay.expand_dims(b2, 0)
        b = relay.stack((b1, b2), axis=0)
        y = relay.add(y, b)
        scale1 = relay.expand_dims(scale1, 0)
        scale2 = relay.expand_dims(scale2, 0)
        scale = relay.stack((scale1, scale2), axis=0)
        y = relay.multiply(y, scale)
        (y1, y2) = relay.split(y, 2)
        y1 = relay.squeeze(y1, [0])
        y2 = relay.squeeze(y2, [0])
        y1 = relay.reshape(y1, newshape=newshape)
        y2 = relay.reshape(y2, newshape=newshape)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(i, j, k, scale1, scale2, newshape):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(j, k))
        b1 = relay.var("b1", shape=(j,))
        b2 = relay.var("b2", shape=(j,))
        scale1 = relay.var("scale1", shape=(1,))
        scale2 = relay.var("scale2", shape=(1,))
        y_before = before(x, w1, w2, b1, b2, scale1, scale2, newshape)
        y = run_opt_pass(y_before, transform.CombineParallelDense(min_num_branches=2))
        y_expected = expected(x, w1, w2, b1, b2, scale1, scale2, newshape)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4, 0.5, 0.25, (1, 1, 15))
    check(100, 200, 300, 0.5, 0.25, (1, 1, 20000))
def test_combine_parallel_dense_flat():
    """Simple testcase. All matmul of different output dim can be combined"""
    def before(x, w1, w2, w3):
        args = [x, w1, w2, w3]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y3 = relay.nn.dense(x, w3)
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def expected(x, w1, w2, w3, j):
        args = [x, w1, w2, w3]
        # to_batch=False mode: weights of sizes j, 2j, 3j are concatenated into
        # one (6j, k) dense, then the 6j output columns are sliced back apart
        # (slice_mode="size": end values are lengths, not stop indices).
        w_stacked = relay.concatenate((w1, w2, w3), axis=0)
        y = relay.nn.dense(x, w_stacked, units=6 * j)
        strides = [1, 1]
        y1 = relay.strided_slice(y, begin=[0, 0], end=[-1, j], strides=strides, slice_mode="size")
        y2 = relay.strided_slice(
            y, begin=[0, j], end=[-1, 2 * j], strides=strides, slice_mode="size"
        )
        y3 = relay.strided_slice(
            y, begin=[0, 3 * j], end=[-1, 3 * j], strides=strides, slice_mode="size"
        )
        y = relay.Tuple((y1, y2, y3))
        return relay.Function(args, y)
    def check(i, j, k):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(2 * j, k))
        w3 = relay.var("w3", shape=(3 * j, k))
        y_before = before(x, w1, w2, w3)
        combine_pass = transform.CombineParallelDense(min_num_branches=3, to_batch=False)
        y = run_opt_pass(y_before, combine_pass)
        y_expected = expected(x, w1, w2, w3, j)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4)
    check(100, 200, 300)
def test_combine_parallel_dense_flat_biasadd():
    """Testcase of combining dense + 1d biasadd with different out dims"""
    def before(x, w1, w2, b1, b2):
        args = [x, w1, w2, b1, b2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2):
        args = [x, w1, w2, b1, b2]
        w_stacked = relay.concatenate((w1, w2), axis=0)
        y = relay.nn.dense(x, w_stacked, units=3 * j)
        n_out_dims = max(len(bias_shape1), 2)
        # Scalar or broadcast (last-dim == 1) biases must be expanded/repeated
        # to the full branch width before the two biases can be concatenated.
        if len(bias_shape1) == 0:
            b1 = relay.repeat(relay.expand_dims(b1, -1), j, 0)
        elif bias_shape1[-1] == 1:
            b1 = relay.repeat(b1, j, len(bias_shape1) - 1)
        if len(bias_shape2) == 0:
            b2 = relay.repeat(relay.expand_dims(b2, -1), 2 * j, 0)
        elif bias_shape2[-1] == 1:
            b2 = relay.repeat(b2, 2 * j, len(bias_shape2) - 1)
        b = relay.concatenate((b1, b2), axis=max(0, len(bias_shape1) - 1))
        y = relay.add(y, b)
        # Slice the combined output back into the two branches
        # (slice_mode="size": end values are lengths).
        begin = [0 for _ in range(n_out_dims - 1)]
        end = [-1 for _ in range(n_out_dims - 1)]
        strides = [1 for _ in range(n_out_dims)]
        y1 = relay.strided_slice(
            y, begin=begin + [0], end=end + [j], strides=strides, slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=begin + [j], end=end + [2 * j], strides=strides, slice_mode="size"
        )
        return relay.Function(args, relay.Tuple((y1, y2)))
    def check(i, j, k, bias_shape1, bias_shape2):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(2 * j, k))
        b1 = relay.var("b1", shape=bias_shape1)
        b2 = relay.var("b2", shape=bias_shape2)
        y_before = before(x, w1, w2, b1, b2)
        combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)
        y = run_opt_pass(y_before, combine_pass)
        y_expected = expected(x, w1, w2, b1, b2, j, bias_shape1, bias_shape2)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4, (), ())
    check(3, 5, 4, (1,), (1,))
    check(3, 5, 4, (5,), (1,))
    check(3, 5, 4, (1,), (10,))
    check(3, 5, 4, (3, 1), (3, 1))
    check(3, 5, 4, (3, 5), (3, 10))
    check(3, 5, 4, (3, 1), (3, 10))
    check(3, 5, 4, (3, 5), (3, 1))
    check(3, 5, 4, (9, 3, 5), (9, 3, 10))
    check(3, 5, 4, (9, 3, 5), (9, 3, 1))
    check(3, 5, 4, (9, 3, 1), (9, 3, 10))
def test_combine_parallel_dense_flat_biasadd_scale_reshape():
    """Testcase of combining dense with different out dims
    following bias add, scale, reshape ops
    """
    def before(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        y1 = relay.nn.dense(x, w1)
        y2 = relay.nn.dense(x, w2)
        y1 = relay.add(y1, b1)
        y2 = relay.add(y2, b2)
        y1 = relay.multiply(y1, scale1)
        y2 = relay.multiply(y2, scale2)
        y1 = relay.reshape(y1, newshape=newshape1)
        y2 = relay.reshape(y2, newshape=newshape2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2, j):
        args = [x, w1, w2, b1, b2, scale1, scale2]
        # dense/bias/scale combined into flat ops; reshapes remain per branch.
        w_stacked = relay.concatenate((w1, w2), axis=0)
        y = relay.nn.dense(x, w_stacked, units=3 * j)
        b = relay.concatenate((b1, b2), axis=0)
        y = relay.add(y, b)
        # Each (1,) scale is broadcast to its branch width via repeat.
        scale1 = relay.repeat(scale1, j, 0)
        scale2 = relay.repeat(scale2, 2 * j, 0)
        scale = relay.concatenate((scale1, scale2), axis=0)
        y = relay.multiply(y, scale)
        strides = [1, 1]
        y1 = relay.strided_slice(y, begin=[0, 0], end=[-1, j], strides=strides, slice_mode="size")
        y2 = relay.strided_slice(
            y, begin=[0, j], end=[-1, 2 * j], strides=strides, slice_mode="size"
        )
        y1 = relay.reshape(y1, newshape=newshape1)
        y2 = relay.reshape(y2, newshape=newshape2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def check(i, j, k, scale1, scale2, newshape1, newshape2):
        x = relay.var("x", shape=(i, k))
        w1 = relay.var("w1", shape=(j, k))
        w2 = relay.var("w2", shape=(2 * j, k))
        b1 = relay.var("b1", shape=(j,))
        b2 = relay.var("b2", shape=(2 * j,))
        scale1 = relay.var("scale1", shape=(1,))
        scale2 = relay.var("scale2", shape=(1,))
        y_before = before(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2)
        combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)
        y = run_opt_pass(y_before, combine_pass)
        y_expected = expected(x, w1, w2, b1, b2, scale1, scale2, newshape1, newshape2, j)
        y_expected = run_opt_pass(y_expected, transform.InferType())
        tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
    check(3, 5, 4, 0.5, 0.25, (1, 1, 15), (1, 1, 30))
    check(100, 200, 300, 0.5, 0.25, (1, 1, 20000), (1, 1, 40000))
def test_combine_parallel_dense_expand_dims():
    """Verify that the correct slice axis is selected after the combined dense."""
    def before(x, w1, w2):
        # Each branch appends an expand_dims, so the slice after combining must
        # still target axis 1 (the dense output axis), not the new last axis.
        args = [x, w1, w2]
        y1 = relay.nn.dense(x, w1)
        y1 = relay.expand_dims(y1, axis=2)
        y2 = relay.nn.dense(x, w2)
        y2 = relay.expand_dims(y2, axis=2)
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    def expected(x, w1, w2):
        args = [x, w1, w2]
        w_stacked = relay.concatenate((w1, w2), axis=0)
        y = relay.nn.dense(x, w_stacked, units=24)
        y = relay.expand_dims(y, axis=2)
        # slice_mode="size": sizes 16 and 8 along axis 1 recover the branches.
        strides = [1, 1, 1]
        y1 = relay.strided_slice(
            y, begin=[0, 0, 0], end=[-1, 16, -1], strides=strides, slice_mode="size"
        )
        y2 = relay.strided_slice(
            y, begin=[0, 16, 0], end=[-1, 8, -1], strides=strides, slice_mode="size"
        )
        y = relay.Tuple((y1, y2))
        return relay.Function(args, y)
    x = relay.var("x", shape=(2, 32))
    w1 = relay.var("w1", shape=(16, 32))
    w2 = relay.var("w2", shape=(8, 32))
    y_before = before(x, w1, w2)
    combine_pass = transform.CombineParallelDense(min_num_branches=2, to_batch=False)
    y = run_opt_pass(y_before, combine_pass)
    y_expected = expected(x, w1, w2)
    y_expected = run_opt_pass(y_expected, transform.InferType())
    tvm.ir.assert_structural_equal(y, y_expected, map_free_vars=True)
if __name__ == "__main__":
    # Discover and run all tests in this file through TVM's pytest wrapper.
    tvm.testing.main()
| 15,442 | 37.036946 | 98 | py |
tvm | tvm-main/tests/python/relay/test_expr_functor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor, ExprMutator, ExprVisitor
def check_visit(expr):
    """Exercise the three expression traversers on *expr*.

    The bare ExprFunctor must raise NotImplementedError, ExprVisitor must
    traverse without error, and the default ExprMutator must act as the
    identity.
    """
    raised = False
    try:
        ExprFunctor().visit(expr)
    except NotImplementedError:
        raised = True
    assert raised
    ExprVisitor().visit(expr)
    mutated = ExprMutator().visit(expr)
    assert expr == mutated
def test_constant():
    """Visitors must handle a scalar constant."""
    check_visit(relay.const(1.0))
def test_tuple():
    """Visitors must handle a Tuple node."""
    t = relay.Tuple([relay.var("x", shape=())])
    check_visit(t)
def test_var():
    """Visitors must handle a local variable."""
    v = relay.var("x", shape=())
    check_visit(v)
def test_global():
    """Visitors must handle a GlobalVar."""
    v = relay.GlobalVar("f")
    check_visit(v)
def test_function():
    """Visitors must handle a full Function with params and a return type."""
    x = relay.var("x", shape=())
    y = relay.var("y", shape=())
    params = [x, y]
    body = x + y
    ret_type = relay.TensorType(())
    type_params = []
    attrs = None  # attrs intentionally left unset (None)
    f = relay.Function(params, body, ret_type, type_params, attrs)
    check_visit(f)
def test_call():
    """Visitors must handle a Call node (operator application)."""
    x = relay.var("x", shape=())
    y = relay.var("y", shape=())
    call = relay.op.add(x, y)
    check_visit(call)
def test_let():
    """Visitors must handle a Let binding."""
    x = relay.var("x", shape=())
    value = relay.const(2.0)
    body = x + x
    l = relay.Let(x, value, body)
    check_visit(l)
def test_ite():
    """Visitors must handle an If expression."""
    cond = relay.var("x", shape=(), dtype="bool")
    ite = relay.If(cond, cond, cond)
    check_visit(ite)
def test_get_item():
    """Visitors must handle TupleGetItem."""
    t = relay.Tuple([relay.var("x", shape=())])
    t = relay.TupleGetItem(t, 0)
    check_visit(t)
def test_ref_create():
    """Visitors must handle RefCreate."""
    r = relay.expr.RefCreate(relay.const(1.0))
    check_visit(r)
def test_ref_read():
    """Visitors must handle RefRead."""
    ref = relay.expr.RefCreate(relay.const(1.0))
    r = relay.expr.RefRead(ref)
    check_visit(r)
def test_ref_write():
    """Visitors must handle RefWrite."""
    ref = relay.expr.RefCreate(relay.const(1.0))
    r = relay.expr.RefWrite(ref, relay.const(2.0))
    check_visit(r)
def test_memo():
    """Build a deeply shared expression (each level reuses the previous one
    twice); visitors must memoize shared subexpressions or traversal would be
    exponential."""
    expr = relay.const(1)
    for _ in range(100):
        expr = expr + expr
    check_visit(expr)
def test_match():
    """Visitors must handle Match nodes, using the prelude's map function."""
    p = relay.prelude.Prelude()
    check_visit(p.mod[p.map])
def test_match_completeness():
    """The mutator must preserve a Match node's completeness flag."""
    p = relay.prelude.Prelude()
    _, _, nil = p.mod.get_type("List")
    for completeness in [True, False]:
        match_expr = relay.adt.Match(nil, [], complete=completeness)
        result_expr = ExprMutator().visit(match_expr)
        # ensure the mutator doesn't mangle the completeness flag
        assert result_expr.complete == completeness
if __name__ == "__main__":
    # Run every test in this file.  Fix: test_get_item is defined above but
    # was missing from this list, so it never ran under direct execution.
    test_constant()
    test_tuple()
    test_var()
    test_global()
    test_function()
    test_call()
    test_let()
    test_ite()
    test_get_item()
    test_ref_create()
    test_ref_read()
    test_ref_write()
    test_memo()
    test_match()
    test_match_completeness()
| 3,503 | 22.36 | 68 | py |
tvm | tvm-main/tests/python/relay/test_pass_vars.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay.analysis import (
free_vars,
free_type_vars,
bound_vars,
bound_type_vars,
all_vars,
all_type_vars,
)
def assert_vars_match(actual, expected):
    """Assert both sequences have the same length and equal elements in order."""
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        assert got == want
def test_free_vars():
    """A bare var is free; let-bound and parameter-bound vars are not."""
    ty = relay.TensorType([], "int32")
    x = relay.Var("x", ty)
    fvx = free_vars(x)
    assert len(fvx) == 1
    assert fvx[0] == x
    v = relay.Constant(tvm.nd.array(10))
    let = relay.Let(x, v, x)
    fvx = free_vars(let)
    assert len(free_vars(let)) == 0
    f = relay.Function([x], x, ty)
    assert len(free_vars(f)) == 0
def test_free_vars_tuple():
    """A var used twice in a tuple is reported once; same for TupleGetItem."""
    t = relay.Var("t")
    fv = free_vars(relay.Tuple([t, t]))
    assert len(fv) == 1
    assert fv[0] == t
    fv = free_vars(relay.TupleGetItem(t, 123))
    assert len(fv) == 1
    assert fv[0] == t
def test_free_type_vars():
    """A type var appearing in a bound var's annotation is still free."""
    tp = relay.TypeVar("")
    ty = relay.TupleType([tp, relay.TensorType([], "int32")])
    x = relay.Var("x", ty)
    y = relay.Var("y")
    let = relay.Let(x, y, x)
    fvl = free_vars(let)
    assert len(fvl) == 1
    assert fvl[0] == y
    ftvl = free_type_vars(let)
    assert len(ftvl) == 1
    assert ftvl[0] == tp
def test_bound_vars():
    """bound_vars collects function params and let-bound vars, in order."""
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    a = relay.Var("a")
    f1 = relay.Function([x, y, z], relay.Let(a, x, relay.Tuple([])))
    assert_vars_match(bound_vars(f1), [x, y, z, a])
    # A plain tuple binds nothing.
    tup = relay.Tuple([x, y, z, a])
    assert len(bound_vars(tup)) == 0
    # Only function params are bound; vars used in the body are not.
    f2 = relay.Function([x, y], relay.Tuple([x, y, z, a]))
    assert_vars_match(bound_vars(f2), [x, y])
def test_match_vars():
    """Pattern vars are bound within match clauses; other vars stay free."""
    mod = tvm.IRModule()
    p = relay.prelude.Prelude(mod)
    rlist, cons, nil = p.mod.get_type("List")
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    # match1 binds x and y via PatternVar; z is used but never bound.
    match1 = relay.Match(
        nil(),
        [
            relay.Clause(relay.PatternConstructor(nil), z),
            relay.Clause(
                relay.PatternConstructor(cons, [relay.PatternVar(x), relay.PatternVar(y)]),
                cons(x, y),
            ),
        ],
    )
    # match2 binds only x; y and z appear free in clause bodies.
    match2 = relay.Match(
        nil(),
        [
            relay.Clause(
                relay.PatternConstructor(cons, [relay.PatternWildcard(), relay.PatternVar(x)]), y
            ),
            relay.Clause(relay.PatternWildcard(), z),
        ],
    )
    assert_vars_match(bound_vars(match1), [x, y])
    assert_vars_match(free_vars(match1), [z])
    assert_vars_match(all_vars(match1), [z, x, y])
    assert_vars_match(bound_vars(match2), [x])
    assert_vars_match(free_vars(match2), [y, z])
    assert_vars_match(all_vars(match2), [x, y, z])
def test_bound_type_vars():
    """bound_type_vars collects type params of FuncTypes and Functions."""
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    c = relay.TypeVar("c")
    ft1 = relay.FuncType([a], b, [a, b])
    bound_ft1 = bound_type_vars(ft1)
    assert_vars_match(bound_type_vars(ft1), [a, b])
    # Only declared type params count; the return type's var is not bound.
    ft2 = relay.FuncType([], c, [a])
    assert_vars_match(bound_type_vars(ft2), [a])
    tup_ty = relay.TupleType([a, b, c])
    assert len(bound_type_vars(tup_ty)) == 0
    f1 = relay.Function([], relay.Tuple([]), type_params=[a, b])
    assert_vars_match(bound_type_vars(f1), [a, b])
    f2 = relay.Function([], relay.Tuple([]), c)
    assert len(bound_type_vars(f2)) == 0
    x = relay.Var("x", a)
    let1 = relay.Let(x, relay.Tuple([]), x)
    assert len(bound_type_vars(let1)) == 0
    # Type params of a nested function are bound inside the let body.
    let2 = relay.Let(x, relay.Function([], relay.Tuple([]), type_params=[b, c]), x)
    assert_vars_match(bound_type_vars(let2), [b, c])
def test_all_vars():
    """all_vars returns bound and free vars together, in encounter order."""
    x = relay.Var("x")
    y = relay.Var("y")
    z = relay.Var("z")
    f1 = relay.Function([x, y], z)
    assert_vars_match(all_vars(f1), [x, y, z])
    f2 = relay.Function([x], relay.Let(y, relay.Tuple([]), z))
    assert_vars_match(all_vars(f2), [x, y, z])
    f3 = relay.Function([x], relay.Tuple([y, z]))
    assert_vars_match(all_vars(f3), [x, y, z])
    tup = relay.Tuple([x, y, z])
    assert_vars_match(all_vars(tup), [x, y, z])
def test_all_type_vars():
    """all_type_vars should collect every type variable mentioned anywhere in a node."""
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    c = relay.TypeVar("c")
    # Function types: arg types, return type, and declared type params all count.
    assert_vars_match(all_type_vars(relay.FuncType([b], c, [a])), [a, b, c])
    assert_vars_match(
        all_type_vars(relay.FuncType([], relay.TupleType([a, b, c]), [])), [a, b, c]
    )
    w = relay.Var("w")
    x = relay.Var("x", a)
    y = relay.Var("y", b)
    z = relay.Var("z", c)
    # Type vars are reachable through annotations, return types, and type params.
    assert_vars_match(all_type_vars(relay.Function([x], y, b, [a])), [a, b])
    assert_vars_match(all_type_vars(relay.Function([x], relay.Let(y, x, z))), [a, b, c])
    assert_vars_match(
        all_type_vars(
            relay.Function([], relay.Tuple([x, y, z]), ret_type=relay.TupleType([a, b, c]))
        ),
        [a, b, c],
    )
    assert_vars_match(
        all_type_vars(relay.Function([w], relay.Tuple([]), type_params=[a, b, c])), [a, b, c]
    )
    # An unannotated identity function mentions no type vars at all.
    assert len(all_type_vars(relay.Function([w], w))) == 0
| 5,833 | 27.183575 | 97 | py |
tvm | tvm-main/tests/python/relay/test_backend_interpreter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import testing
from tvm import nd
from tvm import relay
from tvm.runtime import container
from tvm.relay.backend.interpreter import RefValue, ConstructorValue
from tvm.relay.scope_builder import ScopeBuilder
def check_eval(expr, args, expected_result, mod=None, rtol=1e-07):
    """Evaluate `expr` on every enabled target and compare to `expected_result`.

    Parameters
    ----------
    expr : relay.Expr or None
        Expression to evaluate; None evaluates the module's entry function.
    args : list or None
        Positional inputs; None means the evaluated result is used directly.
    expected_result : numpy-compatible value
        Reference output for the comparison.
    mod : tvm.IRModule, optional
        Module providing global definitions referenced by `expr`.
    rtol : float
        Relative tolerance for the numeric comparison.
    """
    # TODO(tqchen) add more types once the schedule register is fixed.
    for target in ["llvm"]:
        dev = tvm.device(target, 0)
        if not testing.device_enabled(target):
            # Fixed: was `return`, which aborted the remaining targets once a
            # single disabled target was hit. Skip only this one instead.
            continue
        func = relay.create_executor(mod=mod, device=dev, target=target).evaluate(expr)
        result = func if args is None else func(*args)
        # use testing which also set atol
        testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_tuple_value():
    """A tuple_object should preserve its elements and support indexing."""
    fields = [relay.const(v) for v in (1, 2, 3)]
    tv = container.tuple_object(fields)
    for idx, expected in enumerate((1, 2, 3)):
        np.testing.assert_allclose(tv[idx].data.numpy(), expected)
def test_tuple_getitem():
    """TupleGetItem should project the requested field out of a tuple."""
    # Removed a dead local (`two = relay.add(...)`) that was never used.
    # Select field 0 of the tuple (1, 2); the interpreter should yield 1.
    func = relay.Function([], relay.TupleGetItem(relay.Tuple([relay.const(1), relay.const(2)]), 0))
    check_eval(func, [], 1)
def test_id():
    """The identity function should return its input unchanged."""
    x = relay.var("x", "float32")
    identity = relay.Function([x], x)
    value = np.array(1.0, "float32")
    check_eval(identity, [value], value)
def test_add_const():
    """Adding two constants through the interpreter: 1 + 1 == 2."""
    body = relay.add(relay.const(1), relay.const(1))
    check_eval(relay.Function([], body), [], 2)
def test_mul_param():
    """Elementwise multiply with broadcasting over the first axis."""
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(1, 10))
    product = relay.Function([x, y], relay.multiply(x, y))
    lhs = np.random.rand(10, 10).astype("float32")
    rhs = np.random.rand(1, 10).astype("float32")
    check_eval(product, [lhs, rhs], lhs * rhs)
def test_equal():
    """Scalar equality of two int32 inputs should evaluate to a boolean."""
    i = relay.var("i", shape=[], dtype="int32")
    # Name hint fixed from "i" to "j" (copy-paste slip). Relay vars are
    # identified by object identity, so behavior is unchanged, but distinct
    # hints keep the printed IR readable.
    j = relay.var("j", shape=[], dtype="int32")
    z = relay.equal(i, j)
    func = relay.Function([i, j], z, ret_type=relay.TensorType([], "bool"))
    i_data = relay.const(0, "int32")
    j_data = relay.const(0, "int32")
    check_eval(func, [i_data, j_data], True)
def test_subtract():
    """Subtracting one from a scalar input: 1 - 1 == 0."""
    i = relay.var("i", shape=[], dtype="int32")
    decremented = relay.subtract(i, relay.const(1, dtype="int32"))
    func = relay.Function([i], decremented, ret_type=relay.TensorType([], "int32"))
    check_eval(func, [np.array(1, dtype="int32")], 0)
def test_simple_loop():
    """Evaluate a recursive global function computing sum(0..i)."""
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    sb = ScopeBuilder()
    # Base case: sum_up(0) == 0.
    with sb.if_scope(relay.equal(i, relay.const(0, dtype="int32"))):
        sb.ret(i)
    # Recursive case: sum_up(i) == sum_up(i - 1) + i.
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, dtype="int32"))
        rec_call = relay.Call(sum_up, [one_less])
        sb.ret(relay.add(rec_call, i))
    func = relay.Function([i], sb.get(), ret_type=relay.TensorType([], "int32"))
    mod[sum_up] = func
    i_data = np.array(10, dtype="int32")
    # sum_up(10) should equal 1 + 2 + ... + 10 == 55.
    check_eval(sum_up, [i_data], sum(range(1, 11)), mod=mod)
def test_loop():
    """Tail-recursive accumulator variant of the summation loop."""
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    accum = relay.var("accum", shape=[], dtype="int32")
    sb = ScopeBuilder()
    # Base case: counter exhausted, return the accumulator.
    with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
        sb.ret(accum)
    # Recursive case: fold i into the accumulator and recurse on i - 1.
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, "int32"))
        new_accum = relay.add(accum, i)
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    i_data = np.array(10, dtype="int32")
    accum_data = np.array(0, dtype="int32")
    # sum_up(10, 0) should equal 1 + 2 + ... + 10 == 55.
    check_eval(sum_up, [i_data, accum_data], sum(range(1, 11)), mod=mod)
def test_ref():
    """Mutable references: create ref(1), read, write 2, read again, sum -> 3."""
    mod = tvm.IRModule()
    three_with_ref = relay.GlobalVar("three_with_ref")
    i = relay.Var("i")
    iv = relay.Var("iv")
    u = relay.Var("u")
    uv = relay.Var("uv")
    # The let chain is constructed innermost-first; read top-down the program
    # is: let i = ref(1); let iv = !i; let u = (i := 2); let uv = !i;
    # iv + uv  ==> 1 + 2 == 3.
    body = relay.add(iv, uv)
    body = relay.Let(uv, relay.RefRead(i), body)
    body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
    body = relay.Let(iv, relay.RefRead(i), body)
    body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
    mod[three_with_ref] = relay.Function([], body)
    check_eval(three_with_ref, [], 3, mod=mod)
def test_binds():
    """evaluate() should substitute values supplied through `binds`."""
    x = relay.var("x")
    doubled = relay.add(x, x)
    x_value = np.ones((10, 20))
    result = relay.create_executor().evaluate(doubled, binds={x: x_value}).numpy()
    testing.assert_allclose(x_value + x_value, result)
def test_kwargs_params():
    """Evaluated functions should accept a mix of positional and keyword args."""
    names = ("x", "y", "z")
    variables = [relay.var(name, shape=(1, 10)) for name in names]
    x, y, z = variables
    f = relay.Function(variables, x + y + z)
    data = {name: np.random.rand(1, 10).astype("float32") for name in names}
    # x positionally, y and z by keyword.
    res = relay.create_executor().evaluate(f)(data["x"], y=data["y"], z=data["z"])
    testing.assert_allclose(res.numpy(), data["x"] + data["y"] + data["z"])
def test_function_taking_adt_ref_tuple():
    """The prelude `id` function should round-trip ADT, reference, and tuple values."""
    mod = tvm.IRModule()
    prelude = relay.prelude.Prelude(mod)
    _, cons, nil = prelude.mod.get_type("List")
    # A one-element list: cons(<random tensor>, nil).
    nil_value = ConstructorValue(nil.tag, [], nil)
    cons_value = ConstructorValue(
        cons.tag,
        [nd.array(np.random.rand(1, 10).astype("float32")), nil_value],
        cons,
    )
    # A mutable reference and a 10-element tuple of tensors.
    ref_value = RefValue(nd.array(np.random.rand(1, 10).astype("float32")))
    tuple_value = container.tuple_object(
        [nd.array(np.random.rand(1, 10).astype("float32")) for _ in range(10)]
    )
    id_func = relay.create_executor(mod=mod).evaluate(prelude.id)
    # nil passes through with tag and (empty) fields intact.
    res_nil = id_func(nil_value)
    assert res_nil.tag == nil_value.tag
    assert len(res_nil.fields) == 0
    # cons preserves both the payload tensor and the trailing nil.
    res_cons = id_func(cons_value)
    assert res_cons.tag == cons_value.tag
    assert len(res_cons.fields) == len(cons_value.fields)
    testing.assert_allclose(res_cons.fields[0].numpy(), cons_value.fields[0].numpy())
    assert isinstance(res_cons.fields[1], ConstructorValue)
    assert res_cons.fields[1].tag == nil.tag
    assert len(res_cons.fields[1].fields) == 0
    # References and tuples likewise come back unchanged.
    res_ref = id_func(ref_value)
    testing.assert_allclose(res_ref.value.numpy(), ref_value.value.numpy())
    res_tuple = id_func(tuple_value)
    for i in range(10):
        testing.assert_allclose(res_tuple[i].numpy(), tuple_value[i].numpy())
def test_tuple_passing():
    """Tuple-typed arguments may be passed as Python tuples or runtime tuple objects."""
    x = relay.var(
        "x",
        type_annotation=relay.ty.TupleType(
            [relay.ty.TensorType((), "int64"), relay.ty.TensorType((), "int64")]
        ),
    )
    # main projects the first field out of its tuple argument.
    fn = relay.Function([x], relay.expr.TupleGetItem(x, 0))
    mod = tvm.IRModule({})
    gv = relay.GlobalVar("main")
    mod[gv] = fn
    mod = relay.transform.InferType()(mod)
    dev = tvm.cpu()
    target = tvm.target.Target("llvm")
    f = relay.create_executor(mod=mod, device=dev, target=target).evaluate(gv)
    # First use a Python tuple.
    out = f((10, 8))
    testing.assert_allclose(out.numpy(), np.array(10))
    # Second use a tuple value.
    value_tuple = container.tuple_object([nd.array(np.array(11)), nd.array(np.array(12))])
    out = f(value_tuple)
    testing.assert_allclose(out.numpy(), np.array(11))
def test_dynamic():
    """Concatenate two dynamically-shaped (Any, m) inputs after elementwise ops."""
    rows = 3
    cols = 2
    x = relay.Var("x", relay.TensorType([relay.Any(), cols], "float32"))
    y = relay.Var("y", relay.TensorType([relay.Any(), cols], "float32"))
    shifted = x - relay.expr.const(3.0)
    scaled = y * relay.expr.const(5.0)
    z = relay.op.concatenate([shifted, scaled], axis=0)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], z)
    x_np = np.random.uniform(size=(rows, cols)).astype("float32")
    y_np = np.random.uniform(size=(rows, cols)).astype("float32")
    expected = np.concatenate([x_np - 3.0, y_np * 5.0], axis=0)
    check_eval(None, [x_np, y_np], expected, mod)
def test_ref_global_from_expr():
    """Call a module-level global from a standalone call expression."""
    size = 3
    x = relay.Var("x", relay.TensorType([size], "float32"))
    y = relay.Var("y", relay.TensorType([size], "float32"))
    mod = tvm.IRModule()
    mod["add"] = relay.Function([x, y], relay.add(x, y))
    x_np = np.random.uniform(size=(size,)).astype("float32")
    y_np = np.random.uniform(size=(size,)).astype("float32")
    # Build a call to the global with constant operands and evaluate it.
    call = relay.Call(mod.get_global_var("add"), [relay.const(x_np), relay.const(y_np)])
    check_eval(call, None, np.add(x_np, y_np), mod)
def test_keyword_args():
    """main's parameters should be addressable by keyword, in any order."""
    size = 3
    x = relay.Var("x", relay.TensorType([size], "float32"))
    y = relay.Var("y", relay.TensorType([size], "float32"))
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    x_np = np.random.uniform(size=(size,)).astype("float32")
    y_np = np.random.uniform(size=(size,)).astype("float32")
    # Pass keywords out of declaration order on purpose.
    actual = relay.create_executor(mod=mod).evaluate()(y=y_np, x=x_np)
    testing.assert_allclose(actual.numpy(), np.add(x_np, y_np))
# TODO(mbs): Support? Would help reduce wasted work when we need to prepare
# multiple functions w.r.t. the same module.
@pytest.mark.skip(reason="closures are currently not directly Python callable")
def test_functional_returns():
    """A tuple of closures should evaluate to Python-callable results."""
    size = 3
    x = relay.Var("x", relay.TensorType([size], "float32"))
    identity = relay.Function([x], x)
    pair = relay.Tuple([identity, identity])
    data = np.random.rand(size).astype("float32")
    result1, result2 = relay.create_executor().evaluate(pair)
    testing.assert_allclose(result1(data).numpy(), data)
    testing.assert_allclose(result2(data).numpy(), data)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 10,361 | 34.731034 | 99 | py |
tvm | tvm-main/tests/python/relay/test_op_grad_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad
# Module-level test parameters (tvm.testing.parameter produces pytest
# fixtures): the gradient checks below run for each listed dtype, and only
# with the debug executor.
index_dtype = tvm.testing.parameter("int32", "int64")
val_dtype = tvm.testing.parameter("float32", "float64")
executor_kind = tvm.testing.parameter("debug")
def test_cross_entropy_grad(executor_kind, target, dev, val_dtype):
    """Numerically check the gradient of nn.cross_entropy on each target."""
    target = tvm.target.Target(target)
    if target.kind.name == "vulkan" and val_dtype == "float64":
        # GLSL.std.450's Log implementation only takes 16/32-bit floats.
        pytest.xfail("Known failing test case for vulkan runtime")
    x = relay.var("x", shape=(2, 5), dtype=val_dtype)
    y = relay.var("y", shape=(2, 5), dtype=val_dtype)
    # NOTE(review): mean=1 presumably shifts the random inputs away from
    # log(0) — confirm against check_grad's input generation.
    check_grad(
        relay.Function([x, y], relay.op.nn.cross_entropy(x, y)),
        eps=0.01,
        scale=0.1,
        mean=1,
        target_devices=[(target, dev)],
        executor_kind=executor_kind,
    )
def test_cross_entropy_with_logits_grad(executor_kind, target, dev, val_dtype):
    """Numerically check the gradient of nn.cross_entropy_with_logits."""
    predictions = relay.var("x", shape=(2, 5), dtype=val_dtype)
    labels = relay.var("y", shape=(2, 5), dtype=val_dtype)
    fn = relay.Function(
        [predictions, labels], relay.op.nn.cross_entropy_with_logits(predictions, labels)
    )
    check_grad(
        fn,
        eps=0.01,
        scale=0.1,
        mean=1,
        target_devices=[(target, dev)],
        executor_kind=executor_kind,
    )
def test_checkpoint(executor_kind, target, dev):
    """Gradients must flow correctly through annotation.checkpoint."""
    # Case 1: checkpoint wrapping the whole output (x0 + x1) * (x2 + x3).
    inputs = [relay.var("x{}".format(i), shape=(1,)) for i in range(4)]
    output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3]))
    check_grad(
        relay.Function(inputs, relay.annotation.checkpoint(output)), executor_kind=executor_kind
    )
    # Case 2: checkpoint applied to only one element of a let-bound tuple:
    # (x0 + x1) - (x2 * x3), with the first operand checkpointed.
    scope = relay.ScopeBuilder()
    out_tuple = scope.let(
        "out_tuple",
        relay.Tuple([relay.add(inputs[0], inputs[1]), relay.multiply(inputs[2], inputs[3])]),
    )
    scope.ret(
        relay.subtract(
            relay.annotation.checkpoint(relay.TupleGetItem(out_tuple, 0)),
            relay.TupleGetItem(out_tuple, 1),
        )
    )
    out_single = scope.get()
    check_grad(
        relay.Function(inputs, out_single),
        target_devices=[(target, dev)],
        executor_kind=executor_kind,
    )
class TestBatchMatmulGrad:
    """Gradient checks for nn.batch_matmul over all four transpose combinations."""
    # (a_shape, b_shape, transpose_a, transpose_b); shapes are arranged so each
    # pairing contracts a dimension of size 5 into a (2, 3, 4) result.
    a_shape, b_shape, transpose_a, transpose_b = tvm.testing.parameters(
        ((2, 3, 5), (2, 5, 4), False, False),
        ((2, 3, 5), (2, 4, 5), False, True),
        ((2, 5, 3), (2, 5, 4), True, False),
        ((2, 5, 3), (2, 4, 5), True, True),
    )
    def test_batch_matmul_grad(
        self, executor_kind, target, dev, a_shape, b_shape, transpose_a, transpose_b
    ):
        """Numerically verify the batch_matmul gradient for one parameterization."""
        tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32"))
        tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32"))
        check_grad(
            relay.Function(
                [tensor_a, tensor_b],
                relay.op.nn.batch_matmul(
                    tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b
                ),
            ),
            target_devices=[(target, dev)],
            executor_kind=executor_kind,
        )
def test_reverse_reshape_grad(executor_kind, target, dev):
    """Gradient of reverse_reshape should pass numerical checking."""
    data = relay.var("x", shape=(3, 4, 5), dtype="float64")
    reshaped = relay.op.reverse_reshape(data, (-1, 0))
    check_grad(
        relay.Function([data], reshaped),
        target_devices=[(target, dev)],
        executor_kind=executor_kind,
    )
def test_one_hot_grad(executor_kind, target, dev, index_dtype, val_dtype):
    """Gradient of one_hot w.r.t. its on/off scalar values."""
    indices_shape = (3, 4)
    depth = 5
    axis = -1
    # Random integer indices plus tiny random on/off scalar values.
    inputs = [
        np.random.randint(depth, size=indices_shape, dtype=index_dtype),
        np.array(np.random.randn() * 1e-5).astype(val_dtype),
        np.array(np.random.randn() * 1e-5).astype(val_dtype),
    ]
    # Only perturb on_val/off_val; the integer indices are not differentiable.
    test_inputs = inputs[1:]
    indices = relay.var("indices", shape=indices_shape, dtype=index_dtype)
    on_val = relay.var("on_val", shape=tuple(), dtype=val_dtype)
    off_val = relay.var("off_val", shape=tuple(), dtype=val_dtype)
    y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype)
    f = relay.Function([indices, on_val, off_val], y)
    check_grad(
        f,
        inputs=inputs,
        test_inputs=test_inputs,
        target_devices=[(target, dev)],
        executor_kind=executor_kind,
    )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 5,142 | 32.396104 | 96 | py |
tvm | tvm-main/tests/python/relay/test_external_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for graph partitioning."""
import sys
from collections import OrderedDict
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, runtime
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.op.annotation import compiler_begin, compiler_end
from utils.external_codegen import (
update_lib,
set_external_func_attr,
parametrize_external_codegen_checks,
parametrize_external_json_codegen_checks,
check_graph_executor_result,
check_vm_result,
)
@parametrize_external_codegen_checks
def test_multi_node_subgraph(check_result):
    """Two external 'ccompiler' subgraphs plus a TVM-native remainder,
    concatenated into one (30, 10) output."""
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # subgraph0: ((x + w0) - w1) * w2, offloaded to the external compiler.
    x0 = relay.var("x0", shape=(10, 10))
    w00 = relay.var("w00", shape=(10, 10))
    w01 = relay.var("w01", shape=(10, 10))
    w02 = relay.var("w02", shape=(10, 10))
    z00 = relay.add(x0, w00)
    p00 = relay.subtract(z00, w01)
    q00 = relay.multiply(p00, w02)
    subgraph0 = relay.Function([x0, w00, w01, w02], q00)
    subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0")
    call0 = relay.Call(subgraph0, [x, w0, w1, w2])
    # subgraph1: same computation with w3..w5, as a second external function.
    x1 = relay.var("x1", shape=(10, 10))
    w10 = relay.var("w10", shape=(10, 10))
    w11 = relay.var("w11", shape=(10, 10))
    w12 = relay.var("w12", shape=(10, 10))
    z10 = relay.add(x1, w10)
    p10 = relay.subtract(z10, w11)
    q10 = relay.multiply(p10, w12)
    subgraph1 = relay.Function([x1, w10, w11, w12], q10)
    subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1")
    call1 = relay.Call(subgraph1, [x, w3, w4, w5])
    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((call0, call1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
    # Expected output mirrors the three branches above, stacked along axis 0.
    check_result(
        mod,
        map_inputs,
        (30, 10),
        np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
    )
@parametrize_external_codegen_checks
def test_extern_gcc_single_op(check_result):
    """Offload a single float32 add to the 'ccompiler' external codegen."""
    shape = (8, 8)
    x = relay.var("x", shape=shape)
    y = relay.var("y", shape=shape)
    # The external function body uses its own fresh parameters.
    x0 = relay.var("x0", shape=shape)
    y0 = relay.var("y0", shape=shape)
    f = relay.Function([x0, y0], x0 + y0)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    mod = tvm.IRModule.from_expr(relay.Call(f, [x, y]))
    x_data = np.random.rand(*shape).astype("float32")
    y_data = np.random.rand(*shape).astype("float32")
    check_result(mod, {"x": x_data, "y": y_data}, shape, x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc_single_op_int(check_result):
    """Offload a single int32 add to the 'ccompiler' external codegen."""
    x = relay.var("x", shape=(8, 8), dtype="int32")
    y = relay.var("y", shape=(8, 8), dtype="int32")
    # The external function body uses its own fresh parameters.
    x0 = relay.var("x0", shape=(8, 8), dtype="int32")
    y0 = relay.var("y0", shape=(8, 8), dtype="int32")
    z = x0 + y0
    f = relay.Function([x0, y0], z)
    f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
    call = relay.Call(f, [x, y])
    mod = tvm.IRModule.from_expr(call)
    # Fixed: np.random.rand() yields floats in [0, 1), so .astype("int32")
    # truncated every element to 0 and the test only ever checked 0 + 0.
    # Use randint so real integer addition is exercised.
    x_data = np.random.randint(low=-100, high=100, size=(8, 8)).astype("int32")
    y_data = np.random.randint(low=-100, high=100, size=(8, 8)).astype("int32")
    check_result(mod, {"x": x_data, "y": y_data}, (8, 8), x_data + y_data)
@parametrize_external_codegen_checks
def test_extern_gcc(check_result):
    """Chain three externally-compiled binary ops: (y * y) - (x + x)."""
    x = relay.var("x", shape=(2, 2))
    y = relay.var("y", shape=(2, 2))
    # subgraph for mul
    x0 = relay.var("x0", shape=(2, 2))
    y0 = relay.var("y0", shape=(2, 2))
    mul = x0 * y0
    mul = relay.Function([x0, y0], mul)
    mul = set_external_func_attr(mul, "ccompiler", "ccompiler_2")
    call_mul = relay.Call(mul, [y, y])
    # subgraph for add
    x1 = relay.var("x1", shape=(2, 2))
    y1 = relay.var("y1", shape=(2, 2))
    add = x1 + y1
    add = relay.Function([x1, y1], add)
    add = set_external_func_attr(add, "ccompiler", "ccompiler_1")
    call_add = relay.Call(add, [x, x])
    # subgraph for sub — consumes the results of the other two subgraphs.
    x2 = relay.var("x2", shape=(2, 2))
    y2 = relay.var("y2", shape=(2, 2))
    sub = x2 - y2
    sub = relay.Function([x2, y2], sub)
    sub = set_external_func_attr(sub, "ccompiler", "ccompiler_0")
    call_sub = relay.Call(sub, [call_mul, call_add])
    mod = tvm.IRModule.from_expr(call_sub)
    x_data = np.random.rand(2, 2).astype("float32")
    y_data = np.random.rand(2, 2).astype("float32")
    # Inputs deliberately supplied out of parameter order.
    inputs = OrderedDict(
        [
            ("y", y_data),
            ("x", x_data),
        ]
    )
    check_result(mod, inputs, (2, 2), (y_data * y_data) - (x_data + x_data))
# TODO(mbs): The check_aot_executor_result does not support the list-of-targets, mostly because
# tvm.testing.aot.compile_and_run requires the target to be a kind name string, and
# tvm.testing.aot.compile_models requires a single Target object. However, code outside of
# tvm.testing.aot is ready for this more general form.
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_with_target_instance(check_result):
    """'ccompiler' target attributes must reach codegen: a benign header
    compiles, an ill-formed one must make compilation fail."""
    shape = (8, 8)
    dtype = "int32"
    def make_mod():
        # Single external int add wrapped in a module.
        x0 = relay.var("x0", shape=shape, dtype=dtype)
        y0 = relay.var("y0", shape=shape, dtype=dtype)
        z = x0 + y0
        f = relay.Function([x0, y0], z)
        f = set_external_func_attr(f, "ccompiler", "ccompiler_0")
        x = relay.var("x", shape=shape, dtype=dtype)
        y = relay.var("y", shape=shape, dtype=dtype)
        call = relay.Call(f, [x, y])
        return tvm.IRModule.from_expr(call)
    host_target = tvm.target.Target("llvm")
    generic_target = tvm.target.Target("llvm", host=host_target)
    # The header attribute is just whitespace, so compilation is as usual.
    good_extern_codegen_target = tvm.target.Target(
        {"kind": "ccompiler", "header": "// Good"}, host=host_target
    )
    # The header attribute is ill-formed, so compilation is expected to fail.
    bogus_extern_codegen_target = tvm.target.Target(
        {"kind": "ccompiler", "header": "Bogus"}, host=host_target
    )
    mod = make_mod()
    x_data = np.random.rand(*shape).astype(dtype)
    y_data = np.random.rand(*shape).astype(dtype)
    expected_result = x_data + y_data
    inputs = {"x": x_data, "y": y_data}
    check_result(
        mod, inputs, shape, expected_result, target=[generic_target, good_extern_codegen_target]
    )
    with pytest.raises(RuntimeError):
        check_result(
            mod,
            inputs,
            shape,
            expected_result,
            target=[generic_target, bogus_extern_codegen_target],
        )
@pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
@pytest.mark.parametrize("check_result", [check_graph_executor_result, check_vm_result])
def test_extern_gcc_consts(check_result):
    """Constants captured inside an external function must be bound at runtime."""
    shape = (8, 8)
    dtype = "float32"
    const_data = np.random.uniform(0, 1, shape).astype(dtype)
    # External function adds a baked-in constant to its single input.
    inner_arg = relay.var("x0", shape=shape)
    body = inner_arg + relay.const(const_data, dtype)
    ext_fn = relay.Function([inner_arg], body)
    ext_fn = set_external_func_attr(ext_fn, "ccompiler", "ccompiler_0")
    outer_arg = relay.var("x", shape=shape)
    mod = tvm.IRModule.from_expr(relay.Call(ext_fn, [outer_arg]))
    # Note that while the VMCompiler get_params() will return all 'parameters' from both
    # TVM and external codegen compiled code, the GraphExecutor.get_params() will return only
    # those from non-external modules. So in the following we'll test by execution rather than
    # test by inspection.
    x_data = np.random.rand(*shape).astype(dtype)
    check_result(mod, {"x": x_data}, shape, x_data + const_data, target="llvm")
@pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True),
    reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_padding(check_result):
    """A strided conv2d with asymmetric padding must match native TVM when
    offloaded to the DNNL external codegen."""
    dtype = "float32"
    ishape = (1, 1, 99, 12)
    w1shape = (54, 1, 3, 3)
    # Reference module: the same conv2d compiled natively by TVM.
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), strides=(2, 2), padding=(1, 0, 1, 1))
    f = relay.Function([data0, weight0], out)
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f
    # Offloaded module: identical function marked for the DNNL backend.
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data1, weight1])
    mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
        i_data, w_data
    )
    check_result(
        mod, {"data0": i_data, "weight0": w_data}, (1, 54, 50, 6), ref_res.numpy(), tol=1e-5
    )
@pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True),
    reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl(check_result):
    """Two chained depthwise conv2ds plus a residual add, offloaded to DNNL,
    must match the natively compiled reference."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight0 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    weight1 = relay.var("weight0", shape=(w1shape), dtype=dtype)
    weight2 = relay.var("weight1", shape=(w1shape), dtype=dtype)
    # groups=32 with 32 input channels makes these depthwise convolutions.
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    f = relay.Function([data1, weight1, weight2], out)
    # Reference: the same function compiled natively.
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f
    # Offloaded: same function marked for DNNL; the weight is shared for both convs.
    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0, weight0, weight0])
    mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(
        i_data, w_data, w_data
    )
    check_result(
        mod, {"data0": i_data, "weight0": w_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5
    )
@pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True),
    reason="skip because DNNL codegen is not available",
)
@parametrize_external_json_codegen_checks
def test_extern_dnnl_const(check_result):
    """Same depthwise-conv chain as test_extern_dnnl, but with the weights as
    embedded constants instead of function parameters."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    w1shape = (32, 1, 3, 3)
    data0 = relay.var("data0", shape=(ishape), dtype=dtype)
    w_data = np.random.uniform(0, 1, w1shape).astype(dtype)
    data1 = relay.var("data0", shape=(ishape), dtype=dtype)
    # Weights become relay constants baked into the function body.
    weight1 = relay.const(w_data, dtype=dtype)
    weight2 = relay.const(w_data, dtype=dtype)
    depthwise_conv2d_1 = relay.nn.conv2d(
        data1, weight1, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    depthwise_conv2d_2 = relay.nn.conv2d(
        depthwise_conv2d_1, weight2, kernel_size=(3, 3), padding=(1, 1), groups=32
    )
    out = relay.add(depthwise_conv2d_1, depthwise_conv2d_2)
    f = relay.Function([data1], out)
    # Reference: natively compiled; offloaded: marked for DNNL.
    ref_mod = tvm.IRModule()
    ref_mod["main"] = f
    f = set_external_func_attr(f, "dnnl", "dnnl_0")
    call = relay.Call(f, [data0])
    mod = tvm.IRModule.from_expr(call)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    ref_res = relay.create_executor("graph", mod=ref_mod, device=tvm.cpu()).evaluate()(i_data)
    check_result(mod, {"data0": i_data}, (1, 32, 14, 14), ref_res.numpy(), tol=1e-5)
def test_load_params_with_constants_in_ext_codegen():
    """get_params()/load_params must stay consistent when bound constants end
    up inside an external function."""
    # After binding params and partitioning graph_module.get_params()
    # might contain parameters that are not an graph executor input but
    # for example constants in external function.
    y_in = np.ones((1,)).astype("float32")
    params = {"y": y_in}
    mod = tvm.IRModule()
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1,))
    # Annotate the whole add for the 'ccompiler' external backend.
    xcb = compiler_begin(x, "ccompiler")
    ycb = compiler_begin(y, "ccompiler")
    z = relay.add(xcb, ycb)
    zce = compiler_end(z, "ccompiler")
    mod["main"] = relay.Function([x, y], zce)
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = relay.transform.PartitionGraph()(mod)
    graph_module = relay.build(mod, target="llvm", params=params)
    # Params will be stored in metadata module.
    assert len(graph_module.get_params()) == 0
    lib = update_lib(graph_module.get_lib())
    # Round-tripping the (empty) param dict through load_params must not raise.
    rt_mod = tvm.contrib.graph_executor.create(graph_module.get_graph_json(), lib, tvm.cpu(0))
    rt_mod.load_params(runtime.save_param_dict(graph_module.get_params()))
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 14,528 | 35.141791 | 99 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_simulated_dequantize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.runtime.vm import VirtualMachine
from tvm.topi.nn.qnn import SQNN_DTYPE_TO_CODE
def dequantize_test_driver(in_dtype, quant_args, axis, in_data):
    """Run the concrete qnn.dequantize op on `in_data` and return the numpy result.

    Parameters
    ----------
    in_dtype : str
        Storage dtype of the quantized input (e.g. "int8").
    quant_args : dict
        Holds "in_zero_point" and "in_scale" (scalars or per-channel arrays).
    axis : int
        Channel axis for per-channel quantization parameters (-1 for scalar).
    in_data : numpy.ndarray
        Quantized input values.
    """
    shape = in_data.shape
    input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
    input_zero_point = relay.const(quant_args["in_zero_point"])
    input_scale = relay.const(quant_args["in_scale"])
    dequantized_output = relay.qnn.op.dequantize(
        input_data,
        input_scale=input_scale,
        input_zero_point=input_zero_point,
        axis=axis,
    )
    mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output)
    mod = tvm.IRModule.from_expr(mod)
    with tvm.transform.PassContext(opt_level=3):
        # Build for llvm and execute on CPU through the graph executor.
        graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        return res
def build_simulated_dequantize(input_data, scale, zp, dtype, axis=-1):
    """Compile a simulated_dequantize expression and return a VM ready to run it."""
    expr = relay.qnn.op.simulated_dequantize(
        input_data,
        scale,
        zp,
        axis=axis,
        in_dtype=dtype,
    )
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        executable = relay.vm.compile(module, "llvm", params=None)
    return VirtualMachine(executable, tvm.cpu(0))
def verify_simulated_dequantize_simple(dtype):
    """Check simulated_dequantize against the real qnn.dequantize for `dtype`."""
    data = np.random.uniform(low=-128, high=127, size=[2, 5]).astype(dtype)
    data_fp = data.astype("float32")
    scale_np = np.float32(0.5)
    zp_np = np.int32(127)
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE[dtype])
    quant_args = {"in_zero_point": zp_np, "in_scale": scale_np}
    # Reference output from the concrete dequantize op.
    dq_out = dequantize_test_driver(
        in_dtype=dtype,
        quant_args=quant_args,
        axis=-1,
        in_data=data,
    )
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[])
    zp = relay.var("zp", shape=[], dtype="int32")
    # Renamed local (was `dtype`) so the relay var no longer shadows the
    # function parameter of the same name.
    dtype_var = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_dequantize(input_data, scale, zp, dtype_var)
    sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
def test_simulated_dequantize():
    """Exercise simulated dequantize across every supported input dtype."""
    for dtype in ("uint8", "int8", "int32"):
        verify_simulated_dequantize_simple(dtype)
def test_dynamic_channels():
    """Compile simulated_dequantize once (scale/zp declared with relay.Any
    shapes) and reuse the same VM for scalar and per-channel parameters."""
    # Compile simulated quantize once but support either per-channel or scalar params.
    data = np.random.uniform(low=-64, high=64, size=[2, 5]).astype("int8")
    data_fp = data.astype("float32")
    # Test scalar qnn params.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([0]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int8"])
    quant_args = {"in_zero_point": zp_np[0], "in_scale": scale_np[0]}
    # Reference output from the real dequantize op.
    dq_out = dequantize_test_driver(
        in_dtype="int8",
        quant_args=quant_args,
        axis=0,
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_dequantize(input_data, scale, zp, dtype, axis=0)
    sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
    # Now get the perchannel quantize output and compare without recompiling.
    scale_np = np.array([0.5, 0.25]).astype("float32")
    zp_np = np.array([127, 123]).astype("int32")
    # Get the reference quantize output.
    quant_args = {"in_zero_point": zp_np, "in_scale": scale_np}
    dq_out = dequantize_test_driver(
        in_dtype="int8",
        quant_args=quant_args,
        axis=0,
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
def test_dynamic_dtype():
    """Compile simulated_dequantize once and reuse the same VM for different
    input dtypes by passing the dtype code as a runtime input."""
    # Compile simulated quantize once but support any type of quantization.
    data = np.random.uniform(low=0, high=255, size=[2, 5]).astype("uint8")
    data_fp = data.astype("float32")
    # Test scalar uint8 to fp32.
    scale_np = np.asarray([0.5]).astype("float32")
    zp_np = np.asarray([127]).astype("int32")
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["uint8"])
    quant_args = {"in_zero_point": zp_np[0], "in_scale": scale_np[0]}
    # Reference output from the real dequantize op.
    dq_out = dequantize_test_driver(
        in_dtype="uint8",
        quant_args=quant_args,
        axis=-1,
        in_data=data,
    )
    # Create variables with undefined shape and run with scalar inputs.
    input_data = relay.var("input_data", shape=data.shape, dtype="float32")
    scale = relay.var("scale", shape=[relay.Any()], dtype="float32")
    zp = relay.var("zp", shape=[relay.Any()], dtype="int32")
    dtype = relay.var("dtype", shape=[], dtype="int32")
    vm = build_simulated_dequantize(input_data, scale, zp, dtype)
    sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
    # Now test int8 to float32 compilation.
    data = np.random.uniform(low=0, high=255, size=[2, 5]).astype("int8")
    data_fp = data.astype("float32")
    # Get the reference quantize output.
    dq_out = dequantize_test_driver(
        in_dtype="int8",
        quant_args=quant_args,
        axis=-1,
        in_data=data,
    )
    # Run the simulated quantize without recompiling and confirm results match.
    dtype_np = np.int32(SQNN_DTYPE_TO_CODE["int8"])
    sim_dq_out = vm.invoke("main", input_data=data_fp, scale=scale_np, zp=zp_np, dtype=dtype_np)
    np.testing.assert_allclose(sim_dq_out.numpy(), dq_out, rtol=1e-5)
if __name__ == "__main__":
test_simulated_dequantize()
test_dynamic_channels()
test_dynamic_dtype()
| 7,206 | 39.488764 | 96 | py |
tvm | tvm-main/tests/python/relay/test_ir_text_printer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay import testing
import numpy as np
from tvm.relay import Expr
from tvm.relay.analysis import free_vars
import pytest
# When True, `show` prints the text form of each expression under test.
DEBUG_PRINT = False
# Version header the text printer emits before every program (see assert_prints_as).
SEMVER = '#[version = "0.0.5"]\n'
def astext(program, unify_free_vars=False):
    """Render `program` as text, check it round-trips through the parser,
    and return the text.

    NOTE(review): `unify_free_vars` is accepted but never used by this body.
    """
    text = program.astext()
    parsed = tvm.relay.parse_expr(text) if isinstance(program, Expr) else tvm.relay.fromtext(text)
    tvm.ir.assert_structural_equal(parsed, program, map_free_vars=True)
    return text
def show(text):
    """Print `text` under a separator line, but only when DEBUG_PRINT is on."""
    if not DEBUG_PRINT:
        return
    print("---------------------------")
    print(text)
def assert_prints_as(expr, str):
    """Assert that `expr` prints exactly as the SEMVER header followed by `str`."""
    expected = SEMVER + str
    assert astext(expr) == expected
def test_scalars():
    """Scalar constants print with dtype suffixes; int32/float32 are bare."""
    cases = [
        (42, "int16", "42i16"),
        (42, "int32", "42"),
        (42, "int64", "42i64"),
        (3.0, "float16", "3f16"),
        (3.0, "float32", "3f"),
        (3.0, "float64", "3f64"),
    ]
    for value, dtype, printed in cases:
        assert_prints_as(relay.const(value, dtype), printed)
def test_large_graph():
    """Printing a ~90k-node chain of adds must complete without error."""
    x = relay.var("x", shape=(3, 2))
    y = relay.var("y")
    big_const = relay.const(10e10, dtype="float32")
    acc = relay.add(x, big_const)
    for _ in range(int(9e4)):
        acc = relay.add(acc, big_const)
    show(astext(relay.Function([x, y], acc)))
def test_func():
    """Both a bare expression and a function wrapping it print/round-trip."""
    x = relay.var("x", shape=(3, 2))
    y = relay.var("y")
    big_const = relay.const(10e10, dtype="float32")
    # Reuse `inner` so the printed output shares a single binding for it.
    inner = relay.add(x, big_const)
    body = relay.add(inner, inner)
    fn = relay.Function([x, y], body)
    show(astext(body))
    show(astext(fn))
def test_mod():
    """Module printing: definitions get `def @name` headers and, after type
    inference, nodes carry `/* ty=... */` annotations."""
    x = relay.var("x", "float32")
    y = relay.var("y", "float32")
    z = relay.add(x, y)
    # Reuse z so the printed form shares a single `%0` binding.
    z = relay.add(z, z)
    f = relay.Function([x, y], z)
    mod = tvm.IRModule()
    mod["myf"] = f
    mod = relay.transform.InferType()(mod)
    text = astext(mod)
    assert "def @myf" in text
    assert "def @myf" in str(mod)
    assert "add(%0, %0) /* ty=float32 */" in text
    assert "add(%0, %0) /* ty=float32 */" in str(mod)
    # `annotate` attaches caller-supplied text to each node; here dtype on Calls.
    show(mod.astext(annotate=lambda x: str(x.checked_type.dtype) if type(x) == relay.Call else ""))
    show(text)
def test_meta_data():
    """Structured values print as meta-section references; `astext` includes
    the meta section (contains "type_key") while plain str() omits it."""
    n, c, h, w = te.size_var("n"), 10, 224, 224
    x = relay.var("x", shape=(n, c, h, w))
    # NOTE: deliberately rebinds `w` from the int dim above to a relay var.
    w = relay.var("w")
    z = relay.nn.conv2d(x, w, kernel_size=(3, 3), padding=(1, 1), channels=2)
    f = relay.Function([x, w], z)
    text = astext(f, unify_free_vars=True)
    text_no_meta = str(f)
    assert "channels=2" in text
    assert "channels=2" in text_no_meta
    # The symbolic batch dim prints as a meta reference in both forms.
    assert "meta[tir.SizeVar][0]" in text
    assert "meta[tir.SizeVar][0]" in text_no_meta
    assert "type_key" in text
    assert "type_key" not in text_no_meta
    # Tensor constants are also emitted as meta references.
    text = astext(relay.const([1, 2, 3]))
    assert "meta[relay.Constant][0]" in text
def test_call_attrs():
    """Call attributes print only when they differ from the op defaults."""
    x = relay.var("x")
    # A non-default softmax axis must show up in the text.
    assert "axis=2" in astext(relay.nn.softmax(x, axis=2))
    # Default attrs are omitted entirely.
    assert "softmax(%x)" in astext(relay.nn.softmax(x))
    # expand_dims prints its num_newaxis attribute.
    assert "num_newaxis=2" in astext(relay.expand_dims(x, axis=2, num_newaxis=2))
def test_let_if_scope():
    """ScopeBuilder if/else bodies print as `{}` blocks with their own lets."""
    x = relay.var("x", "float32")
    y = relay.var("y", "float32")
    cond = relay.var("cond", "bool")
    sb = relay.ScopeBuilder()
    with sb.if_scope(cond):
        # Two lets with the same name hint remain distinct bindings.
        v1 = sb.let("v", relay.const(1, "float32"))
        v2 = sb.let("v", x)
        sb.ret(relay.subtract(v1, v2))
    with sb.else_scope():
        v3 = relay.var("v")
        let2 = relay.Let(v3, y, v3)
        sb.ret(relay.add(let2, let2))
    result = sb.get()
    f = relay.Function([x, y, cond], result)
    text = astext(f)
    # Three opening braces: the function body plus the two branches.
    assert text.count("{") == 3
    assert "%cond: bool" in text
    show(astext(f))
def test_variable_name():
    """A purely numeric name hint gets a `v` prefix so the id stays valid."""
    numeric_hinted = relay.var("1")
    assert "%v1" in astext(numeric_hinted)
def test_mlp():
    """The MLP reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.mlp.get_workload(batch_size=1)
    astext(workload)
def test_resnet():
    """The ResNet reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.resnet.get_workload(batch_size=1)
    astext(workload)
def test_mobilenet():
    """The MobileNet reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.mobilenet.get_workload(batch_size=1)
    astext(workload)
def test_dqn():
    """The DQN reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.dqn.get_workload(batch_size=1)
    astext(workload)
def test_dcgan():
    """The DCGAN reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.dcgan.get_workload(batch_size=1)
    astext(workload)
def test_lstm():
    """LSTM workloads of two sizes print and round-trip."""
    for size in (1, 4):
        workload, _params = tvm.relay.testing.lstm.get_workload(size, size)
        astext(workload)
def test_inception_v3():
    """The Inception-v3 reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.inception_v3.get_workload(batch_size=1)
    astext(workload)
def test_squeezenet():
    """Both SqueezeNet variants print and round-trip."""
    for version in ("1.0", "1.1"):
        workload, _params = tvm.relay.testing.squeezenet.get_workload(batch_size=1, version=version)
        astext(workload)
def test_densenet():
    """The DenseNet reference workload prints and round-trips."""
    workload, _params = tvm.relay.testing.densenet.get_workload(batch_size=1)
    astext(workload)
def test_call_node_order():
    """Nested calls print in evaluation order as numbered temporaries:
    the inner callee/call (%0, %1) is emitted before the outer (%2)."""
    x = relay.var("x")
    y = relay.var("y")
    prog = relay.Call(
        relay.Function([x], x), [relay.Call(relay.Function([y], y), [relay.const(1)])]
    )
    assert astext(prog) == SEMVER + (
        "%0 = fn (%y) {\n"
        " %y\n"
        "};\n"
        "%1 = %0(1);\n"
        "%2 = fn (%x) {\n"
        " %x\n"
        "};\n"
        "%2(%1)"
    )
def test_let_inlining():
    """The printer inlines a let-bound value only when it is used once."""
    tup = relay.Tuple([relay.const(0), relay.const(0)])
    x = relay.var("x")
    # Tuple used both as bound value and body: shared %0 binding stays visible.
    assert astext(relay.Let(x, tup, tup)) == SEMVER + "%0 = (0, 0);\nlet %x = %0;\n%0"
    # Only the let-bound var is used, so the tuple is inlined into the let.
    assert astext(relay.Let(x, tup, x)) == SEMVER + "let %x = (0, 0);\n%x"
def test_zeros():
    """A zero-rank `zeros` op prints and round-trips without error."""
    astext(relay.op.zeros([], "float32"))
def test_unapplied_constructor():
    """Constructors print with their signature inside type definitions but
    without it when used as bare expressions."""
    type_def_str = r"""
type List[A] {
  Cons(A, List[A]),
  Nil,
}
    """
    main_def_str = r"""
def @main[A]() -> fn (A, List[A]) -> List[A] {
  Cons
}
    """
    mod = tvm.relay.parse(SEMVER + type_def_str + main_def_str)
    mod_str = str(mod)
    # ensure constructors are printed correctly in type definitions (with their
    # signature) and as exprs (without their signature)
    assert type_def_str.strip() in mod_str
    assert main_def_str.strip() in mod_str
def test_null_attribute():
    """A function attribute whose value is None prints as `=None`."""
    x = relay.var("x")
    y = relay.var("y")
    func = relay.Function([x], y).with_attr("TestAttribute", None)
    assert "TestAttribute=None" in astext(func)
def test_span():
    """Source spans attached to Call nodes appear in the printed text."""
    x = relay.var("x", shape=(3, 2))
    y = relay.var("y")
    one = relay.const(10e10, dtype="float32")
    z = relay.add(x, one)
    # Rebuild the Call with an explicit span named "Add0".
    z = relay.Call(
        z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add0"), 0, 0, 0, 0)
    )
    z = relay.add(z, z)
    # And a second span named "Add1" on the outer add.
    z = relay.Call(
        z.op, z.args, z.attrs, z.type_args, relay.Span(relay.SourceName("Add1"), 0, 0, 0, 0)
    )
    f = relay.Function([x, y], z)
    txt = astext(f)
    assert "Add0" in txt
    assert "Add1" in txt
def test_optional_info():
    """After type inference, each node carries a `/* ty=... */` annotation."""
    one = relay.const(1)
    typed_mod = relay.transform.InferType()(tvm.IRModule.from_expr(relay.add(one, one)))
    # Both constants share one node, so: the const, the add, and the fn body.
    assert astext(typed_mod).count("/* ty=int32 */") == 3
def test_slash_in_identifier():
    """Identifiers containing '/' survive printing and round-tripping."""
    lhs = relay.var("base/x")
    rhs = relay.var("base/y")
    printed = astext(lhs + rhs)
    assert "base/x" in printed
    assert "base/y" in printed
if __name__ == "__main__":
tvm.testing.main()
| 8,213 | 25.326923 | 99 | py |
tvm | tvm-main/tests/python/relay/test_cpp_build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import relay, runtime
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def test_basic_build():
    """Build dense+relu+add on llvm with bound params and compare the graph
    executor output against a numpy reference."""
    tgt = "llvm"
    dev = tvm.cpu()
    # func
    a = relay.var("a", dtype="float32", shape=(16, 8))
    b = relay.var("b", dtype="float32", shape=(8, 8))
    c = relay.var("c", dtype="float32", shape=(16, 8))
    x = relay.nn.dense(a, b)
    y = relay.nn.relu(x)
    z = y + c
    func = relay.Function([a, b, c], z)
    A = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
    B = tvm.nd.array(np.random.uniform(-1, 1, (8, 8)).astype("float32"), device=dev)
    C = tvm.nd.array(np.random.uniform(-1, 1, (16, 8)).astype("float32"), device=dev)
    # b and c are bound as params; only a remains a runtime input.
    params = {"b": B, "c": C}
    # build
    targets = {tvm.tir.IntImm("int32", dev.device_type): tgt}
    mod = tvm.IRModule.from_expr(func)
    func_in_mod = mod["main"]
    assert mod["main"] == func_in_mod, "cannot compare function to itself"
    lib = relay.build(mod, targets, "llvm", params=params)
    # Building must not mutate the input module.
    assert mod["main"] == func_in_mod, "relay.build changed module in-place"
    # test
    rt = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    rt.set_input("a", A)
    rt.run()
    out = rt.get_output(0)
    # Reference: dense is A @ B^T, relu clamps at zero, then C is added.
    np.testing.assert_allclose(
        out.numpy(),
        np.maximum(np.dot(A.numpy(), B.numpy().T), 0) + C.numpy(),
        atol=1e-5,
        rtol=1e-5,
    )
@tvm.testing.requires_cuda
def test_fp16_build():
    """Build and run a simple fp16 add on CUDA, skipping on GPUs without
    fp16 support."""
    dtype = "float16"
    dev = tvm.cuda(0)
    if dtype == "float16" and not have_fp16(dev.compute_version):
        print("skip because gpu does not support fp16")
        return
    x = relay.var("x", dtype=dtype, shape=(4, 4))
    y = relay.var("y", dtype=dtype, shape=(4, 4))
    z = x + y
    func = relay.Function([x, y], z)
    X = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
    Y = tvm.nd.array(np.random.uniform(-1, 1, (4, 4)).astype(dtype), device=dev)
    params = {
        "x": X,
        "y": Y,
    }
    # build
    g_json, mmod, params = relay.build(func, "cuda", params=params)
    # test
    rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
    # Both inputs are bound as params, so loading them is the only setup.
    rt.load_params(runtime.save_param_dict(params))
    rt.run()
    out = rt.get_output(0)
    np.testing.assert_allclose(out.numpy(), X.numpy() + Y.numpy(), atol=1e-5, rtol=1e-5)
@tvm.testing.requires_llvm
def test_bf16_build():
    """Compile (not run) a conv/bn/pool/softmax network computed in bfloat16;
    the check is simply that relay.build succeeds on llvm."""
    data = relay.var("data", shape=(1, 3, 224, 224), dtype="float32")
    weight = relay.var("weight", shape=(64, 3, 7, 7), dtype="float32")
    bn_gamma = relay.var("gamma", shape=(64,), dtype="float32")
    bn_beta = relay.var("beta", shape=(64,), dtype="float32")
    bn_mean = relay.var("mean", shape=(64,), dtype="float32")
    bn_var = relay.var("var", shape=(64,), dtype="float32")
    params = {
        "weight": np.random.uniform(-1, 1, size=(64, 3, 7, 7)).astype("float32"),
        "gamma": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
        "beta": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
        "mean": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
        "var": np.random.uniform(-1, 1, size=(64,)).astype("float32"),
    }
    # All float32 inputs are cast to bfloat16 before the compute ops.
    conv_bf16 = relay.nn.conv2d(
        relay.cast(data, "bfloat16"),
        relay.cast(weight, "bfloat16"),
        strides=(2, 2),
        padding=(3, 3, 3, 3),
        channels=64,
        kernel_size=(7, 7),
        out_dtype="bfloat16",
    )
    bn_bf16 = relay.nn.batch_norm(
        conv_bf16,
        relay.cast(bn_gamma, "bfloat16"),
        relay.cast(bn_beta, "bfloat16"),
        relay.cast(bn_mean, "bfloat16"),
        relay.cast(bn_var, "bfloat16"),
    )
    relu_bf16 = relay.nn.relu(bn_bf16[0])
    maxpool_bf16 = relay.nn.max_pool2d(relu_bf16, pool_size=(2, 2), strides=(2, 2))
    avgpool_bf16 = relay.nn.avg_pool2d(maxpool_bf16, pool_size=(2, 2), strides=(2, 2))
    flattened_bf16 = relay.nn.batch_flatten(avgpool_bf16)
    softmax_bf16 = relay.nn.softmax(flattened_bf16)
    mod_bf16 = tvm.IRModule.from_expr(softmax_bf16)
    with tvm.transform.PassContext(opt_level=3):
        relay.build(mod_bf16, target="llvm", params=params)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_fp16_conversion(target, dev):
    """Cast float32<->float16 in both directions and compare with numpy astype."""
    if target == "cuda" and not have_fp16(dev.compute_version):
        print("skip because gpu does not support fp16")
        return
    n = 10
    for (src, dst) in [("float32", "float16"), ("float16", "float32")]:
        x = relay.var("x", relay.TensorType((n,), src))
        y = x.astype(dst)
        func = relay.Function([x], y)
        # init input
        X = tvm.nd.array(n * np.random.randn(n).astype(src) - n / 2)
        # build
        with tvm.transform.PassContext(opt_level=1):
            g_json, mmod, params = relay.build(tvm.IRModule.from_expr(func), target)
        # test
        rt = tvm.contrib.graph_executor.create(g_json, mmod, dev)
        rt.set_input("x", X)
        rt.run()
        out = rt.get_output(0)
        np.testing.assert_allclose(out.numpy(), X.numpy().astype(dst), atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
test_basic_build()
test_fp16_build()
test_fp16_conversion()
test_bf16_build()
| 6,003 | 34.111111 | 92 | py |
tvm | tvm-main/tests/python/relay/test_op_level4.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import numpy.random
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
executor_kind = tvm.testing.parameter("graph", "vm")
@tvm.testing.uses_gpu
def test_binary_op():
    """Broadcasting binary ops: symbolic type inference plus numeric check."""

    def check_binary_op(opfunc, ref):
        # Symbolic shapes: (5, n, 5) op (n, 1) must infer back to (5, n, 5).
        n = te.size_var("n")
        t1 = relay.TensorType((5, n, 5))
        t2 = relay.TensorType((n, 1))
        x = relay.var("x", t1)
        y = relay.var("y", t2)
        z = opfunc(x, y)
        # test printer
        assert ("{}(%x, %y)".format(z.op.name)) in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == t1
        if ref is not None:
            # Numeric check on concrete, equal shapes against the numpy ref.
            t1 = relay.TensorType((5, 10, 5))
            t2 = relay.TensorType((5, 10, 5))
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = opfunc(x, y)
            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data, y_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    for opfunc, ref in [(relay.power, np.power)]:
        check_binary_op(opfunc, ref)
@tvm.testing.uses_gpu
def test_cmp_type():
    """Comparison ops broadcast their inputs and always yield bool tensors."""
    for op, ref in (
        (relay.greater, np.greater),
        (relay.greater_equal, np.greater_equal),
        (relay.less, np.less),
        (relay.less_equal, np.less_equal),
        (relay.equal, np.equal),
        (relay.not_equal, np.not_equal),
    ):
        # (10, 4) cmp (5, 10, 1) broadcasts to (5, 10, 4) with dtype bool.
        x = relay.var("x", relay.TensorType((10, 4), "float32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
        z = op(x, y)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")
        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape)
            t2 = relay.TensorType(y_shape)
            x = relay.var("x", t1)
            y = relay.var("y", t2)
            z = op(x, y)
            x_data = np.random.rand(*x_shape).astype(t1.dtype)
            y_data = np.random.rand(*y_shape).astype(t2.dtype)
            ref_res = ref(x_data, y_data)
            func = relay.Function([x, y], z)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data, y_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_binary_int_broadcast_1():
    """Shift ops broadcast int32 inputs; shift amounts kept < 32 bits."""
    for op, ref in [(relay.right_shift, np.right_shift), (relay.left_shift, np.left_shift)]:
        x = relay.var("x", relay.TensorType((10, 4), "int32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
        z = op(x, y)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape, "int32")
            t2 = relay.TensorType(y_shape, "int32")
            x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)
            # Shift amounts limited to [1, 31) to stay within int32 width.
            y_data = np.random.randint(1, 31, size=(y_shape)).astype(t2.dtype)
            func = relay.Function([x, y], z)
            ref_res = ref(x_data, y_data)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data, y_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_binary_int_broadcast_2():
    """maximum/minimum/mod broadcast int32 inputs and match numpy."""
    for op, ref in [(relay.maximum, np.maximum), (relay.minimum, np.minimum), (relay.mod, np.mod)]:
        x = relay.var("x", relay.TensorType((10, 4), "int32"))
        y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
        z = op(x, y)
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
        if ref is not None:
            x_shape = (10, 4)
            y_shape = (5, 10, 1)
            t1 = relay.TensorType(x_shape, "int32")
            t2 = relay.TensorType(y_shape, "int32")
            x_data = np.random.randint(1, 10000, size=(x_shape)).astype(t1.dtype)
            y_data = np.random.randint(1, 10000, size=(y_shape)).astype(t2.dtype)
            func = relay.Function([x, y], z)
            ref_res = ref(x_data, y_data)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                    x_data, y_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res)
@tvm.testing.uses_gpu
def test_where(executor_kind):
    """relay.where matches np.where across broadcast and scalar operands."""

    def run(func, inputs, ref_res):
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                *inputs
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    def verify(x_np, y_np, cond_np):
        ref_res = np.where(cond_np, x_np, y_np)
        args = []
        args_np = []
        vs = []
        cond = relay.var("cond", relay.TensorType(cond_np.shape, "bool"))
        args.append(cond)
        args_np.append(cond_np)
        for v_name, v_np in [("x", x_np), ("y", y_np)]:
            if len(v_np.shape) == 0:
                # 0-d inputs become inline constants rather than parameters.
                v = relay.const(v_np.item())
            else:
                v = relay.var(v_name, relay.TensorType(v_np.shape, dtype))
                args.append(v)
                args_np.append(v_np)
            vs.append(v)
        z = relay.where(cond, vs[0], vs[1])
        func = relay.Function(args, z)
        run(func, args_np, ref_res)

    dtype = "float32"
    x_np = np.random.uniform(size=(3, 4)).astype(dtype)
    y_np = np.random.uniform(size=(3, 4)).astype(dtype)
    cond_np = np.random.uniform(low=-1, high=1, size=(3, 4)) > 0
    verify(x_np, y_np, cond_np)
    x_np = np.array(1.0, dtype)
    y_np = np.array(-1.0, dtype)
    cond_np = np.array([1, 0, 1], dtype=bool)
    verify(x_np, y_np, cond_np)
    x_np = np.arange(10).astype(dtype)
    y_np = 10 * x_np
    cond_np = x_np < 5
    verify(x_np, y_np, cond_np)
    x_np = np.array([[1, 2], [3, 4]], dtype)
    y_np = np.array([[5, 6], [7, 8]], dtype)
    cond_np = np.array([[1], [0]], dtype=bool)
    verify(x_np, y_np, cond_np)
    verify(x_np, y_np, cond_np.T)
    x_np = np.random.randn(1, 12, 8, 8).astype(dtype)
    y_np = np.array(-1.0, dtype)
    cond_np = np.random.randn(1, 1, 8, 8) > 0
    verify(x_np, y_np, cond_np)
    x_np, y_np = np.ogrid[:3, :4]
    cond_np = np.where(x_np < y_np, x_np, 10 + y_np).astype(bool)
    verify(x_np.astype(dtype), y_np.astype(dtype), cond_np)
def _with_keepdims(func):
def _wrapper(data, axis=None, keepdims=False):
if not keepdims:
return func(data, axis=axis)
else:
if axis is not None:
axis = axis if isinstance(axis, int) else axis[0]
out_shape = list(data.shape)
out_shape[axis] = 1
else:
out_shape = [1 for _ in range(len(data.shape))]
return func(data, axis=axis).reshape(out_shape)
return _wrapper
def _np_log_sum_exp(x, axis, keepdims=False):
max_x = np.max(x, axis=axis, keepdims=True)
x = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True))
x = x + max_x
if not keepdims:
x = np.squeeze(x, axis=axis)
return x
def _unbiased_relay_wrapper(f):
def _unbiased_func(x, axis=None, keepdims=False, exclude=False):
return f(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)
return _unbiased_func
def _unbiased_np_wrapper(f):
def _unbiased_func(a, axis=None, dtype=None, keepdims=None):
return f(a, axis=axis, dtype=dtype, ddof=1, keepdims=keepdims)
return _unbiased_func
class TestReduceFunctions:
    """Parametrized checks for relay reductions against numpy references.

    `funcs` maps a test id to a (relay op, numpy reference) pair; the class
    is then parametrized over ops and over (shape, axis, keepdims, exclude,
    expected output shape) tuples, including symbolic te.var dimensions that
    exercise type inference only.
    """

    # test-id -> (relay reduction, numpy reference with matching semantics)
    funcs = {
        "sum": (relay.sum, np.sum),
        "max": (relay.max, np.max),
        "min": (relay.min, np.min),
        "mean": (relay.mean, np.mean),
        "var": (relay.variance, np.var),
        "unbiased_var": (_unbiased_relay_wrapper(relay.variance), _unbiased_np_wrapper(np.var)),
        "std": (relay.std, np.std),
        "unbiased_std": (_unbiased_relay_wrapper(relay.std), _unbiased_np_wrapper(np.std)),
        "prod": (relay.prod, np.prod),
        "all": (relay.all, np.all),
        "any": (relay.any, np.any),
        "logsumexp": (relay.logsumexp, _np_log_sum_exp),
        "argmin": (relay.argmin, _with_keepdims(np.argmin)),
        "argmax": (relay.argmax, _with_keepdims(np.argmax)),
    }
    relay_func, ref_func = tvm.testing.parameters(
        *funcs.values(),
        ids=list(funcs),
    )
    # Symbolic dims: cases built from these only check inferred types.
    d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4")
    data, axis, keepdims, exclude, output = tvm.testing.parameters(
        ((d1, d2, d3, d4), None, False, False, ()),
        ((d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4)),
        ((d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4)),
        ((d1, d2, d3), 1, True, False, (d1, 1, d3)),
        ((d1, d2, d3), 0, True, False, (1, d2, d3)),
        ((d1, d2, d3), None, True, False, (1, 1, 1)),
        ((d1, d2, d3), (0, 1), True, False, (1, 1, d3)),
        ((2, 3, 4), 1, True, False, (2, 1, 4)),
        ((2, 3, 4), (1,), True, False, (2, 1, 4)),
        ((2, 3, 4), -1, True, False, (2, 3, 1)),
        ((2, 3, 4), (0, 1, 2), False, False, ()),
        ((4, 4, 3), None, False, False, ()),
        ((4, 4, 3), (0, 2), False, False, (4,)),
        ((128, 24, 128), (0, 1), False, False, (128,)),
        ((128, 24, 128), (0, 2), False, False, (24,)),
        ((128, 24, 128), (0, 1), True, False, (1, 1, 128)),
        ((128, 24, 128), (0, 2), True, False, (1, 24, 1)),
    )

    def test_reduce(
        self,
        target,
        dev,
        relay_func,
        ref_func,
        executor_kind,
        data,
        axis,
        keepdims,
        exclude,
        output,
    ):
        """Type-check the reduction and, for concrete shapes, compare values."""
        # all/any take bool input; argmin/argmax return int32 indices.
        dtype = "bool" if ref_func in [np.all, np.any] else "float32"
        out_type = "int32" if relay_func in [relay.argmin, relay.argmax] else dtype
        target = tvm.target.Target(target)
        if target.kind.name == "vulkan" and dtype == "bool":
            pytest.xfail("Known failing test on vulkan runtime")
        x = relay.var("x", relay.TensorType(data, dtype))
        if relay_func == relay.logsumexp:
            # logsumexp has no `exclude` parameter.
            z = relay_func(x, axis, keepdims)
        else:
            z = relay_func(x, axis, keepdims, exclude)
        zz = run_infer_type(z)
        # NOTE(review): `if axis:` skips these text checks when axis == 0.
        if axis:
            assert "axis=" in z.astext()
        if keepdims:
            assert "keepdims=" in z.astext()
        if exclude:
            assert "exclude=" in z.astext()
        assert zz.checked_type == relay.ty.TensorType(output, out_type)
        # Fully symbolic shapes: type inference was the whole test.
        if all(isinstance(v, tvm.tir.Var) == 1 for v in data):
            return
        func = relay.Function([x], z)
        x_data = (
            np.random.choice([True, False], size=data)
            if ref_func in [np.all]
            else np.random.uniform(size=data).astype(dtype)
        )
        if ref_func in [np.sum]:
            ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims)
        elif ref_func in [np.max, np.min, np.mean, np.prod]:
            ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
        else:  # argmin/argmax
            if axis and not isinstance(axis, int) and len(axis) > 1:
                return
            ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims)
        op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5)
@tvm.testing.uses_gpu
def test_sum_with_bool_input():
    """relay.sum over bool input behaves like numpy's bool-dtype sum (OR)."""

    def verify(dshape, axis, keepdims, exclude):
        x = relay.var("x", relay.TensorType(dshape, "bool"))
        y = relay.sum(x, axis, keepdims, exclude)
        func = relay.Function([x], y)
        func = run_infer_type(func)
        text = func.astext()
        assert "sum" in text
        data = np.random.choice([False, True], size=dshape)
        if exclude and axis is not None:
            # numpy has no exclude flag: reduce over the complementary axes.
            axis = tuple(set(range(len(dshape))) - set(axis))
        ref_res = np.sum(data, axis, keepdims=keepdims, dtype="bool")
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    verify((3, 5, 7, 9), None, False, False)
    verify((3, 5, 7, 9), None, True, False)
    verify((3, 5, 7, 9), (0,), False, False)
    verify((3, 5, 7, 9), (1,), True, False)
    verify((3, 5, 7, 9), (2, 3), False, True)
    verify((3, 5, 7, 9), (0, 2), True, True)
    verify((3, 5, 7, 9), (0, 1, 2, 3), False, False)
    verify((3, 5, 7, 9), (0, 1, 2, 3), False, True)
    verify((3, 5, 7, 9), (0, 1, 2, 3), True, False)
    verify((3, 5, 7, 9), (0, 1, 2, 3), True, True)
@tvm.testing.uses_gpu
def test_argmin_argmax_get_last_elements():
    """argmin/argmax with select_last_index=True must return the index of the
    LAST occurrence of the extreme value in a 1-D input."""

    def get_test_case(shape, gt_func, test_argmin=False):
        # Fill a third of the array with the extreme value, shuffle, and
        # derive the expected last index by scanning the reversed array.
        # np.product was deprecated and removed in NumPy 2.0; use np.prod.
        total_ele = np.prod(shape)
        arr = np.zeros(total_ele)
        target_value = -1 if test_argmin else 1
        arr[: total_ele // 3] = target_value
        np.random.shuffle(arr)
        arr = arr.reshape(shape)
        ans = gt_func(np.flip(arr))
        return arr, len(arr) - ans - 1

    funcs_and_gt_funcs = [(relay.argmax, np.argmax), (relay.argmin, np.argmin)]
    lengths = [5, 10, 15]
    for func, gt_func in funcs_and_gt_funcs:
        for shape in lengths:
            x_in = relay.var("x_in", shape=[shape])
            output = func(x_in, select_last_index=True)
            arr, ans = get_test_case(shape, gt_func, test_argmin=func == relay.argmin)
            mod = tvm.IRModule.from_expr(output)
            for target, dev in tvm.testing.enabled_targets():
                op_res = relay.create_executor(
                    "graph", mod=mod, device=dev, target=target
                ).evaluate()(arr)
                assert op_res.numpy().item() == ans
def verify_mean_var_std(executor_kind, funcs, shape, axis, keepdims, dtype="float32"):
    """Compare a fused (mean, variance/std) relay op against numpy.

    `funcs` is a (relay op, numpy reference) pair; the relay op returns a
    tuple whose first element is checked against np.mean and second against
    the numpy reference.  fp16 results use looser tolerances.
    """
    test_func = funcs[0]
    ref_func = funcs[1]
    x = relay.var("x", relay.TensorType(shape, dtype))
    z = test_func(x, axis, keepdims)
    func = relay.Function([x], z.astuple())
    x_data = np.random.uniform(size=shape).astype("float32")
    # References are accumulated in float32 and then cast to the test dtype.
    ref_mean = np.mean(x_data, axis=axis, dtype="float32", keepdims=keepdims).astype(dtype)
    ref_res = ref_func(x_data, axis=axis, dtype="float32", keepdims=keepdims).astype(dtype)
    for target, dev in tvm.testing.enabled_targets():
        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data.astype(dtype)
        )
        # FP16 is always a little less accurate.
        if dtype == "float16":
            rtol, atol = (1e-2, 1e-2)
        else:
            rtol, atol = (1e-5, 1e-5)
        tvm.testing.assert_allclose(op_res[0].numpy(), ref_mean, rtol=rtol, atol=atol)
        tvm.testing.assert_allclose(op_res[1].numpy(), ref_res, rtol=rtol, atol=atol)
@tvm.testing.uses_gpu
def test_mean_var_std(executor_kind):
    """mean_variance and mean_std agree with np.var / np.std references."""
    cases = [
        # (shape, axis, keepdims, dtype)
        ((2, 3, 4), 1, True, "float32"),
        ((2, 3, 4), (1,), True, "float32"),
        ((2, 3, 4), -1, True, "float32"),
        ((2, 3, 4), (0, 1, 2), False, "float32"),
        ((4, 4, 3), None, False, "float32"),
        ((4, 4, 3), (0, 2), False, "float32"),
        ((128, 24, 128), (0, 1), False, "float32"),
        ((128, 24, 128), (0, 2), False, "float32"),
        ((128, 24, 128), (0, 1), True, "float32"),
        ((128, 24, 128), (0, 2), True, "float32"),
        # FP16 reductions over large index spaces.
        ((128, 24, 128), (0, 2), True, "float16"),
        ((128, 24, 128), None, False, "float16"),
    ]
    for funcs in ([relay.mean_variance, np.var], [relay.mean_std, np.std]):
        for shape, axis, keepdims, dtype in cases:
            verify_mean_var_std(executor_kind, funcs, shape, axis, keepdims, dtype)
@tvm.testing.uses_gpu
def test_strided_slice():
    """strided_slice: inferred output types and VM execution vs the topi
    python reference, including negative strides, overlarge indices,
    slice_mode="size", explicit axes, and relay.Any dims."""

    def verify(
        dshape,
        begin,
        end,
        strides,
        output,
        axes=None,
        slice_mode="end",
        test_ref=True,
        dtype="int32",
        unknown_dim_value=10,
    ):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        ndim = len(dshape)
        begin = begin if begin else [0] * ndim
        end = end if end else list(dshape)
        # Resolve unknown dimensions to create test case:
        dshape = list(dshape)
        for i, d in enumerate(dshape):
            if not isinstance(d, int):
                dshape[i] = unknown_dim_value
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.strided_slice_python(
            x_data,
            begin,
            end,
            strides,
            slice_mode,
            axes=axes,
        )
        if strides:
            z = relay.strided_slice(
                x, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
            )
        else:
            z = relay.strided_slice(x, begin=begin, end=end, axes=axes, slice_mode=slice_mode)
        func = relay.Function([x], z)
        func = run_infer_type(func)
        text = func.astext()
        assert "begin=" in text
        assert "end=" in text
        # `output` is the expected inferred shape; None skips the check.
        if output:
            assert func.body.checked_type == relay.ty.TensorType(output, "float32")
        if not test_ref:
            return
        for target, dev in tvm.testing.enabled_targets():
            # Need VM to run tests with non-static dimensions
            op_res = relay.create_executor("vm", device=dev, target=target).evaluate(func)(x_data)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    verify((1, 3, 10, 10), [0, 0, 0, 0], [-1, 3, 10, 10], [1], (0, 3, 10, 10), dtype="int64")
    verify(
        (1, 224, 224, 3),
        [0, 20, 20, 0],
        [1, 140, 140, 3],
        [1, 1, 1, 1],
        (1, 120, 120, 3),
        dtype="int64",
    )
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
    verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
    # Test backwards slicing.
    verify((3, 4, 3), [-1, -1, -1], [-5, -5, -5], [-1, -1, -1], (3, 4, 3))
    # Test slicing with overlarge indices.
    verify((3, 4, 3), [0, 0, 0], [np.iinfo(np.int32).max] * 3, [1, 1, 1], (3, 4, 3))
    # Test slice mode.
    verify(
        (3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
    )
    verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
    verify((3, 4, 3), [1], [4], None, None, axes=[1])
    # Test Any dims for simple cases
    verify((3, relay.Any()), [0], [1], [1], None, axes=[1], unknown_dim_value=10)
    verify((relay.Any(), 3), [0], [1], [1], None, axes=[1], unknown_dim_value=10)
    verify(
        (relay.Any(), relay.Any(), relay.Any()),
        [0, 1, 2],
        [5, 5, 5],
        [1, 2, 1],
        None,
        unknown_dim_value=10,
    )
@tvm.testing.uses_gpu
def test_dyn_strided_slice():
    """strided_slice over inputs with dynamic (relay.Any) shapes, executed via the VM."""

    def verify(
        dshape,
        begin,
        end,
        strides,
        output,
        axes=None,
        ishape=None,
        slice_mode="end",
        test_ref=True,
        dtype="int32",
    ):
        # NOTE(review): `output` and `dtype` are accepted but never used in this helper.
        ndim = len(dshape)
        # Empty/None begin/end default to a full slice along every axis.
        begin = begin if begin else [0] * ndim
        end = end if end else list(dshape)
        # target numpy result
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.strided_slice_python(
            x_data, begin, end, strides, slice_mode, axes=axes
        )
        if ishape is None:
            # Default: make every dimension of the relay input dynamic.
            ishape = (relay.Any(),) * ndim
        x = relay.var("x", relay.TensorType(ishape, "float32"))
        if strides:
            z = relay.strided_slice(
                x, begin=begin, end=end, strides=strides, axes=axes, slice_mode=slice_mode
            )
        else:
            z = relay.strided_slice(x, begin=begin, end=end, axes=axes, slice_mode=slice_mode)
        func = relay.Function([x], z)
        func = run_infer_type(func)
        text = func.astext()
        assert "begin=" in text
        assert "end=" in text
        if not test_ref:
            return
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
                x_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    verify(
        (1, 224, 224, 3),
        [0, 20, 20, 0],
        [1, 140, 140, 3],
        [1, 1, 1, 1],
        (1, 120, 120, 3),
        dtype="int64",
    )
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3), dtype="int16")
    verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None, (2, 3, 3))
    # NOTE(review): exact duplicate of the `[4, 4, 3]` call three lines above.
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
    verify(
        (3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], (2, 4, 3), slice_mode="size", test_ref=False
    )
    verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], (2, 2, 3), slice_mode="size", test_ref=True)
    verify(
        (3, 4, 3, 2),
        [1, 0],
        [3, 1],
        [1, 1],
        None,
        axes=[1, 3],
        ishape=(relay.Any(), 4, relay.Any(), 2),
    )
@tvm.testing.uses_gpu
def test_strided_set():
    """relay.strided_set: type inference and numeric results against the numpy reference."""

    def verify(dshape, begin, end, strides, vshape, test_ref=True):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        v = relay.var("v", relay.TensorType(vshape, "float32"))
        begin_c = relay.const(begin, dtype="int32")
        end_c = relay.const(end, dtype="int32")
        if strides:
            strides_c = relay.const(strides, dtype="int32")
            z = relay.strided_set(x, v, begin=begin_c, end=end_c, strides=strides_c)
        else:
            z = relay.strided_set(x, v, begin=begin_c, end=end_c)
        func = relay.Function([x, v], z)
        func = run_infer_type(func)
        text = func.astext()
        assert "strided_set" in text
        # The result keeps the shape of the tensor being written into.
        assert func.body.checked_type == relay.ty.TensorType(dshape, "float32")
        if not test_ref:
            return
        x_data = np.random.uniform(size=dshape).astype("float32")
        v_data = np.random.uniform(size=vshape).astype("float32")
        ref_res = tvm.topi.testing.strided_set_python(x_data, v_data, begin, end, strides)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
                x_data, v_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    verify((3, 4, 16), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
    verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2], (3, 1, 2))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], (1, 3, 3))
    verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], (1, 4, 3))
    verify((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2], (1, 2, 2))
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1], (1, 2, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None, (2, 3, 3))
    # begin/end deliberately shorter than the input rank in the next two cases.
    verify((3, 4, 3), [1, 1, 0], [4, 4], None, (2, 3, 3))
    verify((3, 4, 3), [1, 1], [4, 4, 3], None, (2, 3, 3))
if __name__ == "__main__":
    # Run all tests in this file when executed as a script.
    tvm.testing.main()
| 25,838 | 35.807692 | 100 | py |
tvm | tvm-main/tests/python/relay/test_pass_qnn_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test legalize pass"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay import transform, analysis
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.testing import run_infer_type
def alpha_equal(x, y):
    """
    Wrapper around alpha equality which ensures that
    the hash function respects equality.
    """
    lhs, rhs = x["main"], y["main"]
    if not tvm.ir.structural_equal(lhs, rhs):
        return False
    return tvm.ir.structural_hash(lhs) == tvm.ir.structural_hash(rhs)
def run_opt_pass(expr, passes):
    """Apply the given pass (or list of passes) to *expr* at opt_level 3.

    Returns the optimized "main" function when *expr* is a relay.Function,
    otherwise just its body expression.
    """
    if not isinstance(passes, list):
        passes = [passes]
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        module = tvm.transform.Sequential(passes)(module)
    main_fn = module["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def test_qnn_legalize():
    """Test directly replacing an operator with a new one"""

    def before():
        # Single qnn.requantize with identity scale / zero-point parameters.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
        y = relay.qnn.op.requantize(
            x,
            input_scale=relay.const(1, "float32"),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(1, "float32"),
            output_zero_point=relay.const(0, "int32"),
            out_dtype="int8",
        )
        y = relay.Function([x], y)
        return y

    def legalize_qnn_requantize(attrs, inputs, types):
        # Custom legalization rule: prepend an add of zero before the requantize.
        data = inputs[0]
        data = relay.add(relay.const(0, "int8"), data)
        y = relay.qnn.op.requantize(
            data,
            input_scale=relay.const(1, "float32"),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(1, "float32"),
            output_zero_point=relay.const(0, "int32"),
            out_dtype="int8",
        )
        return y

    def expected():
        # Same graph as before() but with the add inserted by legalization.
        x = relay.var("x", shape=(1, 64, 56, 56), dtype="int8")
        y = relay.add(relay.const(0, "int8"), x)
        z = relay.qnn.op.requantize(
            y,
            input_scale=relay.const(1, "float32"),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(1, "float32"),
            output_zero_point=relay.const(0, "int32"),
            out_dtype="int8",
        )
        z = relay.Function([x], z)
        return z

    a = before()
    with TempOpAttr("qnn.requantize", "FTVMQnnLegalize", legalize_qnn_requantize):
        # Check that Relay Legalize does not change the graph.
        a = run_opt_pass(a, relay.transform.Legalize())
        b = run_opt_pass(before(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
        # Check that QNN Legalize modifies the graph.
        a = run_opt_pass(a, relay.qnn.transform.Legalize())
        b = run_opt_pass(expected(), transform.InferType())
        assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
def test_qnn_legalize_qnn_conv2d():
    """QNN conv2d legalization across CPU/CUDA targets with and without fast int8 support."""

    def _get_mod(data_dtype, kernel_dtype):
        # Module with a single qnn.conv2d in NCHW/OIHW layout.
        data_shape = (1, 64, 256, 256)
        kernel_shape = (128, 64, 3, 3)
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
        func = relay.qnn.op.conv2d(
            data,
            kernel,
            input_zero_point=relay.const(1, "int32"),
            kernel_zero_point=relay.const(1, "int32"),
            input_scale=relay.const(1.0, "float32"),
            kernel_scale=relay.const(1.0, "float32"),
            kernel_size=(3, 3),
            channels=kernel_shape[0],
            strides=(1, 1),
            dilation=(1, 1),
            out_dtype="int32",
            data_layout="NCHW",
            kernel_layout="OIHW",
        )
        mod = relay.Function(relay.analysis.free_vars(func), func)
        mod = tvm.IRModule.from_expr(mod)
        return mod

    # Check uint8 x uint8 and int8 x int8 transformation
    for dtype in ("uint8", "int8"):
        mod = _get_mod(dtype, dtype)
        #############################################################
        # Check transformations for platforms with fast Int8 support.
        #############################################################
        # Check that Intel AVX512 (with or w/o VNNI) gets picked up.
        for target in ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]:
            with tvm.target.Target(target):
                mod = relay.transform.InferType()(mod)
                legalized_mod = relay.qnn.transform.Legalize()(mod)
                assert "cast" in legalized_mod.astext() and "qnn.conv2d" in legalized_mod.astext()
        # Since same dtype, there should not be any transformation
        with tvm.target.Target(
            "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
        ):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert tvm.ir.structural_equal(mod, legalized_mod)
        ################################################################
        # Check transformations for platforms without fast Int8 support.
        ################################################################
        # Older Intel versions.
        with tvm.target.Target("llvm"):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
        # Older ARM versions.
        with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()

    # Check uint8 x int8 transformation
    mod = _get_mod("uint8", "int8")
    #############################################################
    # Check transformations for platforms with fast Int8 support.
    #############################################################
    # Check no transformation for Intel AVX512.
    with tvm.target.Target("llvm -mcpu=skylake-avx512"):
        mod = relay.transform.InferType()(mod)
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert tvm.ir.structural_equal(mod, legalized_mod)
    # ARM - so check that transformation has happened.
    with tvm.target.Target(
        "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
    ):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn.conv2d" in legalized_mod.astext()
    ################################################################
    # Check transformations for platforms without fast Int8 support.
    ################################################################
    # Older Intel versions.
    with tvm.target.Target("llvm"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
    # Older ARM versions.
    with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
    ###########################################
    # Check transformations for CUDA platforms.
    ###########################################
    with tvm.target.Target("cuda"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" in legalized_mod.astext()
def test_qnn_legalize_qnn_dense():
    """QNN dense legalization across CPU/CUDA targets with and without fast int8 support."""

    def _get_mod(data_dtype, kernel_dtype):
        # Module with a single qnn.dense.
        data_shape = (10, 3)
        kernel_shape = (20, 3)
        data = relay.var("data", shape=data_shape, dtype=data_dtype)
        kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
        func = relay.qnn.op.dense(
            data,
            kernel,
            input_zero_point=relay.const(1, "int32"),
            kernel_zero_point=relay.const(1, "int32"),
            input_scale=relay.const(1, "float32"),
            kernel_scale=relay.const(1, "float32"),
            units=kernel_shape[0],
            out_dtype="int32",
        )
        mod = relay.Function(relay.analysis.free_vars(func), func)
        mod = tvm.IRModule.from_expr(mod)
        return mod

    # Check uint8 x uint8 and int8 x int8 transformation
    for dtype in ("uint8", "int8"):
        mod = _get_mod(dtype, dtype)
        #############################################################
        # Check transformations for platforms with fast Int8 support.
        #############################################################
        # Check that Intel AVX512 (with or w/o VNNI) gets picked up.
        for target in ["llvm -mcpu=skylake-avx512", "llvm -mcpu=cascadelake"]:
            with tvm.target.Target(target):
                mod = relay.transform.InferType()(mod)
                legalized_mod = relay.qnn.transform.Legalize()(mod)
                assert "cast" in legalized_mod.astext() and "qnn.dense" in legalized_mod.astext()
        # Since same dtype, there should not be any transformation
        with tvm.target.Target(
            "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
        ):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert tvm.ir.structural_equal(mod, legalized_mod)
        ################################################################
        # Check transformations for platforms without fast Int8 support.
        ################################################################
        # Older Intel versions.
        with tvm.target.Target("llvm"):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
        # Older ARM versions.
        with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
            legalized_mod = relay.qnn.transform.Legalize()(mod)
            assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()

    # Check uint8 x int8 transformation
    mod = _get_mod("uint8", "int8")
    #############################################################
    # Check transformations for platforms with fast Int8 support.
    #############################################################
    # Check no transformation for Intel AVX512.
    with tvm.target.Target("llvm -mcpu=skylake-avx512"):
        mod = relay.transform.InferType()(mod)
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert tvm.ir.structural_equal(mod, legalized_mod)
    # ARM - so check that transformation has happened.
    with tvm.target.Target(
        "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
    ):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn.dense" in legalized_mod.astext()
    ################################################################
    # Check transformations for platforms without fast Int8 support.
    ################################################################
    # Older Intel versions.
    with tvm.target.Target("llvm"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
    # Older ARM versions.
    with tvm.target.Target("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" not in legalized_mod.astext()
    ###########################################
    # Check transformations for CUDA platforms.
    ###########################################
    with tvm.target.Target("cuda"):
        legalized_mod = relay.qnn.transform.Legalize()(mod)
        assert "cast" in legalized_mod.astext() and "qnn" in legalized_mod.astext()
def test_qnn_legalize_qnn_conv2d_non_scalar_qnn_params():
    """
    Test QNN legalization for qnn.conv2d op for Hexagon target when kernel zero point and kernel
    scale are vectors of scalars.

    The expected graph pads input channels (29 -> 32) and output channels
    (60 -> 64), extends the per-channel zero points/scales with neutral values
    (zp=0, scale=1.0), and slices the conv2d result back to the original shape.
    """
    data_shape = (1, 29, 16, 16)
    weights_shape = (60, 29, 3, 3)
    # Fix: renamed the E741-ambiguous "O" and removed the unused local "I".
    out_channels = weights_shape[0]
    data = relay.var("data", shape=data_shape, dtype="uint8")
    weights = relay.var("weight", shape=weights_shape, dtype="int8")
    data_zp = relay.const(2)
    data_scale = relay.const(0.15)

    def before():
        # qnn.conv2d with vector (per-output-channel) kernel zp/scale.
        op = relay.qnn.op.conv2d(
            data,
            weights,
            input_zero_point=data_zp,
            kernel_zero_point=relay.const([1] * out_channels),
            input_scale=data_scale,
            kernel_scale=relay.const([0.17] * out_channels),
            padding=[0, 0, 0, 0],
            channels=out_channels,
            kernel_size=[3, 3],
        )
        return op

    def expected():
        in_diff = 3  # input channels padded 29 -> 32
        out_diff = 4  # output channels padded 60 -> 64
        op0 = relay.nn.pad(weights, pad_width=[[0, 0], [0, in_diff], [0, 0], [0, 0]])
        op1 = relay.nn.pad(data, pad_width=[[0, 0], [0, in_diff], [0, 0], [0, 0]])
        op2 = relay.nn.pad(op0, pad_width=[[0, out_diff], [0, 0], [0, 0], [0, 0]])
        op3 = relay.qnn.op.conv2d(
            op1,
            op2,
            input_zero_point=data_zp,
            # Padded output channels get neutral zero point 0 and scale 1.0.
            kernel_zero_point=relay.const([1] * out_channels + [0] * out_diff),
            input_scale=data_scale,
            kernel_scale=relay.const([0.17] * out_channels + [1.0] * out_diff),
            padding=[0, 0, 0, 0],
            channels=(out_channels + out_diff),
            kernel_size=[3, 3],
        )
        # Slice the padded result back to the original (1, 60, 14, 14) shape.
        op4 = relay.strided_slice(op3, begin=[0, 0, 0, 0], end=[1, 60, 14, 14], strides=[1])
        return op4

    target = tvm.target.hexagon("v68")
    with tvm.target.Target(target):
        a = run_opt_pass(before(), relay.qnn.transform.Legalize())
        b = run_infer_type(expected())
        tvm.ir.assert_structural_equal(a, b)
def test_qnn_legalize_qnn_dense_non_scalar_qnn_params():
    """
    Test QNN legalization for qnn.dense op for Hexagon target when kernel zero point and kernel
    scale are vectors of scalars.
    """
    data_shape = (4, 16)
    weights_shape = (58, 16)
    num_units = weights_shape[0]
    data = relay.var("data", shape=data_shape, dtype="uint8")
    weights = relay.var("weight", shape=weights_shape, dtype="int8")
    data_zp = relay.const(2)
    data_scale = relay.const(0.15)

    def before():
        # qnn.dense with vector (per-unit) kernel zero point and scale.
        weight_zp = relay.const([1] * num_units)
        weight_scale = relay.const([0.17] * num_units)
        return relay.qnn.op.dense(
            data, weights, data_zp, weight_zp, data_scale, weight_scale, units=num_units
        )

    def expected():
        # Units padded 58 -> 64; extra QNN params get neutral values
        # (zp=0, scale=1.0) and the result is sliced back afterwards.
        pad = 6
        weight_zp = relay.const([1] * num_units + [0] * pad)
        weight_scale = relay.const([0.17] * num_units + [1.0] * pad)
        padded_weights = relay.nn.pad(weights, pad_width=[[0, pad], [0, 0]])
        dense = relay.qnn.op.dense(
            data, padded_weights, data_zp, weight_zp, data_scale, weight_scale, units=(num_units + pad)
        )
        return relay.strided_slice(
            dense, begin=[0, 0], end=[data_shape[0], num_units], strides=[1], axes=None
        )

    target = tvm.target.hexagon("v68")
    with tvm.target.Target(target):
        actual = run_opt_pass(before(), relay.qnn.transform.Legalize())
        reference = run_infer_type(expected())
        tvm.ir.assert_structural_equal(actual, reference)
if __name__ == "__main__":
    # Run each test directly when invoked as a script.
    test_qnn_legalize()
    test_qnn_legalize_qnn_conv2d()
    test_qnn_legalize_qnn_dense()
    test_qnn_legalize_qnn_conv2d_non_scalar_qnn_params()
    test_qnn_legalize_qnn_dense_non_scalar_qnn_params()
| 16,324 | 40.329114 | 100 | py |
tvm | tvm-main/tests/python/relay/test_op_grad_level4.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.relay.testing import check_grad, _np_randn_from_type
# Gradient checks in this file run only with the debug executor.
executor_kind = tvm.testing.parameter("debug")
def verify_reduction_grad(executor_kind, red_fn, d_shape, axis=None, keepdims=False, exclude=False):
    """Numerically check the gradient of a reduction op over a float32 tensor."""
    inp = relay.var("data", relay.TensorType(d_shape, "float32"))
    reduced = red_fn(inp, axis=axis, keepdims=keepdims, exclude=exclude)
    check_grad(relay.Function([inp], reduced), executor_kind=executor_kind)
def test_reduction_grad(executor_kind):
    """Gradients of sum / variance / unbiased variance / mean over several axis configs."""

    def _unbiased_variance(x, axis=None, keepdims=False, exclude=False):
        return relay.variance(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True)

    cases = (
        ((4, 2), {}),
        ((4, 2), {"axis": -1, "keepdims": True}),
        ((4, 2, 1), {"axis": (1, 2), "exclude": True}),
        ((4, 2, 1), {"axis": 1}),
    )
    for reduction in (relay.sum, relay.variance, _unbiased_variance, relay.mean):
        for shape, kwargs in cases:
            verify_reduction_grad(executor_kind, reduction, shape, **kwargs)
def verify_max_grad(executor_kind, d_shape, axis=None, keepdims=False, exclude=False):
    """Numerically check the gradient of relay.max for one axis configuration."""
    inp = relay.var("data", relay.TensorType(d_shape, "float32"))
    out = relay.max(inp, axis=axis, keepdims=keepdims, exclude=exclude)
    check_grad(relay.Function([inp], out), scale=1e-3, executor_kind=executor_kind)
def test_max_grad(executor_kind):
    """Gradient of max with no axis, a negative axis, keepdims, and exclude."""
    for shape, kwargs in (
        ((10, 10), {"axis": None}),
        ((10, 10), {"axis": -1}),
        ((6, 3, 2), {"axis": (1, 2), "keepdims": True}),
        ((5, 4, 3), {"axis": (0, 2), "exclude": True}),
    ):
        verify_max_grad(executor_kind, shape, **kwargs)
def test_where_grad(executor_kind):
    """Gradient of where() w.r.t. both value branches (condition input excluded)."""
    cond_ty = relay.TensorType((2, 3, 4), "int32")
    true_ty = relay.TensorType((1, 3, 4), "float32")
    false_ty = relay.TensorType((2, 1, 4), "float32")
    cond = relay.var("cond", type_annotation=cond_ty)
    lhs = relay.var("lhs", type_annotation=true_ty)
    rhs = relay.var("rhs", type_annotation=false_ty)
    fwd_func = relay.Function([cond, lhs, rhs], relay.where(cond, lhs, rhs))
    feed = [
        np.random.randint(2, size=cond_ty.concrete_shape, dtype=cond_ty.dtype),
        _np_randn_from_type(true_ty, scale=1e-5),
        _np_randn_from_type(false_ty, scale=1e-5),
    ]
    # Only the two value inputs are gradient-checked; the int condition is not.
    check_grad(fwd_func, inputs=feed, test_inputs=feed[1:], executor_kind=executor_kind)
def test_less_equal_grad(executor_kind):
    """Gradient of less_equal should be identically zero for both inputs."""
    x_ty = relay.TensorType((2, 3, 4), "float32")
    y_ty = relay.TensorType((3, 1), "float32")
    x = relay.var("x", type_annotation=x_ty)
    y = relay.var("y", type_annotation=y_ty)
    fwd_func = relay.Function([x, y], relay.less_equal(x, y))
    # Inputs are kept far apart so the +/- epsilon perturbation used by the
    # numerical gradient cannot flip the comparison result.
    feed = [
        np.random.choice([-1, 1], size=x_ty.concrete_shape).astype(x_ty.dtype),
        np.random.choice([-2, 2], size=y_ty.concrete_shape).astype(y_ty.dtype),
    ]
    check_grad(fwd_func, inputs=feed, test_inputs=feed, eps=1e-6, executor_kind=executor_kind)
def test_not_equal_grad(executor_kind):
    """Gradient of not_equal should be identically zero for both inputs."""
    x_ty = relay.TensorType((2, 3, 4), "float32")
    y_ty = relay.TensorType((3, 1), "float32")
    x = relay.var("x", type_annotation=x_ty)
    y = relay.var("y", type_annotation=y_ty)
    fwd_func = relay.Function([x, y], relay.not_equal(x, y))
    # Inputs are kept far apart so the +/- epsilon perturbation used by the
    # numerical gradient cannot flip the comparison result.
    feed = [
        np.random.choice([-1, 1], size=x_ty.concrete_shape).astype(x_ty.dtype),
        np.random.choice([-2, 2], size=y_ty.concrete_shape).astype(y_ty.dtype),
    ]
    check_grad(fwd_func, inputs=feed, test_inputs=feed, eps=1e-6, executor_kind=executor_kind)
def test_strided_slice_grad(executor_kind):
    """Gradient of strided_slice in both "end" and "size" slice modes."""

    def check(sh, dtype, begin, end, strides, slice_mode):
        inp = relay.var("x", shape=sh, dtype=dtype)
        sliced = relay.strided_slice(
            inp, begin=begin, end=end, strides=strides, slice_mode=slice_mode
        )
        check_grad(relay.Function([inp], sliced), executor_kind=executor_kind)

    cases = [
        ((2, 3, 4), "float32", (0, 1, 0), (-1, -1, 1), (1, 1, 1), "size"),
        ((2, 3, 4), "float32", (0, 1, 0), (2, 3, 1), (1, 1, 1), "end"),
        # Strides must be properly ignored when slice_mode is "size".
        ((2, 3, 4), "float32", (0, 0, 0), (-1, -1, -1), (1, 1, 2), "size"),
        ((2, 3, 4), "float32", (0, 0, 0), (2, 3, 4), (1, 1, 2), "end"),
    ]
    for args in cases:
        check(*args)
if __name__ == "__main__":
    # Run all tests in this file when executed as a script.
    tvm.testing.main()
| 5,591 | 43.031496 | 100 | py |
tvm | tvm-main/tests/python/relay/test_auto_scheduler_tuning.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test end-to-end network tuning with auto-scheduler"""
import tempfile
import numpy as np
from tvm import auto_scheduler, relay
from tvm.contrib import graph_executor
import tvm.testing
from test_auto_scheduler_task_extraction import get_network
def tune_network(network, target):
    """End-to-end auto-scheduler flow: extract tasks, tune briefly, build with the
    tuned logs (plus variants of log loading), and compare numeric outputs against
    an opt_level=0 reference build."""
    # Extract tasks
    mod, params = get_network(network)
    target = tvm.target.Target(target)
    tasks, task_weights = auto_scheduler.extract_tasks(mod["main"], params, target)

    with tempfile.NamedTemporaryFile() as fp:
        log_file = fp.name

        # Tuning
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(timeout=60, device=0)
        tuner = auto_scheduler.TaskScheduler(tasks, task_weights, callbacks=[])
        tune_option = auto_scheduler.TuningOptions(
            num_measure_trials=100,
            num_measures_per_round=2,
            early_stopping=1,
            runner=measure_ctx.runner,
            builder=auto_scheduler.LocalBuilder(timeout=60),
            measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
        )
        tuner.tune(tune_option, search_policy="sketch.random")
        # Drop the measurement context before compiling (presumably frees the
        # local RPC resources it holds — see LocalRPCMeasureContext docs).
        del measure_ctx

        # Compile with the history best
        with auto_scheduler.ApplyHistoryBest(log_file):
            with tvm.transform.PassContext(
                opt_level=3, config={"relay.backend.use_auto_scheduler": True}
            ):
                lib = relay.build(mod, target=target, params=params)

        # Also test that multiple log files can be loaded.
        with auto_scheduler.ApplyHistoryBest([log_file, log_file]) as best:
            assert isinstance(
                best, auto_scheduler.dispatcher.ApplyHistoryBest
            ), "Unable to load multiple log files jointly."

        # Confirm iterables can be directly loaded.
        loaded_recs = auto_scheduler.dispatcher.load_records(log_file)
        with auto_scheduler.ApplyHistoryBest(iter(loaded_recs)) as best:
            assert isinstance(
                best, auto_scheduler.dispatcher.ApplyHistoryBest
            ), "Unable to ingest logs from an interator."

        # Sample a schedule when missing
        with auto_scheduler.ApplyHistoryBestOrSample(None, num_measure=2):
            with tvm.transform.PassContext(
                opt_level=3, config={"relay.backend.use_auto_scheduler": True}
            ):
                lib2 = relay.build(mod, target=target, params=params)

        # Compile without auto-scheduler and any other optimization for correctness check
        with tvm.transform.PassContext(opt_level=0):
            ref_lib = relay.build(mod, target=target, params=params)

        # Check the correctness
        def get_output(data, lib):
            # Run one inference on CUDA device 0 and return the first output.
            dev = tvm.cuda()
            module = graph_executor.GraphModule(lib["default"](dev))
            module.set_input("data", data)
            module.run()
            return module.get_output(0).numpy()

        # Fixed seed so all three builds see identical input data.
        np.random.seed(0)
        if network == "mlp":
            data = np.random.uniform(size=(1, 32))
        elif network == "winograd-test":
            data = np.random.uniform(size=(1, 23, 40, 32))
        else:
            raise ValueError("Unknown network: " + network)

        actual_output1 = get_output(data, lib)
        actual_output2 = get_output(data, lib2)
        expected_output = get_output(data, ref_lib)

        tvm.testing.assert_allclose(actual_output1, expected_output, rtol=1e-4, atol=1e-4)
        tvm.testing.assert_allclose(actual_output2, expected_output, rtol=1e-4, atol=1e-4)
@tvm.testing.requires_cuda
def test_tuning_cuda():
    """End-to-end auto-scheduler tuning on CUDA for two small networks."""
    for network in ("mlp", "winograd-test"):
        tune_network(network, "cuda")
if __name__ == "__main__":
    # Run the CUDA tuning test when executed as a script.
    test_tuning_cuda()
| 4,491 | 38.06087 | 90 | py |
tvm | tvm-main/tests/python/relay/test_pass_eta_expand.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
from tvm import te
from tvm import relay
import tvm.relay.transform as _transform
def test_eta_expand_global_var():
    """EtaExpand(expand_global_var=True) wraps a bare global-var reference in a
    fresh function that forwards its argument."""
    mod = tvm.relay.fromtext(
        r"""
        #[version = "0.0.5"]
        def @aux(%x: Tensor[(), int32]) -> Tensor[(), int32] {
            %x
        }
        def @main() -> fn(Tensor[(), int32]) -> Tensor[(), int32] {
            @aux
        }
    """
    )
    seq = tvm.transform.Sequential([_transform.EtaExpand(expand_global_var=True)])
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    # After the pass, the bare @aux reference becomes fn(%x) { @aux(%x) }.
    expected = tvm.relay.fromtext(
        r"""
        #[version = "0.0.5"]
        def @aux(%x: Tensor[(), int32]) -> Tensor[(), int32] {
            %x
        }
        def @main() -> fn(Tensor[(), int32]) -> Tensor[(), int32] {
            fn (%x: Tensor[(), int32]) -> Tensor[(), int32] {
                @aux(%x)
            }
        }
    """
    )
    tvm.ir.assert_structural_equal(mod["main"], expected["main"], map_free_vars=True)
def test_eta_expand_constructor():
    """EtaExpand(expand_constructor=True) wraps a bare ADT constructor reference
    in a fresh function that forwards its arguments."""
    mod = tvm.relay.fromtext(
        r"""
        #[version = "0.0.5"]
        type List[A] {
            Cons(A, List[A]),
            Nil,
        }
        def @main[A]() -> fn(A, List[A]) -> List[A] {
            Cons
        }
    """
    )
    seq = tvm.transform.Sequential(
        [_transform.EtaExpand(expand_constructor=True), _transform.InferType()]
    )
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    # After the pass, the bare Cons reference becomes fn(%x, %xs) { Cons(%x, %xs) }.
    expected = tvm.relay.fromtext(
        r"""
        #[version = "0.0.5"]
        type List[A] {
            Cons(A, List[A]),
            Nil,
        }
        def @main[A]() -> fn(A, List[A]) -> List[A] {
            fn [A](%x: A, %xs: List[A]) -> List[A] {
                Cons(%x, %xs)
            }
        }
    """
    )
    tvm.ir.assert_structural_equal(mod["main"], expected["main"], map_free_vars=True)
if __name__ == "__main__":
    # Run both eta-expansion tests when executed as a script.
    test_eta_expand_global_var()
    test_eta_expand_constructor()
| 2,805 | 28.229167 | 85 | py |
tvm | tvm-main/tests/python/relay/test_pass_remove_unused_functions.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.prelude import Prelude
def test_remove_all_prelude_functions():
    """RemoveUnusedFunctions strips every prelude definition when only `main` is live."""
    module = tvm.IRModule()
    Prelude(module)  # populate the module with the full prelude
    inp = relay.var("x", shape=(1, 16))
    module["main"] = relay.Function([inp], inp)
    module = relay.transform.RemoveUnusedFunctions()(module)
    remaining = {gvar.name_hint for gvar, _ in module.functions.items()}
    assert remaining == {"main"}
def test_remove_all_prelude_functions_but_referenced_functions():
    """A user function called from `main` survives while the prelude is dropped."""
    module = tvm.IRModule()
    Prelude(module)
    arg = relay.var("x", shape=(1, 16))
    identity_gv = relay.GlobalVar("id_func")
    module[identity_gv] = relay.Function([arg], arg)
    module["main"] = relay.Function([arg], identity_gv(arg))
    module = relay.transform.RemoveUnusedFunctions()(module)
    remaining = {gvar.name_hint for gvar, _ in module.functions.items()}
    assert remaining == {"id_func", "main"}
def test_keep_only_referenced_prelude_functions():
    """Prelude functions reachable from `main` (hd, tl) must be kept by the pass."""
    module = tvm.IRModule()
    prelude = Prelude(module)
    _, cons, nil = prelude.mod.get_type("List")
    hd = prelude.mod.get_global_var("hd")
    tl = prelude.mod.get_global_var("tl")
    lst = nil()
    for value in [4, 3, 2, 1, 0]:
        lst = cons(relay.const(value), lst)
    module["main"] = relay.Function([], hd(tl(tl(lst))))
    module = relay.transform.RemoveUnusedFunctions()(module)
    remaining = {gvar.name_hint for gvar, _ in module.functions.items()}
    assert remaining == {"tl", "hd", "main"}
def test_multiple_entry_functions():
    """With several entry points, everything reachable from any of them is kept."""
    module = tvm.IRModule()
    prelude = Prelude(module)
    _, cons, nil = prelude.mod.get_type("List")
    hd = prelude.mod.get_global_var("hd")
    tl = prelude.mod.get_global_var("tl")
    lst = nil()
    for value in [4, 3, 2, 1, 0]:
        lst = cons(relay.const(value), lst)
    module["main1"] = relay.Function([], hd(tl(tl(lst))))
    arg = relay.var("x", shape=(1, 16))
    identity_gv = relay.GlobalVar("id_func")
    module[identity_gv] = relay.Function([arg], arg)
    module["main2"] = relay.Function([arg], identity_gv(arg))
    module = relay.transform.RemoveUnusedFunctions(["main1", "main2"])(module)
    remaining = {gvar.name_hint for gvar, _ in module.functions.items()}
    assert remaining == {"tl", "hd", "main2", "id_func", "main1"}
def test_globalvar_as_call_arg():
    """Global vars used only as constructor/call arguments (tensor-array ops)
    must still count as referenced and survive RemoveUnusedFunctions."""
    mod = tvm.IRModule()
    p = Prelude(mod)
    tensor_array = p.get_global_var("tensor_array", "int32")
    tensor1 = p.get_ctor(p.get_name("tensor_t", "int32"), "tensor1", "int32")
    write = p.get_global_var("tensor_array_write", "int32")
    stack = p.get_global_var("tensor_array_stack", "int32")
    v = relay.var("v")
    # Build: stack(write(tensor_array(3), 0, tensor1(v)))
    init_tensor_array = tensor_array(relay.const(3))
    tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
    tensor_array2 = stack(tensor_array1)
    mod["main"] = relay.Function([v], tensor_array2)
    mod = relay.transform.RemoveUnusedFunctions()(mod)
    l = set([x[0].name_hint for x in mod.functions.items()])
    assert "tensor_array_int32" in l
def test_call_globalvar_without_args():
    """GlobalVars reached only through an `if` used as a call target must be kept,
    leaving the module unchanged by the pass."""

    def build_module():
        mod = tvm.IRModule({})
        g1 = relay.GlobalVar("g1")
        g2 = relay.GlobalVar("g2")
        mod[g1] = relay.Function([], relay.const(1))
        mod[g2] = relay.Function([], relay.const(2))
        cond = relay.var("p", "bool")
        mod["main"] = relay.Function([cond], relay.Call(relay.If(cond, g1, g2), []))
        return mod

    mod = build_module()
    ref_mod = build_module()
    mod = relay.transform.RemoveUnusedFunctions()(mod)
    assert tvm.ir.structural_equal(mod, ref_mod, map_free_vars=True)
if __name__ == "__main__":
    # Discover and run all tests in this file via the tvm.testing pytest wrapper.
    tvm.testing.main()
# ---- dataset file-boundary metadata (extraction artifact, kept as comment) ----
# | 4,354 | 33.84 | 78 | py |
# tvm | tvm-main/tests/python/relay/test_op_qnn_dense.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.testing.temp_op_attr import TempOpAttr
# We use llvm target for testing functionality. `llvm` points to an older Intel
# generation machine, that legalizes to a simple lowering. Therefore, the
# legalization is overwritten such that it can be skipped and we use the
# QNNCanonicalizeOps lowering for the testing.
def legalize_qnn_dense(attrs, inputs, types):
    """Legalization override that declines to legalize, so the default
    QnnCanonicalizeOps lowering is exercised by these tests."""
    return None
def make_requantize_params(input_scale, output_scale, output_zero_point, out_dtype):
    """Bundle the requantize parameters into the config dict the driver consumes."""
    return {
        "input_scale": input_scale,
        "output_scale": output_scale,
        "output_zero_point": output_zero_point,
        "out_dtype": out_dtype,
    }
def make_configuration(
    quantized_data,
    quantized_kernel,
    dtype,
    input_shape,
    kernel_shape,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    units,
    output,
    out_dtype="int32",
    bias=None,
    requantize=None,
):
    """Assemble the full qnn.dense test-case dictionary.

    A requantize config is only meaningful on top of a bias-add, hence the
    assertion below.
    """
    if requantize is not None:
        assert bias is not None
    return dict(
        quantized_data=quantized_data,
        quantized_kernel=quantized_kernel,
        dtype=dtype,
        input_shape=input_shape,
        kernel_shape=kernel_shape,
        input_zero_point=input_zero_point,
        kernel_zero_point=kernel_zero_point,
        input_scale=input_scale,
        kernel_scale=kernel_scale,
        units=units,
        output=output,
        out_dtype=out_dtype,
        bias=bias,
        requantize=requantize,
    )
def make_int_configuration(use_bias=False, requantize_output=False, per_channel=False):
    """Build a canned int8 qnn.dense test case (inputs, weights and golden output)."""
    input_shape, kernel_shape, output_shape = (2, 10), (3, 10), (2, 3)
    input_zero_point, kernel_zero_point = -1, -1
    in_dtype = "int8"
    out_dtype = "int8" if requantize_output else "int32"
    units = 3
    quantized_data_np = (
        np.array([1, 3, 5, 7, 9, 11, 13, 15, -19, -21, 1, 3, 5, 7, 9, 11, 13, -17, 17, -21])
        .astype(in_dtype)
        .reshape(input_shape)
    )
    # Every kernel row is the odd sequence 1, 3, ..., 19; generate it instead of
    # spelling out the 30-element literal.
    quantized_kernel_np = (
        np.tile(np.arange(1, 21, 2), units).astype(in_dtype).reshape(kernel_shape)
    )
    input_scale = 0.5
    kernel_scale = 0.5
    output_scale = 1.0
    bias = np.array([4, 8, 12]).astype(out_dtype).reshape((units,)) if use_bias else None
    # Golden outputs precomputed for each configuration.
    if per_channel:
        assert use_bias and requantize_output
        kernel_scale = np.array([0.5, 0.3, 0.4], dtype=np.float32)
        output = np.array([23, 14, 20, 57, 34, 47])
    elif requantize_output:
        assert use_bias
        output = np.array([23, 24, 25, 57, 58, 59])
    elif use_bias:
        output = np.array([96, 100, 104, 232, 236, 240])
    else:
        output = np.array([92, 92, 92, 228, 228, 228])
    requant_params = (
        make_requantize_params(input_scale * kernel_scale, output_scale, -1, "int8")
        if requantize_output
        else None
    )
    output = output.astype(out_dtype).reshape(output_shape)
    return make_configuration(
        quantized_data=quantized_data_np,
        quantized_kernel=quantized_kernel_np,
        dtype=in_dtype,
        input_shape=input_shape,
        kernel_shape=kernel_shape,
        input_zero_point=input_zero_point,
        kernel_zero_point=kernel_zero_point,
        input_scale=input_scale,
        kernel_scale=kernel_scale,
        units=units,
        output=output,
        bias=bias,
        requantize=requant_params,
    )
def qnn_dense_driver(test_configuration):
    """Build, compile and run a qnn.dense graph from a config dict, then
    compare the result against the config's golden output (values and dtype)."""
    in_dtype = test_configuration["dtype"]
    out_dtype = test_configuration["out_dtype"]
    quantized_data_name = "quantized_data"
    quantized_kernel_name = "quantized_kernel"
    expected_out_dtype = test_configuration["out_dtype"]
    bias_name = "bias"
    quantized_data = relay.var(
        quantized_data_name, shape=test_configuration["input_shape"], dtype=in_dtype
    )
    quantized_kernel = relay.var(
        quantized_kernel_name, shape=test_configuration["kernel_shape"], dtype=in_dtype
    )
    mod = relay.qnn.op.dense(
        quantized_data,
        quantized_kernel,
        relay.const(test_configuration["input_zero_point"], "int32"),
        relay.const(test_configuration["kernel_zero_point"], "int32"),
        relay.const(test_configuration["input_scale"], "float32"),
        relay.const(test_configuration["kernel_scale"], "float32"),
        test_configuration["units"],
    )
    # Optionally stack bias-add and requantize on top of the dense output.
    if test_configuration[bias_name] is not None:
        bias = relay.var(bias_name, shape=test_configuration["bias"].shape, dtype=out_dtype)
        mod = relay.nn.bias_add(mod, bias)
    if test_configuration["requantize"] is not None:
        requantize_config = test_configuration["requantize"]
        mod = relay.qnn.op.requantize(
            mod,
            input_scale=relay.const(requantize_config["input_scale"], "float32"),
            input_zero_point=relay.const(0, "int32"),
            output_scale=relay.const(requantize_config["output_scale"], "float32"),
            output_zero_point=relay.const(requantize_config["output_zero_point"], "int32"),
            out_dtype=requantize_config["out_dtype"],
        )
        # requantize changes the result dtype that we must check for.
        expected_out_dtype = requantize_config["out_dtype"]
    mod = relay.Function(relay.analysis.free_vars(mod), mod)
    mod = tvm.IRModule.from_expr(mod)
    mod = relay.transform.InferType()(mod)
    # Lower qnn ops via the canonicalize path (legalization is overridden above).
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    with tvm.transform.PassContext(opt_level=2):
        graph, lib, params = relay.build(mod, "llvm", params=None)
    mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
    mod.set_input(quantized_data_name, test_configuration[quantized_data_name])
    mod.set_input(quantized_kernel_name, test_configuration[quantized_kernel_name])
    if test_configuration[bias_name] is not None:
        mod.set_input(bias_name, test_configuration[bias_name])
    mod.set_input(**params)
    mod.run()
    res = mod.get_output(0).numpy()
    np.testing.assert_equal(res, test_configuration["output"])
    assert res.dtype == expected_out_dtype
def test_qnn_dense_without_bias():
    """qnn.dense with int32 output, no bias, no requantize."""
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
        int32_output_without_bias_params = make_int_configuration(use_bias=False)
        qnn_dense_driver(int32_output_without_bias_params)
def test_qnn_dense_with_bias():
    """qnn.dense with int32 output and a bias-add."""
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
        int32_output_with_bias_params = make_int_configuration(use_bias=True)
        qnn_dense_driver(int32_output_with_bias_params)
def test_qnn_dense_with_requantized_output():
    """qnn.dense with bias followed by requantize back to int8."""
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
        int8_requantized_output_with_bias_params = make_int_configuration(
            use_bias=True, requantize_output=True
        )
        qnn_dense_driver(int8_requantized_output_with_bias_params)
def test_per_channel_weight_scale():
    """qnn.dense with per-output-channel kernel scales (requires bias + requantize)."""
    with TempOpAttr("qnn.dense", "FTVMQnnLegalize", legalize_qnn_dense):
        config = make_int_configuration(use_bias=True, requantize_output=True, per_channel=True)
        qnn_dense_driver(config)
if __name__ == "__main__":
    # Run each qnn.dense configuration directly when executed as a script.
    test_qnn_dense_without_bias()
    test_qnn_dense_with_bias()
    test_qnn_dense_with_requantized_output()
    test_per_channel_weight_scale()
# ---- dataset file-boundary metadata (extraction artifact, kept as comment) ----
# | 8,803 | 32.861538 | 96 | py |
# tvm | tvm-main/tests/python/relay/test_relay_te_compiler.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
import tvm.testing
from tvm import relay
from tvm import autotvm
from tvm import topi
from tvm.relay.backend import te_compiler
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
# Three identical conv2d compute/schedule pairs registered under different
# task names so the strategy below can expose multiple implementations.
@autotvm.register_topi_compute("test/conv2d_1")
def _compute_conv2d_1(cfg, input, filter, strides, padding, dilation, out_dtype):
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_1")
def _schedule_conv2d_1(cfg, outs):
    return topi.generic.schedule_conv2d_nchw(outs)
@autotvm.register_topi_compute("test/conv2d_2")
def _compute_conv2d_2(cfg, input, filter, strides, padding, dilation, out_dtype):
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
@autotvm.register_topi_schedule("test/conv2d_2")
def _schedule_conv2d_2(cfg, outs):
    return topi.generic.schedule_conv2d_nchw(outs)
# The third pair is intentionally NOT registered with autotvm (no cfg arg),
# so it never participates in tuning-record lookups.
def _compute_conv2d_3(input, filter, strides, padding, dilation, out_dtype):
    return topi.nn.conv2d_nchw(input, filter, strides, padding, dilation, out_dtype)
def _schedule_conv2d_3(outs):
    return topi.generic.schedule_conv2d_nchw(outs)
@tvm.target.override_native_generic_func("test_conv2d_strategy")
def _tmp_strategy(attrs, inputs, out_type, target):
    """Test strategy for nn.conv2d with three implementations at plevels
    10/15/20; conv2d_3 is only valid when the input-channel dim is >= 16."""
    strategy = relay.op.OpStrategy()
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_1),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_1),
        name="conv2d_1",
        plevel=10,
    )
    strategy.add_implementation(
        relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_2),
        relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_2),
        name="conv2d_2",
        plevel=15,
    )
    # NCHW layout: shape[1] is the input-channel count.
    ic = inputs[0].shape[1]
    with tvm.te.SpecializedCondition(ic >= 16):
        strategy.add_implementation(
            relay.op.strategy.wrap_compute_conv2d(_compute_conv2d_3),
            relay.op.strategy.wrap_topi_schedule(_schedule_conv2d_3),
            name="conv2d_3",
            plevel=20,
        )
    return strategy
def _create_record(task_name, dshape, wshape, target, cost):
    """Fabricate an autotvm (MeasureInput, MeasureResult) pair with the given
    cost for a conv2d task, used to drive apply_history_best in the tests."""
    args = [te.placeholder(dshape), te.placeholder(wshape), (1, 1), (1, 1, 1, 1), (1, 1), "float32"]
    task = autotvm.task.create(task_name, args, target)
    cfg = autotvm.ConfigEntity(0, None, {}, [])
    cfg.cost = cost
    inp = autotvm.MeasureInput(target=target, task=task, config=cfg)
    result = autotvm.MeasureResult(costs=(cost,), error_no=0, all_cost=-1, timestamp=-1)
    return (inp, result)
def test_get_valid_implementations():
    """get_valid_implementations must honor the SpecializedCondition:
    2 impls for ic=8, 3 impls when ic>=16 enables conv2d_3."""
    target = tvm.target.Target("llvm")
    def _get_impls(dshape, wshape):
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.te_compiler.get_valid_implementations(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
        )
    # Temporarily swap in the test strategy defined above.
    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        impls = _get_impls((1, 8, 7, 7), (32, 8, 3, 3))
        assert len(impls) == 2
        impls = _get_impls((1, 16, 7, 7), (32, 16, 3, 3))
        assert len(impls) == 3
def test_select_implementation():
    """select_implementation picks by plevel without tuning records, and by
    measured cost (lowest wins) when autotvm history is applied."""
    target = tvm.target.Target("llvm")
    def _select_impl(dshape, wshape, use_autotvm=False):
        data = relay.var("data", shape=dshape)
        weight = relay.var("wshape", shape=wshape)
        out = relay.nn.conv2d(data, weight, padding=(1, 1))
        out = run_infer_type(out)
        return relay.backend.te_compiler.select_implementation(
            relay.op.get("nn.conv2d"),
            out.attrs,
            [te.placeholder(dshape), te.placeholder(wshape)],
            out.checked_type,
            target,
            use_autotvm,
        )
    with TempOpAttr("nn.conv2d", "FTVMStrategy", _tmp_strategy):
        # No tuning records: highest applicable plevel wins.
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3))
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
        assert impl.name == "conv2d_2"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3))
        assert impl.name == "conv2d_3"
        impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
        assert impl.name == "conv2d_3"
        # add autotvm record
        records = []
        records.append(_create_record("test/conv2d_1", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.5))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.0))
        with target:
            with autotvm.apply_history_best(records):
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_1"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"
        # A cheaper conv2d_2 record beats conv2d_1 for the ic=8 workload only.
        records.append(_create_record("test/conv2d_2", (1, 8, 7, 7), (32, 8, 3, 3), target, 0.2))
        records.append(_create_record("test/conv2d_1", (1, 16, 7, 7), (32, 16, 3, 3), target, 1.2))
        with target:
            with autotvm.apply_history_best(records):
                impl, _ = _select_impl((1, 8, 7, 7), (32, 8, 3, 3), True)
                assert impl.name == "conv2d_2"
                impl, _ = _select_impl((1, 16, 7, 7), (32, 16, 3, 3), True)
                assert impl.name == "conv2d_1"
def test_te_compiler():
    """The TE compiler caches lowered functions: identical functions share one
    entry, different shapes/targets get distinct ones; also smoke-tests jit."""
    tec = relay.backend.te_compiler.get()
    def get_func(shape):
        x = relay.var("x", shape=shape)
        y = relay.add(x, x)
        z = relay.add(y, x)
        f = relay.Function([x], z)
        mod = tvm.IRModule.from_expr(f)
        mod = relay.transform.InferType()(mod)
        return mod["main"]
    z1 = tec.lower(get_func((10,)), "llvm")
    z2 = tec.lower(get_func((10,)), "llvm")
    z3 = tec.lower(get_func(()), "llvm")
    # Same function + target must hit the cache; different shape must not.
    assert z1.same_as(z2)
    assert not z3.same_as(z1)
    if tvm.testing.device_enabled("cuda"):
        z4 = tec.lower(get_func(()), "cuda")
        assert not z3.same_as(z4)
    # Test JIT target
    for target in ["llvm"]:
        dev = tvm.device(target)
        if tvm.testing.device_enabled(target):
            f = tec.jit(get_func((10,)), target)
            x = tvm.nd.array(np.ones(10).astype("float32"), device=dev)
            y = tvm.nd.empty((10,), device=dev)
            f(x, y)
            # x + x + x == 3 * x
            tvm.testing.assert_allclose(y.numpy(), x.numpy() * 3)
# Note: Once the te compiler is removed, we should keep this test so that
# we make sure that opt_level=0 passes are being called correctly.
def test_compile_placeholder_bypass():
    """Building a function whose output tuple contains a bare placeholder (`x`)
    must succeed at opt_level=0."""
    # The returned compiler object is not needed; the original bound it to a
    # local named `te_compiler`, which shadowed the imported `te_compiler`
    # module — keep the call (for parity) but drop the shadowing binding.
    relay.backend.te_compiler.get()
    x = relay.var("x", shape=(2, 3))
    y = relay.var("y", shape=(2, 3))
    z = relay.var("z", shape=(2, 3))
    result = relay.Tuple([x, relay.op.concatenate([y, z], axis=0)])
    func = relay.Function(relay.analysis.free_vars(result), result)
    with tvm.transform.PassContext(opt_level=0):
        relay.build(tvm.IRModule.from_expr(func), "llvm")
def test_compile_injective_with_tuple():
    """An injective op (transpose) feeding a tuple output must compile."""
    lhs = relay.var("x", shape=(2, 3))
    rhs = relay.var("y", shape=(2, 3))
    out = relay.Tuple([relay.transpose(lhs), rhs])
    relay.build(tvm.IRModule.from_expr(relay.Function([lhs, rhs], out)), "llvm")
def test_compile_tuple_dup():
    """A tuple referencing the same sub-expression twice must compile."""
    data = relay.var("data", shape=(16, 16))
    logged = relay.log(data)
    duplicated = relay.Tuple([logged, logged])
    relay.build(tvm.IRModule.from_expr(relay.Function([data], duplicated)), "llvm")
def test_compile_full():
    # Shape calculations can happen in int64. The test checks that full operator
    # can handle when shapes are not int32
    shape = (
        tvm.tir.IntImm("int32", 1),
        tvm.tir.IntImm("int64", 16),
        tvm.tir.IntImm("int64", 16),
        tvm.tir.IntImm("int32", 64),
    )
    output = relay.full(relay.const(0, "int32"), shape=shape, dtype="int32")
    f = relay.Function([], output)
    mod = tvm.IRModule.from_expr(f)
    # NOTE(review): CanonicalizeOps is a qnn pass; presumably a no-op here since
    # the graph has no qnn ops — kept as-is.
    mod = relay.qnn.transform.CanonicalizeOps()(mod)
    relay.build(mod, "llvm")
def test_compile_nhwc_pack():
    """NHWC/HWIO int8 conv2d combined with broadcast arithmetic must compile."""
    data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
    weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
    p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
    conv = relay.nn.conv2d(
        data,
        weight,
        kernel_size=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
    )
    scaled = relay.multiply(relay.const(-22, dtype="int32"), p2)
    tiled = relay.tile(scaled, reps=(1, 1, 1, 1001))
    body = relay.subtract(conv, tiled)
    func = relay.Function(relay.analysis.free_vars(body), body)
    relay.build(func, target="llvm")
# NOTE(review): "propogate" is a typo for "propagate" in the test name; left
# unrenamed to avoid breaking external references to this test.
def test_compile_propogate_hash():
    """After VM optimization, every function must carry a "hash" attribute."""
    data = relay.var("data", shape=(1, 1, 1, 1024), dtype="uint8")
    weight = relay.var("weight", shape=(1, 1, 1024, 1001), dtype="int8")
    p2 = relay.var("p2", shape=(1, 1, 1, 1), dtype="int32")
    conv = relay.nn.conv2d(
        data,
        weight,
        kernel_size=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
    )
    multiply = relay.multiply(relay.const(-22, dtype="int32"), p2)
    tile = relay.tile(multiply, reps=(1, 1, 1, 1001))
    subtract = relay.subtract(conv, tile)
    func = subtract
    mod = tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(func), func))
    vm = relay.vm.VMCompiler()
    opt_mod, _ = vm.optimize(mod, target="llvm")
    for f in opt_mod.functions.values():
        assert "hash" in f.attrs.keys()
if __name__ == "__main__":
    test_get_valid_implementations()
    test_select_implementation()
    test_te_compiler()
    test_compile_placeholder_bypass()
    test_compile_injective_with_tuple()
    test_compile_tuple_dup()
    test_compile_full()
    test_compile_nhwc_pack()
    # Fix: this test was defined above but never invoked when the file was
    # run as a script.
    test_compile_propogate_hash()
# ---- dataset file-boundary metadata (extraction artifact, kept as comment) ----
# | 10,894 | 35.807432 | 100 | py |
# tvm | tvm-main/tests/python/relay/test_op_grad_level3.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type, run_opt_pass, _np_randn_from_type
from tvm.relay.transform import gradient
import tvm.testing
executor_kind = tvm.testing.parameter("debug")
@tvm.testing.uses_gpu
def test_clip(executor_kind):
    """Gradient of clip is 1 inside [1, 10] and 0 outside; checked against a
    numpy reference for float32 and float64."""
    for dtype in ("float32", "float64"):
        # d/dx clip(x, 1, 10): 0 where x < 1 or x > 10, else 1.
        ref = lambda x: np.where(
            x > 10.0, np.zeros_like(x), np.where(x < 1.0, np.zeros_like(x), np.ones_like(x))
        )
        x = relay.var("x", relay.TensorType((10, 4), dtype))
        y = tvm.relay.clip(x, 1.0, 10.0)
        data = np.random.rand(10, 4).astype(dtype) * 11.0
        ref_grad = ref(data)
        fwd_func = relay.Function([x], y)
        fwd_func = run_infer_type(fwd_func)
        bwd_func = run_infer_type(gradient(fwd_func))
        for target, dev in tvm.testing.enabled_targets():
            op_res, (op_grad,) = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(bwd_func)(data)
            np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01)
def verify_transpose_grad(d_shape, axes=None, executor_kind="vm"):
    """Numerically check the gradient of transpose for the given shape/axes."""
    data = relay.var("data", relay.TensorType(d_shape, "float32"))
    fwd_func = relay.Function([data], relay.transpose(data, axes=axes))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_transpose_grad(executor_kind):
    """Transpose gradient with default (reversed) and explicit axes."""
    verify_transpose_grad((1, 2, 3, 4), executor_kind=executor_kind)
    verify_transpose_grad((1, 2, 3, 4), axes=(0, 2, 3, 1), executor_kind=executor_kind)
def test_negative_grad(executor_kind):
    """Gradient of elementwise negation."""
    data = relay.var("data", relay.TensorType((10, 4), "float32"))
    fwd_func = relay.Function([data], relay.negative(data))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_grad(executor_kind):
    """Gradient through a float32 -> float64 cast."""
    data = relay.var("data", relay.TensorType((10, 4), "float32"))
    fwd_func = relay.Function([data], relay.cast(data, "float64"))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_cast_like_grad(executor_kind):
    """Gradient through cast_like (dtype taken from a second tensor)."""
    data = relay.var("data", shape=(10, 4), dtype="float32")
    like = relay.var("like", shape=(1,), dtype="float64")
    fwd_func = relay.Function([data, like], relay.cast_like(data, like))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_copy_grad(executor_kind):
    """Gradient of the identity copy op."""
    data = relay.var("data", relay.TensorType((10, 4), "float64"))
    fwd_func = relay.Function([data], relay.copy(data))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_take_grad(executor_kind):
    """Gradient of take along an axis and over the flattened input; only the
    data input's gradient is tested (indices are integral)."""
    data_dtype = relay.TensorType((3, 4, 5), "float64")
    data = relay.var("data", data_dtype)
    indices = relay.var("indices", relay.TensorType((relay.Any(),), "int32"))
    inputs = [_np_randn_from_type(data_dtype, scale=1e-5), np.array([1, 2], dtype="int32")]
    test_inputs = [inputs[0]]
    # take on axis
    fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=1))
    check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
    # take on flattened
    fwd_func = relay.Function([data, indices], relay.take(data, indices, axis=None))
    check_grad(fwd_func, inputs=inputs, test_inputs=test_inputs, executor_kind=executor_kind)
def test_stack_grad(executor_kind):
    """Gradient of stacking three tensors along a new leading axis."""
    args = [relay.var(c, shape=(2, 3, 4), dtype="float64") for c in "xyz"]
    fwd_func = relay.Function(args, relay.stack(args, axis=0))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_squeeze_grad(executor_kind):
    """Gradient of squeeze, both removing all unit dims and a subset."""
    data = relay.var("data", shape=(2, 1, 1, 3, 4, 1), dtype="float64")
    fwd_func = relay.Function([data], relay.squeeze(data))
    fwd_func_subset = relay.Function([data], relay.squeeze(data, axis=[1, -1]))
    check_grad(fwd_func, executor_kind=executor_kind)
    check_grad(fwd_func_subset, executor_kind=executor_kind)
def test_arange_grad(executor_kind):
    # TODO: testing arange numerically is strange because two-sided approx can
    # produce different output shapes
    dtype = "float64"
    start = relay.var("start", relay.TensorType((), dtype))
    stop = relay.var("stop", relay.TensorType((), dtype))
    step = relay.var("step", relay.TensorType((), dtype))
    values = [np.array(v, dtype=dtype) for v in [2.5, 9.5, 1.8]]
    fwd_func = relay.Function([start, stop, step], relay.arange(start, stop, step, dtype))
    check_grad(fwd_func, inputs=values, executor_kind=executor_kind)
def test_gather_nd_grad(executor_kind):
    """Gradient of gather_nd w.r.t. the data input only."""
    data = relay.var("data", relay.TensorType((2, 3), "float64"))
    indices = relay.var("indices", relay.TensorType((2, 4), "int64"))
    fwd = relay.Function([data, indices], relay.gather_nd(data, indices))
    data_np = np.random.rand(2, 3).astype("float64")
    indices_np = np.array([[0, 1, 1, 0], [0, 1, 0, 0]], dtype="int64")
    check_grad(
        fwd, inputs=[data_np, indices_np], test_inputs=[data_np], executor_kind=executor_kind
    )
def test_reshape_like_grad(executor_kind):
    """Gradient of reshape_like (shape taken from a second tensor)."""
    data = relay.var("data", shape=(2, 3, 4), dtype="float32")
    shape_like = relay.var("shape_like", shape=(6, 2, 2), dtype="float32")
    fwd_func = relay.Function([data, shape_like], relay.reshape_like(data, shape_like))
    check_grad(fwd_func, executor_kind=executor_kind)
def test_zeros_ones_grad_const_ints():
    # when shape is static (i.e. not an input), there is no gradient at all
    static_ty = relay.TensorType([2, 3, 4], dtype="float32")
    # Gradient result type is (output, (grads...)) with an empty grad tuple.
    expected_ty = relay.TupleType([static_ty, relay.TupleType([])])
    for op in [relay.zeros, relay.ones]:
        fwd_func = relay.Function([], op(static_ty.concrete_shape, static_ty.dtype))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty)
def test_zeros_ones_grad_const_expr():
    # when shape is static (i.e. not an input), there is no gradient at all
    shape_const = relay.const(np.array([2, 3, 4]), dtype="int32") * relay.const(1, dtype="int32")
    static_ty = relay.TensorType([2, 3, 4], dtype="float32")
    dyn_ty = relay.TensorType([relay.Any(), relay.Any(), relay.Any()], dtype="float32")
    expected_ty_static = relay.TupleType([static_ty, relay.TupleType([])])
    expected_ty_dyn = relay.TupleType([dyn_ty, relay.TupleType([])])
    for op in [relay.zeros, relay.ones]:
        # with DynamicToStatic, the shape should be concretized
        fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
        fwd_func = run_opt_pass(fwd_func, relay.transform.DynamicToStatic())
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_static)
        # Without DynamicToStatic the result type stays dynamic (Any dims).
        fwd_func = relay.Function([], op(shape_const, static_ty.dtype))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        tvm.ir.assert_structural_equal(bwd_func.ret_type, expected_ty_dyn)
def test_zeros_ones_grad_dynamic(executor_kind):
    """zeros/ones with a runtime shape input: output matches numpy and the
    gradient w.r.t. the (integer) shape input is all zeros."""
    rank = np.random.randint(low=1, high=5, dtype="int32")
    dyn_shape = np.random.randint(low=1, high=4, size=(rank,), dtype="int32")
    shape_data = relay.var("shape_data", shape=(rank,), dtype="int32")
    for op, op_ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
        fwd_func = relay.Function([shape_data], op(shape_data, dtype="float32"))
        bwd_func = run_infer_type(gradient(run_infer_type(fwd_func)))
        for target, dev in tvm.testing.enabled_targets():
            res, (grad,) = relay.create_executor(executor_kind, device=dev, target=target).evaluate(
                bwd_func
            )(dyn_shape)
            tvm.testing.assert_allclose(res.numpy(), op_ref(dyn_shape, dtype="float32"))
            tvm.testing.assert_allclose(grad.numpy(), np.zeros((rank,), dtype="int32"))
if __name__ == "__main__":
    # Discover and run all tests in this file via the tvm.testing pytest wrapper.
    tvm.testing.main()
# ---- dataset file-boundary metadata (extraction artifact, kept as comment) ----
# | 8,628 | 42.580808 | 100 | py |
# tvm | tvm-main/tests/python/relay/test_adt.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
import numpy as np
# Module-level fixtures: one shared Prelude (with nat.rly loaded) plus short
# aliases for its ADTs, constructors and list/tree combinators used below.
prelude = p = Prelude(tvm.IRModule({}))
p.mod.import_from_std("nat.rly")
def count(e):
    # Convert a nat ConstructorValue into a Python int (delegates to testing.count).
    return count_(p, e)
dev = tvm.device("llvm", 0)
def eval(expr):
    # CAUTION: These tests re-process the entire prelude for each test expression.
    # Hoisting the create_executor won't improve that since preprocessing won't begin
    # until the evaluate.
    # NOTE: this deliberately shadows the builtin `eval` within this module.
    return create_executor(mod=prelude.mod, device=dev, target="llvm").evaluate(expr)
# nat ADT: z (zero) and s (successor) constructors.
nat, z, s = prelude.mod.get_type("nat")
double = p.mod.get_global_var("nat_double")
add = p.mod.get_global_var("nat_add")
optional, some, none = prelude.mod.get_type("Option")
rlist, cons, nil = prelude.mod.get_type("List")
hd = p.hd
tl = p.tl
nth = p.nth
update = p.update
length = p.length
# NOTE: map/sum/filter/zip below shadow Python builtins of the same name;
# within this module they always mean the prelude's list functions.
map = p.map
foldl = p.foldl
foldr = p.foldr
foldr1 = p.foldr1
sum = p.sum
concat = p.concat
filter = p.filter
zip = p.zip
rev = p.rev
unfoldl = p.unfoldl
unfoldr = p.unfoldr
map_accumr = p.map_accumr
map_accuml = p.map_accuml
tree, rose = prelude.mod.get_type("Tree")
tmap = p.tmap
size = p.size
compose = p.compose
iterate = p.iterate
def to_list(l):
    """Flatten a prelude List ConstructorValue into a Python list of its
    element values (walks Cons cells until Nil)."""
    assert isinstance(l, ConstructorValue)
    val = l
    ret = []
    while True:
        if val.tag == cons.tag:
            ret.append(val.fields[0])
            val = val.fields[1]
        else:
            # The only other legal constructor for List is Nil.
            assert val.tag == nil.tag
            break
    return ret
def tree_to_dict(t):
    """Recursively convert a Rose-tree ConstructorValue into a nested dict
    with `member` (node value) and `children` (list of sub-dicts) keys."""
    assert isinstance(t, ConstructorValue)
    assert t.tag == rose.tag
    return {
        "member": t.fields[0],
        "children": [tree_to_dict(child) for child in to_list(t.fields[1])],
    }
def vmobj_to_list(o, dtype="float32"):
    """Flatten a VM/interpreter result (NDArray, runtime ADT, or interpreter
    ConstructorValue) into a flat Python list; tensor_nil becomes [0]."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy().tolist()]
    elif isinstance(o, tvm.runtime.container.ADT):
        if len(o) == 0:
            # A zero-field ADT is either tensor_nil (-> [0]) or Nil (-> []).
            tensor_nil = p.get_var("tensor_nil", dtype=dtype)
            if tensor_nil.tag == o.tag:
                return [0]
            return []
        result = []
        for f in o:
            result.extend(vmobj_to_list(f, dtype))
        return result
    elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
        if o.constructor.name_hint == "Cons":
            # Recurse into tail first, then prepend the flattened head.
            tl = vmobj_to_list(o.fields[1], dtype)
            hd = vmobj_to_list(o.fields[0], dtype)
            hd.extend(tl)
            return hd
        elif o.constructor.name_hint == "Nil":
            return []
        elif "tensor_nil" in o.constructor.name_hint:
            return [0]
        elif "tensor" in o.constructor.name_hint:
            return [o.fields[0].numpy()]
        else:
            raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
    else:
        raise RuntimeError("Unknown object type: %s" % type(o))
# Extract the Python scalar held by a 0-d relay tensor value.
def get_scalar(tv):
    host_array = tv.numpy()
    return host_array.item()
# @tvm.testing.uses_gpu
def test_nat_value():
assert count(make_nat_value(p, 10)) == 10
assert count(eval(s(s(z())))) == 2
@tvm.testing.uses_gpu
def test_nat_constructor():
func = relay.Function([], z())
test_z = relay.GlobalVar("test_z")
test_sz = relay.GlobalVar("test_sz")
prelude.mod[test_z] = func
func = relay.Function([], s(z()))
prelude.mod[test_sz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_z].body.checked_type == nat()
assert ck_mod[test_sz].body.checked_type == nat()
@tvm.testing.uses_gpu
def test_double():
    """double has type nat -> nat and maps 1 to 2."""
    assert prelude.mod[double].checked_type == relay.FuncType([nat()], nat())
    doubled = eval(double(s(z())))
    assert count(doubled) == 2
@tvm.testing.uses_gpu
def test_add():
    """add has type (nat, nat) -> nat and 1 + 1 evaluates to 2."""
    assert prelude.mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
    total = eval(add(s(z()), s(z())))
    assert count(total) == 2
@tvm.testing.uses_gpu
def test_list_constructor():
test_consz = relay.GlobalVar("test_consz")
func = relay.Function([], cons(z(), nil()))
prelude.mod[test_consz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_consz].body.checked_type == rlist(nat())
@tvm.testing.uses_gpu
def test_hd_tl():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(make_nat_expr(prelude, i), l)
got = []
for i in range(len(expected)):
got.append(count(eval(hd(l))))
l = tl(l)
assert got == expected
@tvm.testing.uses_gpu
def test_nth():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(relay.const(i), l)
for i in range(len(expected)):
nth = prelude.mod.get_global_var("nth")
item = eval(nth(l, relay.const(i)))
assert get_scalar(item) == i
@tvm.testing.uses_gpu
def test_update():
expected = list(range(10))
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(make_nat_expr(prelude, 0), l)
# set value
for i, v in enumerate(expected):
l = update(l, relay.const(i), make_nat_expr(prelude, v))
got = []
for i in range(len(expected)):
got.append(count(eval(nth(l, relay.const(i)))))
assert got == expected
@tvm.testing.uses_gpu
def test_length():
a = relay.TypeVar("a")
assert prelude.mod[length].checked_type == relay.FuncType(
[rlist(a)], relay.scalar_type("int32"), [a]
)
res = eval(length(cons(z(), cons(z(), cons(z(), nil())))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_map():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[map].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), rlist(a)], rlist(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(map(add_one, cons(z(), cons(z(), nil()))))
ones = to_list(res)
assert len(ones) == 2
assert count(ones[0]) == 1 and count(ones[1]) == 1
@tvm.testing.uses_gpu
def test_foldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldl].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], a), a, rlist(b)], a, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
rev_dup = relay.Function([y, x], cons(x, cons(x, y)))
res = eval(
foldl(
rev_dup,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
reversed = to_list(res)
assert len(reversed) == 6
assert count(reversed[0]) == 3 and count(reversed[1]) == 3
assert count(reversed[2]) == 2 and count(reversed[3]) == 2
assert count(reversed[4]) == 1 and count(reversed[5]) == 1
@tvm.testing.uses_gpu
def test_foldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldr].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], b), b, rlist(a)], b, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
identity = relay.Function([x, y], cons(x, y))
res = eval(
foldr(
identity,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
same = to_list(res)
assert len(same) == 3
assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3
@tvm.testing.uses_gpu
def test_foldr1():
a = relay.TypeVar("a")
lhs = prelude.mod[foldr1].checked_type
rhs = relay.FuncType([relay.FuncType([a, a], a), rlist(a)], a, [a])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], add(x, y))
res = eval(
foldr1(
f,
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
assert count(res) == 6
@tvm.testing.uses_gpu
def test_sum():
assert prelude.mod[sum].checked_type == relay.FuncType(
[rlist(relay.scalar_type("int32"))], relay.scalar_type("int32")
)
res = eval(sum(cons(relay.const(1), cons(relay.const(2), nil()))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_concat():
a = relay.TypeVar("a")
assert prelude.mod[concat].checked_type == relay.FuncType([rlist(a), rlist(a)], rlist(a), [a])
l1 = cons(make_nat_expr(prelude, 1), cons(make_nat_expr(prelude, 2), nil()))
l2 = cons(make_nat_expr(prelude, 3), cons(make_nat_expr(prelude, 4), nil()))
res = eval(concat(l1, l2))
catted = to_list(res)
assert len(catted) == 4
assert count(catted[0]) == 1
assert count(catted[1]) == 2
assert count(catted[2]) == 3
assert count(catted[3]) == 4
@tvm.testing.uses_gpu
def test_filter():
a = relay.TypeVar("a")
expected_type = relay.FuncType(
[relay.FuncType([a], relay.scalar_type("bool")), rlist(a)], rlist(a), [a]
)
assert prelude.mod[filter].checked_type == expected_type
x = relay.Var("x", nat())
greater_than_one = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
s, [relay.PatternConstructor(s, [relay.PatternWildcard()])]
),
relay.const(True),
),
relay.Clause(relay.PatternWildcard(), relay.const(False)),
],
),
)
res = eval(
filter(
greater_than_one,
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 3),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 1), nil())),
),
),
),
),
)
)
filtered = to_list(res)
assert len(filtered) == 2
assert count(filtered[0]) == 3
assert count(filtered[1]) == 5
@tvm.testing.uses_gpu
def test_zip():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType([rlist(a), rlist(b)], rlist(relay.TupleType([a, b])), [a, b])
assert prelude.mod[zip].checked_type == expected_type
l1 = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
l2 = cons(nil(), cons(cons(nil(), nil()), cons(cons(nil(), cons(nil(), nil())), nil())))
res = eval(zip(l1, l2))
zipped = to_list(res)
assert len(zipped) == 3
assert count(zipped[0][0]) == 1
assert len(to_list(zipped[0][1])) == 0
assert count(zipped[1][0]) == 2
assert len(to_list(zipped[1][1])) == 1
assert count(zipped[2][0]) == 3
assert len(to_list(zipped[2][1])) == 2
# test truncation
l3 = cons(make_nat_expr(prelude, 4), cons(make_nat_expr(prelude, 5), nil()))
shorter_res = eval(zip(l3, l2))
truncated = to_list(shorter_res)
assert len(truncated) == 2
assert count(truncated[0][0]) == 4
assert len(to_list(truncated[0][1])) == 0
assert count(truncated[1][0]) == 5
assert len(to_list(truncated[1][1])) == 1
l4 = cons(nil(), nil())
shortest_res = eval(zip(l3, l4))
singleton = to_list(shortest_res)
assert len(singleton) == 1
assert count(singleton[0][0]) == 4
assert len(to_list(singleton[0][1])) == 0
@tvm.testing.uses_gpu
def test_rev():
a = relay.TypeVar("a")
assert prelude.mod[rev].checked_type == relay.FuncType([rlist(a)], rlist(a), [a])
res = eval(
rev(
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
)
)
reversed = to_list(res)
assert len(reversed) == 3
assert count(reversed[0]) == 3
assert count(reversed[1]) == 2
assert count(reversed[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldr():
    """unfoldr builds a list right-to-left; counting down from 3 yields [3, 2, 1]."""
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    expected_type = relay.FuncType(
        [relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
    )
    # Fix: expected_type was computed but never checked; assert it like the
    # other prelude tests (e.g. test_map_accumr) do.
    assert prelude.mod[unfoldr].checked_type == expected_type
    x = relay.Var("x", nat())
    n = relay.Var("n", nat())
    # Step function: s(n) -> Some((n, x)), z -> None, i.e. count down to zero.
    count_down = relay.Function(
        [x],
        relay.Match(
            x,
            [
                relay.Clause(
                    relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
                ),
                relay.Clause(relay.PatternConstructor(z, []), none()),
            ],
        ),
    )
    res = eval(unfoldr(count_down, make_nat_expr(prelude, 3)))
    unfolded = to_list(res)
    assert len(unfolded) == 3
    assert count(unfolded[0]) == 3
    assert count(unfolded[1]) == 2
    assert count(unfolded[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldl():
    """unfoldl builds a list left-to-right; counting down from 3 yields [1, 2, 3]."""
    a = relay.TypeVar("a")
    b = relay.TypeVar("b")
    expected_type = relay.FuncType(
        [relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
    )
    # Fix: expected_type was computed but never checked; assert it like the
    # other prelude tests (e.g. test_map_accuml) do.
    assert prelude.mod[unfoldl].checked_type == expected_type
    x = relay.Var("x", nat())
    n = relay.Var("n", nat())
    # Step function: s(n) -> Some((n, x)), z -> None, i.e. count down to zero.
    count_down = relay.Function(
        [x],
        relay.Match(
            x,
            [
                relay.Clause(
                    relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
                ),
                relay.Clause(relay.PatternConstructor(z, []), none()),
            ],
        ),
    )
    res = eval(unfoldl(count_down, make_nat_expr(prelude, 3)))
    unfolded = to_list(res)
    assert len(unfolded) == 3
    assert count(unfolded[0]) == 1
    assert count(unfolded[1]) == 2
    assert count(unfolded[2]) == 3
@tvm.testing.uses_gpu
def test_map_accumr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accumr].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_acc_to_each = relay.Function([acc, x], relay.Tuple([add(x, acc), add(x, acc)]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accumr(add_acc_to_each, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 6
assert count(new_vals[1]) == 5
assert count(new_vals[2]) == 3
@tvm.testing.uses_gpu
def test_map_accuml():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accuml].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_to_acc = relay.Function([acc, x], relay.Tuple([add(x, acc), x]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accuml(add_to_acc, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 3
assert count(new_vals[1]) == 2
assert count(new_vals[2]) == 1
@tvm.testing.uses_gpu
def test_optional_matching():
x = relay.Var("x")
y = relay.Var("y")
v = relay.Var("v")
condense = relay.Function(
[x, y],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]), cons(v, y)),
relay.Clause(relay.PatternConstructor(none), y),
],
),
)
res = eval(
foldr(
condense,
nil(),
cons(
some(make_nat_expr(prelude, 3)),
cons(none(), cons(some(make_nat_expr(prelude, 1)), nil())),
),
)
)
reduced = to_list(res)
assert len(reduced) == 2
assert count(reduced[0]) == 3
assert count(reduced[1]) == 1
@tvm.testing.uses_gpu
def test_tmap():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[tmap].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), tree(a)], tree(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(tmap(add_one, rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))))
tree_dict = tree_to_dict(res)
assert count(tree_dict["member"]) == 1
assert len(tree_dict["children"]) == 2
for subtree in tree_dict["children"]:
assert count(subtree["member"]) == 1
assert len(subtree["children"]) == 0
@tvm.testing.uses_gpu
def test_size():
a = relay.TypeVar("a")
lhs = prelude.mod[size].checked_type
rhs = relay.FuncType([tree(a)], relay.scalar_type("int32"), [a])
assert lhs == rhs
root = rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))
t = rose(z(), cons(root, cons(root, cons(root, nil()))))
res = eval(size(t))
assert get_scalar(res) == 10
@tvm.testing.uses_gpu
def test_wildcard_match_solo():
    """A lone wildcard clause matches anything and may return the scrutinee."""
    x = relay.Var("x", nat())
    copy = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternWildcard(), x)]), nat())
    three = s(s(s(z())))
    assert count(eval(copy(three))) == 3
@tvm.testing.uses_gpu
def test_wildcard_match_order():
x = relay.Var("x", rlist(nat()))
y = relay.Var("y")
a = relay.Var("a")
return_zero = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternWildcard(), z()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(a)]), y
),
relay.Clause(relay.PatternConstructor(nil), s(z())),
],
),
nat(),
)
res = eval(return_zero(cons(s(z()), nil())))
# wildcard pattern is evaluated first
assert count(res) == 0
@tvm.testing.uses_gpu
def test_nested_matches():
a = relay.TypeVar("a")
# TODO(@jroesch): inference should be able to handle this one
x = relay.Var("x", type_annotation=rlist(rlist(a)))
y = relay.Var("y")
w = relay.Var("w")
h = relay.Var("h")
t = relay.Var("t")
flatten = relay.GlobalVar("flatten")
# flatten could be written using a fold, but this way has nested matches
inner_match = relay.Match(
y,
[
relay.Clause(relay.PatternConstructor(nil), flatten(w)),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, flatten(cons(t, w))),
),
],
)
prelude.mod[flatten] = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(nil), nil()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(w)]),
inner_match,
),
],
),
rlist(a),
[a],
)
first_list = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
second_list = cons(
make_nat_expr(prelude, 4),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 6), nil())),
)
final_list = cons(first_list, cons(second_list, nil()))
res = eval(flatten(final_list))
flat = to_list(res)
assert len(flat) == 6
for i in range(6):
assert count(flat[i]) == i + 1
@tvm.testing.uses_gpu
def test_match_full_var():
x = relay.Var("x")
v = relay.Var("v")
id_func = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternVar(v), v)]))
res1 = eval(id_func(nil()))
res2 = eval(id_func(cons(z(), cons(z(), nil()))))
empty = to_list(res1)
assert len(empty) == 0
zeroes = to_list(res2)
assert len(zeroes) == 2
assert count(zeroes[0]) == 0
assert count(zeroes[1]) == 0
@tvm.testing.uses_gpu
def test_nested_pattern_match():
x = relay.Var("x", rlist(nat()))
h1 = relay.Var("h1")
h2 = relay.Var("h2")
t = relay.Var("t")
match = relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternVar(h1),
relay.PatternConstructor(cons, [relay.PatternVar(h2), relay.PatternVar(t)]),
],
),
h2,
),
relay.Clause(relay.PatternWildcard(), z()),
],
)
get_second = relay.Function([x], match)
res = eval(get_second(cons(s(z()), cons(s(s(z())), nil()))))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_compose():
    """compose(inc, double) applied to 2 computes 2 * 2 + 1 = 5."""
    n = relay.Var("n")
    inc = relay.Function([n], s(n))
    # Fix: removed the unused local `x = relay.Var("x")`.
    res = eval(relay.Call(compose(inc, double), [s(s(z()))]))
    assert count(res) == 5
@tvm.testing.uses_gpu
def test_iterate():
    """iterate(double, 2) applies double twice: 3 * 2 * 2 = 12."""
    applied = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(prelude, 3)])
    result = eval(relay.Function([], applied)())
    assert count(result) == 12
if __name__ == "__main__":
tvm.testing.main()
| 22,995 | 26.873939 | 100 | py |
tvm | tvm-main/tests/python/relay/test_typecall.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
def test_dup_type():
    """Applying a polymorphic duplicating fn to a float32 infers (float32, float32)."""
    type_var = relay.TypeVar("a")
    param = relay.Var("av", type_var)
    dup = relay.Function([param], relay.Tuple([param, param]), None, [type_var])
    float_ty = relay.scalar_type("float32")
    operand = relay.Var("b", float_ty)
    mod = transform.InferType()(tvm.IRModule.from_expr(dup(operand)))
    assert mod["main"].body.checked_type == relay.TupleType([float_ty, float_ty])
def test_id_type():
    """Calling a var of type fn[b](b) -> id[b] on a float32 infers id[float32]."""
    mod = tvm.IRModule()
    id_type = relay.GlobalTypeVar("id")
    a = relay.TypeVar("a")
    mod[id_type] = relay.TypeData(id_type, [a], [])
    b = relay.TypeVar("b")
    make_id = relay.Var("make_id", relay.FuncType([b], id_type(b), [b]))
    float_ty = relay.scalar_type("float32")
    arg = relay.Var("b", float_ty)
    mod["main"] = relay.Function([make_id, arg], make_id(arg))
    mod = transform.InferType()(mod)
    assert mod["main"].body.checked_type == id_type(float_ty)
if __name__ == "__main__":
test_dup_type()
test_id_type()
| 1,802 | 33.018868 | 72 | py |
tvm | tvm-main/tests/python/relay/test_pass_lambda_lift.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
def test_basic():
    """Lifting a directly nested function adds exactly one global to the module."""
    mod = tvm.IRModule()
    x2 = relay.var("x2", shape=(10, 5))
    y2 = relay.var("y2", shape=(1, 5))
    inner = relay.Function([x2, y2], relay.op.add(x2, y2))
    x1 = relay.var("x1", shape=(10, 5))
    y1 = relay.var("y1", shape=(1, 5))
    mod["main"] = relay.Function([x1, y1], inner(x1, y1))
    mod = relay.transform.InferType()(mod)
    lifted = transform.LambdaLift()(mod)
    # main plus the one lifted inner function.
    assert len(lifted.functions) == 2
def test_closure():
    """Lifting a closure produces main plus two lifted functions (inner and outer)."""
    mod = tvm.IRModule()
    x = relay.var("x", shape=(2,))
    y = relay.var("y", shape=(2,))
    inner = relay.Function([x], x + y)
    outer = relay.Function([y], inner)
    closure = outer(relay.ones(shape=(2,), dtype="float32"))
    body = relay.Call(closure, [relay.zeros(shape=(2,), dtype="float32")])
    mod["main"] = relay.Function([], body)
    mod = relay.transform.InferType()(mod)
    lifted = transform.LambdaLift()(mod)
    assert len(lifted.functions) == 3
def test_recursive():
    """Lambda-lift a recursive loop bound via Let; expect main plus one lifted fn."""
    mod = tvm.IRModule()
    x = relay.var("x", shape=(2,))
    i = relay.var("i", shape=(), dtype="int32")
    s = relay.var("s", shape=(2,))
    cond = i < relay.const(10, dtype="int32")
    # The loop var is referenced inside its own body, tying the recursion
    # through the Let binding below.
    loop = relay.var("while_loop")
    sb = relay.scope_builder.ScopeBuilder()
    with sb.if_scope(cond):
        ii = i + relay.const(1, dtype="int32")
        ss = s + x
        sb.ret(loop(ii, ss))
    with sb.else_scope():
        sb.ret(s)
    func = relay.Function([i, s], sb.get())
    # let while_loop = func in while_loop(0, zeros) -- a self-referential binding.
    ret = relay.Let(
        loop, func, loop(relay.const(0, dtype="int32"), relay.zeros(shape=(2,), dtype="float32"))
    )
    mod["main"] = relay.Function([x], ret)
    mod = relay.transform.InferType()(mod)
    new_mod = transform.LambdaLift()(mod)
    assert len(new_mod.functions) == 2
if __name__ == "__main__":
tvm.testing.main()
| 2,738 | 30.482759 | 97 | py |
tvm | tvm-main/tests/python/relay/test_pass_defunctionalization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import transform, ExprVisitor, TypeVisitor
from tvm.relay.testing import Prelude
# determine if type t is a FuncType or has a nested FuncType
def has_func_type(t):
    """Return True if type t is, or anywhere contains, a FuncType."""
    class _FuncTypeFinder(TypeVisitor):
        def __init__(self):
            super().__init__()
            self.has_func = False
        def visit_func_type(self, ftt):
            self.has_func = True
    finder = _FuncTypeFinder()
    finder.visit(t)
    return finder.has_func
# determine whether a program has any higher order functions
# a higher order function is defined as one that:
# - has function type arguments
# - returns a function
def assert_no_higher_order_functions(expr, mod):
    """Assert that no call reachable from expr (through globals) is higher order.

    A call counts as higher order when it returns a function or takes a
    function-typed argument. Offenders are collected so the assertion
    message can print them all.
    """
    class CheckFirstOrderVisitor(ExprVisitor):
        def __init__(self, mod):
            super().__init__()
            self.mod = mod
            self.hof = []  # offending higher-order calls, kept for the error message
            self.visited_gv = set()
        def visit_call(self, call):
            is_higher_order = False
            # check return type
            if has_func_type(call.checked_type):
                is_higher_order = True
            # check argument types
            for a in call.args:
                if has_func_type(a.checked_type):
                    is_higher_order = True
            # if it is higher order, save it for debugging later
            if is_higher_order:
                self.hof.append(call)
            super().visit_call(call)
        def visit_global_var(self, gv):
            # visit global vars to visit entire program
            if gv not in self.visited_gv:
                self.visited_gv.add(gv)
                self.visit(self.mod[gv])
    # Types must be populated before checked_type can be queried below.
    mod = transform.InferType()(mod)
    check_fo_visitor = CheckFirstOrderVisitor(mod)
    check_fo_visitor.visit(expr)
    nl = "\n--------\n"
    errmsg = f"""found {len(check_fo_visitor.hof)} higher order functions:
    {nl.join(expr.astext() for expr in check_fo_visitor.hof)}"""
    assert len(check_fo_visitor.hof) == 0, errmsg
# assert that a program is defunctionalized and returns
# defunctionalized module
# assumes program starts from mod['main']
def defunctionalized(mod):
    """Run Defunctionalization on mod['main'], assert the result is first order.

    Returns the re-type-checked, defunctionalized module.
    """
    mod = transform.InferType()(mod)
    mod["main"] = transform.Defunctionalization(mod["main"], mod)
    mod = transform.InferType()(mod)
    assert_no_higher_order_functions(mod["main"], mod)
    return mod
# adt list to python list
def to_list(mod, l):
    """Convert an ADT List value into a Python list of numpy arrays."""
    list_gtv = mod.get_global_type_var("List")
    list_adt = mod[list_gtv]
    cons = list_adt.constructors[0]
    nil = list_adt.constructors[1]
    assert isinstance(l, ConstructorValue)
    out = []
    node = l
    # Walk the Cons spine until the Nil terminator.
    while node.tag == cons.tag:
        out.append(node.fields[0].numpy())
        node = node.fields[1]
    assert node.tag == nil.tag
    return out
# list to adt list
def to_adt_list(mod, arr):
    """Build an ADT List value from a Python sequence via the interpreter.

    Note the resulting ADT list holds arr's elements in reverse order
    (built back-to-front), and that mod['main'] is saved and restored --
    presumably evaluation clobbers it; confirm against create_executor.
    """
    expr = mod["main"]
    l = mod.get_global_type_var("List")
    list_adt = mod[l]
    cons = list_adt.constructors[0]
    nil = list_adt.constructors[1]
    li = nil()
    for a in arr:
        li = cons(relay.const(a), li)
    adt = relay.create_executor(mod=mod).evaluate(li)
    # Restore the saved entry point.
    mod["main"] = expr
    return adt
def test_simple():
code = """
#[version = "0.0.5"]
def @simple[A, B](%f: fn(A) -> B, %xs: A) -> B {
%f(%xs)
}
def @main(%l: Tensor[(5, 5), float32]) -> Tensor[(5, 5), float32] {
%0 = fn[A](%x: A) -> A {
%x
};
@simple(%0, %l)
}
"""
mod = tvm.relay.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(5, 5).astype("float32")
out = relay.create_executor("debug", mod=mod).evaluate()(input)
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(input)
np.testing.assert_equal(out.numpy(), defunc_out.numpy())
def test_global_recursion():
code = """
#[version = "0.0.5"]
type List[A] {
Cons(A, List[A]),
Nil,
}
def @id[A](%x: A) -> A {
%x
}
def @map[A, B](%f: fn(A) -> B, %xs: List[A]) -> List[B] {
match (%xs) {
Cons(%x, %rest) => Cons(%f(%x), @map(%f, %rest)),
Nil => Nil,
}
}
def @main(%l: List[float32]) -> List[float32] {
@map(@id, %l)
}
"""
mod = tvm.relay.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.rand(10).astype("float32")
out = relay.create_executor("debug", mod=mod).evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(
to_adt_list(defunc_mod, input)
)
np.testing.assert_array_equal(to_list(mod, out), to_list(defunc_mod, defunc_out))
def test_recursive_datatype():
# CPS will create recursive datatype
code = """
#[version = "0.0.5"]
type List[A] {
Cons(A, List[A]),
Nil,
}
def @sum(%f: fn(int32) -> int32, %k: List[int32]) -> int32 {
match (%k) {
Cons(%x, %rest) => %0 = fn(%n) {
%x + %f(%n)
};
@sum(%0, %rest),
Nil => %f(0),
}
}
def @id[A](%x: A) -> A {
%x
}
def @main(%l: List[int32]) -> int32 {
@sum(@id, %l)
}
"""
mod = tvm.relay.fromtext(code)
defunc_mod = defunctionalized(mod)
input = np.random.randint(1, 100, 10)
out = relay.create_executor("debug", mod=mod).evaluate(mod["main"])(to_adt_list(mod, input))
defunc_out = relay.create_executor("debug", mod=defunc_mod).evaluate()(
to_adt_list(defunc_mod, input)
)
tvm.testing.assert_allclose(out.numpy(), defunc_out.numpy())
if __name__ == "__main__":
tvm.testing.main()
| 6,351 | 26.497835 | 96 | py |
tvm | tvm-main/tests/python/relay/test_pass_flexible_shape_dispatch.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test flexible shape dispatch pass"""
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay.testing.resnet import get_workload
from tvm.relay import vm
from tvm import runtime
def test_end_to_end():
    """Dispatch resnet over batch buckets {1, 4}; auto-pad an off-bucket batch of 3."""
    mod, params = get_workload()
    # Bucket the batch axis; auto_pad handles sizes outside the buckets.
    mod = relay.transform.FlexibleShapeDispatch(axis=0, buckets=[1, 4], auto_pad=True)(mod)
    exe = relay.vm.compile(mod, "llvm", params=params)
    vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
    # Exact buckets first (1, 4), then batch 3 which exercises auto-padding.
    for batch in (1, 4, 3):
        data = np.random.normal(size=[batch, 3, 224, 224]).astype("float32")
        assert list(vm.invoke("main", data).shape) == [batch, 1000]
def test_multiple_inputs():
    """Dispatch dim 1 of both inputs of an elementwise add over buckets {5, 10}."""
    x = relay.var("x", shape=[10, 10], dtype="float32")
    w = relay.var("w", shape=[10, 10], dtype="float32")
    mod = tvm.IRModule.from_expr(x + w)
    mod = relay.transform.FlexibleShapeDispatch(axis=1, buckets=[5, 10], input_indices=[0, 1])(mod)
    exe = relay.vm.compile(mod, "llvm")
    vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
    # The output width tracks the dispatched input width.
    for width in (5, 10):
        arg = np.random.normal(size=[10, width]).astype("float32")
        assert list(vm.invoke("main", arg, arg).shape) == [10, width]
def test_fixed_output():
    """Flexible dispatch on an axis that does not affect the output shape.

    nn.dense contracts over the dispatched dimension, so every bucket must
    yield the same fixed [10, 10] output (affects_output=False).
    """
    x = relay.var("x", shape=[10, 10], dtype="float32")
    w = relay.var("w", shape=[10, 10], dtype="float32")
    y = relay.nn.dense(x, w)
    mod = tvm.IRModule.from_expr(y)
    # Apply flexible dispatch to dimension 1 for both inputs.
    mod = relay.transform.FlexibleShapeDispatch(
        axis=1, buckets=[5, 7], input_indices=[0, 1], affects_output=False
    )(mod)
    # Compile and confirm that output shapes are correct.
    exe = relay.vm.compile(mod, "llvm")
    vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
    x_w_5 = np.random.normal(size=[10, 5]).astype("float32")
    assert list(vm.invoke("main", x_w_5, x_w_5).shape) == [10, 10]
    x_w_7 = np.random.normal(size=[10, 7]).astype("float32")
    assert list(vm.invoke("main", x_w_7, x_w_7).shape) == [10, 10]
    # Fix: dropped the redundant trailing bare `return`.
def test_multiple_outputs():
    """Dispatch the batch axis of a graph whose output is a tuple from split."""
    x = relay.var("x", shape=[10, 10], dtype="float32")
    outs = relay.split(x, 2, axis=1)
    mod = tvm.IRModule.from_expr(outs.astuple())
    mod = relay.transform.FlexibleShapeDispatch(axis=0, buckets=[5, 10])(mod)
    exe = relay.vm.compile(mod, "llvm")
    vm = runtime.vm.VirtualMachine(exe, tvm.cpu())
    # Both halves of the split must carry the dispatched batch dimension.
    for batch in (5, 10):
        data = np.random.normal(size=[batch, 10]).astype("float32")
        result = vm.invoke("main", data)
        assert list(result[0].shape) == [batch, 5]
        assert list(result[1].shape) == [batch, 5]
if __name__ == "__main__":
tvm.testing.main()
| 4,525 | 36.716667 | 99 | py |
tvm | tvm-main/tests/python/relay/test_pass_plan_devices.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
"""Unit tests for the PlanDevices pass. We check:
- The pass alone given the expected AST, though we need to manually run InferTypes.
- The pass is idempotent.
- Execution on the VM backend yields the correct result."""
import tvm
from tvm import relay
from tvm.script import tir as T
import tvm.testing
import numpy as np
import os
HOST_DEVICE = tvm.device("cpu")
HOST_TARGET = tvm.target.Target("llvm")
CPU_DEVICE = tvm.device("cpu")
CPU_TARGET = tvm.target.Target("llvm").with_host(HOST_TARGET)
GPU_DEVICE = tvm.device("cuda")
GPU_TARGET = tvm.target.Target("cuda").with_host(HOST_TARGET)
TARGETS = [CPU_TARGET, GPU_TARGET]
HOST = tvm.target.VirtualDevice(HOST_DEVICE, HOST_TARGET) # device_type=1
CPU = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET) # device_type=1
GPU = tvm.target.VirtualDevice(GPU_DEVICE, GPU_TARGET) # device_type=2
DEFAULT = GPU
CPU_SCOPE_A = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET, memory_scope="scopeA")
CPU_SCOPE_B = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET, memory_scope="scopeB")
CTXT = tvm.transform.PassContext(config={"relay.fallback_device_type": DEFAULT.device_type_int})
core = tvm.IRModule()
core.import_from_std("core.rly")
recover_virtual_device_map = tvm._ffi.get_global_func("relay.transform.RecoverVirtualDeviceMap")
def rewrite_and_assert(in_mod, expected_mod):
    """Run the PlanDevices pass by hand and check the result is structurally
    equal to expected_mod (both sides are type-inferred first)."""
    cfg = tvm.target.make_compilation_config(CTXT, TARGETS)
    infer = relay.transform.InferType()
    got = infer(relay.transform.PlanDevices(cfg)(infer(in_mod)))
    want = infer(expected_mod)
    if tvm.ir.structural_equal(got, want, True):
        return
    # Dump all three modules in full so failures are easy to diagnose.
    for banner, mod in (
        ("Input module:", in_mod),
        ("Expected module:", want),
        ("Actual module:", got),
    ):
        print(banner)
        print(mod)
    # Re-check with the asserting variant so the disagreeing sub-expressions are reported.
    tvm.ir.assert_structural_equal(got, want, True)
def eval_and_assert(in_mod: tvm.IRModule, reference_func, args):
    """Compile in_mod through the standard VM flow and check it agrees with the
    NumPy reference implementation on args."""
    if not tvm.runtime.enabled("cuda"):
        # Can't exercise a heterogeneous plan without a GPU present.
        print("Not evaluating since GPU is not available")
        return
    with tvm.transform.PassContext(opt_level=3):
        executor = relay.create_executor("vm", mod=in_mod, device=GPU_DEVICE, target=GPU_TARGET)
        result = executor.evaluate()(*args).numpy()
        tvm.testing.assert_allclose(result, reference_func(*args))
def rand(shape):
    """Return a float32 array of the given shape with uniform [0, 1) samples."""
    # np.random.random(shape) draws from the same sampler as np.random.rand(*shape).
    samples = np.random.random(shape)
    return samples.astype("float32")
def rands(shape, n):
    """Return a list of n independent random float32 arrays of the given shape."""
    # Loop variable is unused, so follow convention and name it `_`.
    return [rand(shape) for _ in range(n)]
def exercise(in_mod: tvm.IRModule, expected_mod: tvm.IRModule, reference_func, args):
    """Test in_mod against expected_mod and reference_func using args.

    Checks (1) the pass rewrites in_mod into expected_mod, (2) the pass is
    idempotent on expected_mod, and (3) when a reference implementation and
    args are supplied, the compiled module agrees with the reference.
    """
    # Correctness
    rewrite_and_assert(in_mod, expected_mod)
    # Idempotence
    rewrite_and_assert(expected_mod, expected_mod)
    # The VM can compile and possibly even run the module
    # (idiomatic `is not None` instead of `not (... is None)`).
    if reference_func is not None and args is not None:
        eval_and_assert(in_mod, reference_func, args)
def test_plain():
    """With no annotations every expression is planned onto the default (GPU) device."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Everything defaults to GPU
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = add(%c, %d);
              subtract(%0, %1)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              %1 = add(%c, %d);
              subtract(%0, %1)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_left_add_on_cpu():
    """Pinning the left add to the CPU pulls %a/%b onto the CPU and inserts a device_copy."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Force some args to be on CPU, rest default to GPU.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = add(%c, %d);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device= meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device= meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %3 = add(%c, %d);
              subtract(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_left_add_on_cpu_via_copy():
    """Same plan as test_left_add_on_cpu, but triggered by an explicit device_copy."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # As for test_left_add_on_cpu, but with an explicit device_copy.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = device_copy(%0, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %2 = add(%c, %d);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %3 = add(%c, %d);
              subtract(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_left_add_on_cpu_via_copy_as_map():
    """Checks RecoverVirtualDeviceMap reports the planned device of every sub-expression,
    keyed by post-dfs index (captured into spans by CapturePostDfsIndexInSpans)."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # As for test_left_add_on_cpu, but with an explicit device_copy.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = device_copy(%0, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %2 = add(%c, %d);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    config = tvm.target.make_compilation_config(CTXT, TARGETS, HOST_TARGET)
    actual_mod = relay.transform.InferType()(input())
    actual_mod = relay.transform.PlanDevices(config)(actual_mod)
    actual_mod = relay.transform.CapturePostDfsIndexInSpans()(actual_mod)
    # Same expected result as for test_left_add_on_cpu, but we'll include indexes to help
    # the test make sense.
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], // index 0
                      %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], // index 1
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], // index 2
                      %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], // index 3
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b); // index 8
              %1 = on_device(%0,
                             virtual_device=meta[VirtualDevice][0],
                             constrain_result=True); // index 9
              %2 = device_copy(%1,
                               src_virtual_device=meta[VirtualDevice][0],
                               dst_virtual_device=meta[VirtualDevice][1]); // index 10
              %3 = add(%c, %d); // index 11
              subtract(%2, %3) // index 12
            } // index 13
            """,
            "from_string",
            None,
            metatable,
        )
    # Make sure actual matches.
    tvm.ir.assert_structural_equal(actual_mod, expected(), True)
    # Recover all the inferred virtual devices in map form
    raw_map = recover_virtual_device_map(actual_mod, actual_mod["main"])
    # Rewrite the map to be from post-dfs indexes to device types
    map = {e.span.line: d.device_type for e, d in raw_map.items()}
    # Now we can express the expected map
    expected_map = {
        0: CPU.device_type,  # %a
        1: CPU.device_type,  # %b
        2: GPU.device_type,  # %c
        3: GPU.device_type,  # %d
        8: CPU.device_type,  # first add
        9: CPU.device_type,  # on_device
        10: GPU.device_type,  # device_copy
        11: GPU.device_type,  # second add
        12: GPU.device_type,  # subtract
        13: GPU.device_type,  # @main
    }
    assert map == expected_map
def test_both_adds_on_cpu():
    """Both adds are pinned to the CPU; each result is copied to the GPU for the subtract."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = add(%c, %d);
              %2 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %3 = on_device(%1, virtual_device=meta[VirtualDevice][0]);
              subtract(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = add(%c, %d);
              %3 = on_device(%2, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %4 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %5 = device_copy(%3, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              subtract(%4, %5)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_sharing():
    """A shared sub-expression annotated twice yields one add but two device copies."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # The same add sub-expression is annotated twice.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %3 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %4 = device_copy(%2, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              subtract(%3, %4)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b):
        x = np.add(a, b)
        return np.subtract(x, x)
    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_let_on_cpu():
    """A device constraint on a use of a let-bound variable flows back to its definition."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # The device for a let-bound expression can flow from uses of the let-bound var.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              let %l = add(%a, %b);
              let %r = add(%c, %d);
              %0 = on_device(%l, virtual_device=meta[VirtualDevice][0]);
              subtract(%0, %r)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%a, %b);
              let %l = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              let %r = on_device(add(%c, %d), virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %1 = device_copy(%l, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              subtract(%1, %r)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_func_param_on_cpu():
    """A constraint inside a local function's body flows out to its parameters and callers."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Devices for function parameters flow to call sites.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              let %f = fn (%x, %y) {
                %0 = add(%x, %y);
                on_device(%0, virtual_device=meta[VirtualDevice][0])
              };
              %1 = %f(%a, %b);
              %2 = add(%c, %d);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              let %f = fn (%x {virtual_device=meta[VirtualDevice][0]}, %y {virtual_device=meta[VirtualDevice][0]},
                           virtual_device=meta[VirtualDevice][0]) {
                add(%x, %y)
              };
              %0 = %f(%a, %b);
              %1 = add(%c, %d);
              subtract(%0, %1)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_func_result_on_cpu():
    """A constraint at a call site flows into the called function's result device."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Devices for call sites flow to function results.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              let %f = fn (%x, %y) {
                add(%x, %y)
              };
              %0 = %f(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = add(%c, %d);
              subtract(%1, %2)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              let %f = fn (%x {virtual_device=meta[VirtualDevice][0]}, %y {virtual_device=meta[VirtualDevice][0]},
                           virtual_device=meta[VirtualDevice][0]) {
                add(%x, %y)
              };
              %1 = %f(%a, %b);
              %2 = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %3 = device_copy(%2, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %4 = add(%c, %d);
              subtract(%3, %4)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(a, b, c, d):
        return np.subtract(np.add(a, b), np.add(c, d))
    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_higher_order():
    """Device constraints flow through higher-order functions."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # The constraint on %a flows back to %y via %f and %h
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) {
              let %f = fn (%g) {
                fn (%a) {
                  %0 = on_device(%a, virtual_device=meta[VirtualDevice][0]);
                  %1 = %g(%0);
                  add(%1, %x)
                }
              };
              let %h = fn (%b) {
                negative(%b)
              };
              %2 = %f(%h);
              %3 = %2(%y);
              subtract(%x, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              let %f = fn (%g {virtual_device=meta[VirtualDevice][1]}, virtual_device=meta[VirtualDevice][1]) {
                fn (%a {virtual_device=meta[VirtualDevice][0]}, virtual_device=meta[VirtualDevice][1]) {
                  %0 = device_copy(%a, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
                  %1 = %g(%0);
                  add(%1, %x)
                }
              };
              let %h = fn (%b {virtual_device=meta[VirtualDevice][1]}, virtual_device=meta[VirtualDevice][1]) {
                negative(%b)
              };
              %2 = %f(%h);
              %3 = %2(%y);
              subtract(%x, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y):
        def f(g):
            return lambda a: np.add(g(a), x)
        def h(b):
            return np.negative(b)
        return np.subtract(x, f(h)(y))
    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_function_in_tuple():
    """A function stored in a tuple has its argument and result devices fixed by the tuple."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Since %f ends up in a tuple its argument and result is forced to be on the CPU
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) {
              let %f = fn (%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
                %0 = on_device(%b, virtual_device=meta[VirtualDevice][0]);
                add(%a, %0)
              };
              let %t = (%f, %x);
              %1 = %t.1;
              %2 = %t.0;
              %2(%1, %y)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              let %f = fn (%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                           virtual_device=meta[VirtualDevice][0]) {
                add(%a, %b)
              };
              let %t = on_device((%f, %x), virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %0 = %t.1;
              %1 = %t.0;
              %1(%0, %y)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y):
        return np.add(x, y)
    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_device_copy():
    """An explicit device_copy in the input pins both the parameter and result devices."""
    const = rand((5, 7))
    metatable = {"VirtualDevice": [CPU, GPU], "relay.Constant": [relay.const(const)]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32]) {
              %0 = device_copy(%x, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              add(%0, meta[relay.Constant][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = device_copy(%x, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              add(%0, meta[relay.Constant][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x):
        return np.add(x, const)
    exercise(input(), expected(), ref, rands((5, 7), 1))
def test_shape_of():
    """vm.shape_of reads its tensor on the GPU but yields its shape result on the host."""
    metatable = {"VirtualDevice": [HOST, GPU]}
    # We need to use constrain_result=True in the on_device call so that the tensor will be on the GPU. Otherwise the
    # result defaults to the result device for @main which is the CPU, thus forcing a copy.
    # TODO(mbs): Perhaps the defaulting heuristics are being too clever?
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(?, ?), float32]) {
              %0 = on_device(%x, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              vm.shape_of(%0, dtype="int64")
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(?, ?), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              vm.shape_of(%x, dtype="int64")
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x):
        return x.shape
    exercise(input(), expected(), ref, rands((5, 7), 1))
def test_alloc_storage():
    """memory.alloc_storage takes its size/alignment on the host; the storage lives on the GPU."""
    metatable = {"VirtualDevice": [HOST, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%size: int64, %alignment: int64) {
              memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][1])
            }
            """,
            "from_string",
            core,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%size {virtual_device=meta[VirtualDevice][0]}: int64, %alignment {virtual_device=meta[VirtualDevice][0]}: int64,
                      virtual_device=meta[VirtualDevice][1]) {
              memory.alloc_storage(%size, %alignment, virtual_device=meta[VirtualDevice][1])
            }
            """,
            "from_string",
            core,
            metatable,
        )
    # Don't try to execute, too fiddly to setup.
    exercise(input(), expected(), None, None)
def test_alloc_tensor():
    """memory.alloc_tensor's offset and shape arguments are constrained to the host."""
    shape = np.array([3, 2])
    metatable = {
        "VirtualDevice": [HOST, GPU],
        "relay.Constant": [relay.const(shape, dtype="int64")],
    }
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%sto: Storage[]) {
              memory.alloc_tensor(%sto, 0, meta[relay.Constant][0],
                                  const_shape=meta[relay.Constant][0], assert_shape=[])
            }
            """,
            "from_string",
            core,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%sto {virtual_device=meta[VirtualDevice][1]}: Storage[], virtual_device=meta[VirtualDevice][1]) {
              %0 = on_device(0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %1 = on_device(meta[relay.Constant][0], virtual_device=meta[VirtualDevice][0], constrain_result=True);
              memory.alloc_tensor(%sto, %0, %1, const_shape=meta[relay.Constant][0], assert_shape=[])
            }
            """,
            "from_string",
            core,
            metatable,
        )
    # Don't try to execute, too fiddly to setup.
    exercise(input(), expected(), None, None)
def test_reshape_tensor():
    """vm.reshape_tensor's shape argument is constrained to the host."""
    newshape = [2, 4, 2]
    metatable = {
        "VirtualDevice": [HOST, GPU],
        "relay.Constant": [relay.const(newshape, dtype="int64")],
    }
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(2, 8), float32]) {
              vm.reshape_tensor(%x, meta[relay.Constant][0], newshape=[2, 4, 2])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(2, 8), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = on_device(meta[relay.Constant][0], virtual_device=meta[VirtualDevice][0], constrain_result=True);
              vm.reshape_tensor(%x, %0, newshape=[2, 4, 2])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x):
        return np.reshape(x, newshape)
    exercise(input(), expected(), ref, rands((2, 8), 1))
def test_dynamic_input():
    """Partially unknown tensor shapes do not affect device planning."""
    metatable = {"VirtualDevice": [GPU]}
    # There's nothing special about inferring devices for partially unknown types.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x0: Tensor[(?, ?), float32], %x1: Tensor[(?, ?), float32]) {
              add(%x0, %x1)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x0 {virtual_device=meta[VirtualDevice][0]}: Tensor[(?, ?), float32], %x1 {virtual_device=meta[VirtualDevice][0]}: Tensor[(?, ?), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              add(%x0, %x1)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x0, x1):
        return np.add(x0, x1)
    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_redundant_annotation():
    """Annotating the same expression twice induces a separate device_copy per annotation."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = subtract(%1, %z);
              %3 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              add(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %z {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %3 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %4 = subtract(%2, %z);
              %5 = device_copy(%3, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              add(%4, %5)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y, z):
        a = np.add(x, y)
        return np.add(np.subtract(a, z), a)
    exercise(input(), expected(), ref, rands((5, 7), 3))
def test_annotate_expr():
    """Annotations on both an intermediate expression and the final result are honored."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][1]);
              %2 = subtract(%1, %z);
              on_device(%2, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %z {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              subtract(%2, %z)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y, z):
        return np.subtract(np.add(x, y), z)
    exercise(input(), expected(), ref, rands((5, 7), 3))
def test_annotate_all():
    """When every annotation names the same device no device copies are needed."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = subtract(%1, %z);
              on_device(%2, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %z {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = add(%x, %y);
              subtract(%0, %z)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y, z):
        return np.subtract(np.add(x, y), z)
    exercise(input(), expected(), ref, rands((5, 7), 3))
def test_conv_network():
    r"""Conv2ds run on the CPU and only the add on the GPU, so copies are needed
    both into and out of the add. The network and devices are as follows:
    data1     data2    <--- CPU
      |         |
    conv2d    conv2d   <--- CPU
       \       /
        \     /
          add          <--- GPU
           |
        conv2d         <--- CPU
           |
        <result>       <--- CPU
    """
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%data1: Tensor[(1, 64, 56, 56), float32], %data2: Tensor[(1, 64, 56, 56), float32],
                      %weight: Tensor[(64, 64, 3, 3), float32]) {
              %0 = nn.conv2d(%data1, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %1 = nn.conv2d(%data2, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %2 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %3 = on_device(%1, virtual_device=meta[VirtualDevice][0]);
              %4 = add(%2, %3);
              %5 = on_device(%4, virtual_device=meta[VirtualDevice][1]);
              %6 = nn.conv2d(%5, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              on_device(%6, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%data1 {virtual_device=meta[VirtualDevice][0]}: Tensor[(1, 64, 56, 56), float32], %data2 {virtual_device=meta[VirtualDevice][0]}: Tensor[(1, 64, 56, 56), float32],
                      %weight {virtual_device=meta[VirtualDevice][0]}: Tensor[(64, 64, 3, 3), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = nn.conv2d(%data1, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = nn.conv2d(%data2, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
              %3 = on_device(%2, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %4 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %5 = device_copy(%3, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %6 = add(%4, %5);
              %7 = on_device(%6, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %8 = device_copy(%7, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              nn.conv2d(%8, %weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    # Don't try to execute, we don't have a reference conv2d
    exercise(input(), expected(), None, None)
def test_tuple_get_item():
    """A tuple on the CPU is projected on the CPU; copies are inserted after projection."""
    metatable = {"VirtualDevice": [CPU, GPU]}
    # Note that the device copy should be placed after projection rather than before. This is handled by
    # a heuristic in the pass.
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(3, 3, 4), float32]) {
              let %t = split(%x, indices_or_sections=3);
              %0 = on_device(%t, virtual_device=meta[VirtualDevice][0]);
              %1 = on_device(%t, virtual_device=meta[VirtualDevice][0]);
              %2 = %0.0;
              %3 = %1.1;
              %4 = subtract(%2, %3);
              on_device(%4, virtual_device=meta[VirtualDevice][1])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(3, 3, 4), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = split(%x, indices_or_sections=3);
              let %t = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %1 = %t.0;
              %2 = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %3 = %t.1;
              %4 = on_device(%3, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %5 = device_copy(%2, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %6 = device_copy(%4, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              subtract(%5, %6)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x):
        t = np.split(x, 3)
        return np.subtract(t[0], t[1])
    exercise(input(), expected(), ref, rands((3, 3, 4), 1))
def test_propogation():
    r"""Annotations on the fringes propagate through the unannotated interior negatives.
    The network and devices are as follows:
           x            <--- CPU
           |
        negative        <--- CPU
        /      \
    negative  negative  <--- GPU
        \      /
          add           <--- GPU
           |
        negative        <--- CPU
           |
        <result>        <--- CPU
    """
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32]) {
              %0 = negative(%x);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = negative(%1);
              %3 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %4 = negative(%3);
              %5 = on_device(%2, virtual_device=meta[VirtualDevice][1]);
              %6 = on_device(%4, virtual_device=meta[VirtualDevice][1]);
              %7 = add(%5, %6);
              %8 = on_device(%7, virtual_device=meta[VirtualDevice][1]);
              %9 = negative(%8);
              on_device(%9, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = negative(%x);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %3 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %4 = device_copy(%3, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %5 = negative(%2);
              %6 = negative(%4);
              %7 = add(%5, %6);
              %8 = on_device(%7, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %9 = device_copy(%8, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              negative(%9)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x):
        y = np.negative(x)
        return np.negative(np.add(np.negative(y), np.negative(y)))
    exercise(input(), expected(), ref, rands((5, 7), 1))
def test_fusible_network():
    r"""Mixed CPU/GPU annotations force copies at each device boundary.
    The network is as follows:
       x    y     <--- GPU
        \  /
        add       <--- GPU
        /  \
    negative \    <--- CPU
        \     \
         \  negative  <--- GPU
          \  /
          add     <--- GPU
           |
        negative  <--- CPU
           |
        <result>  <--- CPU
    """
    metatable = {"VirtualDevice": [CPU, GPU]}
    def input():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][1]);
              %2 = negative(%1);
              %3 = on_device(%2, virtual_device=meta[VirtualDevice][0]);
              %4 = negative(%0);
              %5 = add(%3, %4);
              %6 = on_device(%5, virtual_device=meta[VirtualDevice][1]);
              %7 = negative(%6);
              on_device(%7, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def expected():
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = add(%x, %y);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %2 = device_copy(%1, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              %3 = negative(%2);
              %4 = on_device(%3, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %5 = device_copy(%4, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %6 = negative(%0);
              %7 = add(%5, %6);
              %8 = on_device(%7, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %9 = device_copy(%8, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              negative(%9)
            }
            """,
            "from_string",
            None,
            metatable,
        )
    def ref(x, y):
        z = np.add(x, y)
        return np.negative(np.add(np.negative(z), np.negative(z)))
    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_unpropagatable_graph():
    r"""PlanDevices where the CPU/GPU split cannot be reconciled by
    propagation alone: `add` is on the CPU, `multiply` on the GPU, so the
    multiply result must be copied to the CPU before `subtract`.

    The network is as follows:
        a  b                <--- CPU
        \ /
         \ /  c  d          <--- GPU
          \   \ /
          add  \ /          <--- CPU
           \    \
            \  multiply     <--- GPU
             \  /
            subtract        <--- CPU
                |
            <result>        <--- CPU
    """
    metatable = {"VirtualDevice": [CPU, GPU]}

    def input():
        # %0 (add) is pinned to the CPU and %1 (multiply) to the GPU via
        # on_device annotations.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = multiply(%c, %d);
              %2 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %3 = on_device(%1, virtual_device=meta[VirtualDevice][1]);
              %4 = subtract(%2, %3);
              on_device(%4, virtual_device=meta[VirtualDevice][0])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # %a/%b end up on the CPU, %c/%d on the GPU; a single device_copy
        # moves the multiply result across for the CPU subtract.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %d {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = multiply(%c, %d);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %2 = add(%a, %b);
              %3 = device_copy(%1, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][0]);
              subtract(%2, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(a, b, c, d):
        # NumPy reference for the graph's numeric result.
        return np.subtract(np.add(a, b), np.multiply(c, d))

    exercise(input(), expected(), ref, rands((5, 7), 4))
def test_conditional():
    """PlanDevices through an `if` whose branches evaluate to functions,
    exercising the first-order/higher-order domain handling."""
    metatable = {"VirtualDevice": [CPU, GPU]}

    # The conditional is over a function type, thus exercising the first-order/higher-order domain handling.
    def input():
        # Only %y's use inside %f is device-annotated; everything else is
        # unconstrained and must be inferred.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: bool, %y: Tensor[(5, 7), float32], %z: Tensor[(5, 7), float32]) {
              let %f = fn (%a) {
                %0 = on_device(%y, virtual_device=meta[VirtualDevice][0], constrain_result=True);
                add(%a, %0)
              };
              let %g = fn (%a1) {
                subtract(%a1, %y)
              };
              let %h = if (%x) {
                %f
              } else {
                %g
              };
              %h(%z)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # Everything lands on the CPU; both closures get explicit parameter
        # and result devices, and the `if` result is wrapped in on_device.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: bool, %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %z {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              let %f = fn (%a {virtual_device=meta[VirtualDevice][0]}, virtual_device=meta[VirtualDevice][0]) {
                add(%a, %y)
              };
              let %g = fn (%a1 {virtual_device=meta[VirtualDevice][0]}, virtual_device=meta[VirtualDevice][0]) {
                subtract(%a1, %y)
              };
              let %h = on_device(if (%x) {
                %f
              } else {
                %g
              }, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %h(%z)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(x, y, z):
        # Python mirror of the conditional-over-functions structure.
        def f(a):
            return np.add(a, y)

        def g(a):
            return np.subtract(a, y)

        h = f if x else g
        return h(z)

    exercise(input(), expected(), ref, [True, rand((5, 7)), rand((5, 7))])
def test_global():
    """PlanDevices across a call to another global function: the annotation
    inside @f fixes its second parameter's device, and that constraint flows
    back to @main's arguments."""
    metatable = {"VirtualDevice": [CPU, GPU]}

    def input():
        # Only %b inside @f is annotated (CPU); all other devices are inferred.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @f(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
              %0 = on_device(%b, virtual_device=meta[VirtualDevice][0]);
              add(%a, %0)
            }
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
              @f(%y, %x)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # @f computes on the GPU with %b copied in from the CPU; since @main
        # passes (%y, %x), %x ends up on the CPU and %y on the GPU.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @f(%a {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                   virtual_device=meta[VirtualDevice][1]) -> Tensor[(5, 7), float32] {
              %0 = device_copy(%b, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              add(%a, %0)
            }
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) -> Tensor[(5, 7), float32] {
              @f(%y, %x)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(x, y):
        # NumPy reference: @f is just an add; note the swapped argument order.
        def f(a, b):
            return np.add(a, b)

        return f(x, y)

    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_ref():
    """PlanDevices through Relay reference cells (ref / ref_write / ref_read):
    the written value must be copied onto the ref's device."""
    metatable = {"VirtualDevice": [CPU, GPU]}

    def input():
        # %y (the value written into the ref) is pinned to the CPU.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) {
              let %r = ref(%x);
              %0 = on_device(%y, virtual_device=meta[VirtualDevice][0]);
              ref_write(%r, %0);
              %1 = ref_read(%r);
              add(%x, %1)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # The ref lives on the GPU; %y is device-copied from the CPU before
        # the write.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              let %r = on_device(ref(%x), virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %0 = device_copy(%y, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              on_device(ref_write(%r, %0), virtual_device=meta[VirtualDevice][1], constrain_result=True);
              %1 = ref_read(%r);
              add(%x, %1)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(x, y):
        # Python mirror of the reference-cell semantics.
        r = {"value": x}
        r["value"] = y
        return np.add(x, r["value"])

    # Don't try to execute, no backend currently supports both hetrogeneous devices and references.
    exercise(input(), expected(), None, None)
def test_adt():
    """PlanDevices through ADT constructors (Cons/Nil) and a `match?` clause:
    everything collapses onto the CPU, and the redundant on_device around %y
    disappears."""
    metatable = {"VirtualDevice": [CPU, GPU]}

    def input():
        # A two-element list [%x, %y] with %y explicitly constrained to the CPU.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            type List[A] {
              Cons(A, List[A]),
              Nil,
            }
            def @main(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32]) {
              %0 = on_device(%y, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %1 = Nil;
              %2 = Cons(%0, %1);
              let %l = Cons(%x, %2);
              match? (%l) {
                Cons(%z, _) => %z
              }
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # Both parameters and the whole list end up on the CPU; the only
        # remaining on_device constrains the let-bound list itself.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            type List[A] {
              Cons(A, List[A]),
              Nil,
            }
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][0]) {
              %0 = Nil;
              %1 = Cons(%y, %0);
              let %l = on_device(Cons(%x, %1), virtual_device=meta[VirtualDevice][0], constrain_result=True);
              match? (%l) {
                Cons(%z, _) => %z
              }
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(x, y):
        # Python mirror: head of the two-element list.
        l = [x, y]
        return l[0]

    exercise(input(), expected(), ref, rands((5, 7), 2))
def test_free_on_device():
    """Tests that the 'free' form of on_device (ie with constrain_body=False) can be used to allow
    a device_copy to be inserted if necessary, but otherwise does not prevent the flow of
    device information."""
    metatable = {
        "VirtualDevice": [
            CPU,  # no memory scope constraint
            CPU_SCOPE_A,  # constrain to scopeA
            CPU_SCOPE_B,
        ]
    }  # constrain to scopeB

    # Everything defaults to GPU
    def input():
        # Each argument of @on_scope_b is wrapped in a free (constrain_body=False)
        # on_device; @on_scope_b itself demands "scopeB".
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @on_scope_b(%x {virtual_device=meta[VirtualDevice][2]}: Tensor[(5, 7), float32],
                            virtual_device=meta[VirtualDevice][2]) -> Tensor[(5, 7), float32] {
              %x
            }
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %c {virtual_device=meta[VirtualDevice][2]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              // %a's memory scope is unconstrained, so will take on "scopeB" and on_device has no effect
              %0 = @on_scope_b(on_device(%a, virtual_device=meta[VirtualDevice][0], constrain_body=False));
              // %b's memory scope is "scopeA", so will require a "scopeA"->"scopeB" copy.
              %1 = @on_scope_b(on_device(%b, virtual_device=meta[VirtualDevice][0], constrain_body=False));
              // %c's memory scope is "scopeB", so no copy required.
              %2 = @on_scope_b(on_device(%c, virtual_device=meta[VirtualDevice][0], constrain_body=False));
              // result's memory scope is is on "scopeA", so will require a "scopeB"->"scopeA" copy.
              %3 = add(add(%0, %1), %2);
              on_device(%3, virtual_device=meta[VirtualDevice][0], constrain_body=False)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # Free on_device wrappers are gone: %a is now natively "scopeB", %b
        # gets a scopeA->scopeB copy, and the result is copied back to scopeA.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @on_scope_b(%x {virtual_device=meta[VirtualDevice][2]}: Tensor[(5, 7), float32],
                            virtual_device=meta[VirtualDevice][2]) -> Tensor[(5, 7), float32] {
              %x
            }
            def @main(%a {virtual_device=meta[VirtualDevice][2]}: Tensor[(5, 7), float32], %b {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32], %c {virtual_device=meta[VirtualDevice][2]}: Tensor[(5, 7), float32],
                      virtual_device=meta[VirtualDevice][1]) {
              %0 = @on_scope_b(%a);
              %1 = device_copy(%b, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][2]);
              %2 = @on_scope_b(%1);
              %3 = @on_scope_b(%c);
              %4 = add(add(%0, %2), %3);
              %5 = on_device(%4, virtual_device=meta[VirtualDevice][2], constrain_result=True);
              device_copy(%5, src_virtual_device=meta[VirtualDevice][2], dst_virtual_device=meta[VirtualDevice][1])
            }
            """,
            "from_string",
            None,
            metatable,
        )

    # Structural check only; memory scopes are not executable here.
    exercise(input(), expected(), None, None)
def test_lowered():
    """
    Tests propagation of memory scopes from PrimFuncs and insertion
    of device_copies to mediate any scope changes.
    """

    # TIR gemm whose buffer scopes drive the Relay-level planning below.
    @T.prim_func
    def input_gem(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128], scope="scopeA")  # will flow out
        B = T.match_buffer(b, [128, 128], scope="")  # will flow in
        C = T.match_buffer(c, [128, 128], scope="scopeB")  # will flow out
        D = T.match_buffer(d, [128, 128], scope="scopeA")  # will flow out

        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    D[vi, vj] = C[vi, vj]
                D[vi, vj] = D[vi, vj] + A[vi, vk] * B[vj, vk]

    # Same kernel after planning: B's empty scope has been filled in.
    @T.prim_func
    def expected_gem(a: T.handle, b: T.handle, c: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128], scope="scopeA")
        B = T.match_buffer(b, [128, 128], scope="scopeB")  # flowed in
        C = T.match_buffer(c, [128, 128], scope="scopeB")
        D = T.match_buffer(d, [128, 128], scope="scopeA")

        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    D[vi, vj] = C[vi, vj]
                D[vi, vj] = D[vi, vj] + A[vi, vk] * B[vj, vk]

    metatable = {
        "VirtualDevice": [
            CPU,  # meta[VirtualDevice][0], no memory scope
            CPU_SCOPE_A,  # meta[VirtualDevice][1], "scopeA"
            CPU_SCOPE_B,
        ]
    }  # meta[VirtualDevice][2], "scopeB"

    # Relay-level type of the lowered gemm: three inputs, one output.
    gem_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    gem_gv = relay.GlobalVar("gem", type_annot=gem_ty)

    def input():
        mod = tvm.ir.IRModule()
        mod[gem_gv] = input_gem
        # - %x on CPU, no memory scope constraint, so will be constrained by first param of gem to "scopeA".
        # - %y on CPU "scopeB", so will flow in to second param of gem.
        # - %z on CPU "scopeA", so will clash with third param of gem and will need device_copy.
        # - result on CPU "scopeB", but result of gem on "scopeA" so will need device_copy
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                      %y {virtual_device=meta[VirtualDevice][2]}: Tensor[(128, 128), float32],
                      %z {virtual_device=meta[VirtualDevice][1]}: Tensor[(128, 128), float32],
                      virtual_device=meta[VirtualDevice][2]) {
              call_lowered(@gem, (%x, %y, %z))
            }
            """,
            "from_string",
            mod,
            metatable,
        )

    def expected():
        mod = tvm.ir.IRModule()
        mod[gem_gv] = expected_gem
        # - %x now on CPU "scopeA", no device_copy needed.
        # - %y still on CPU "scopeB", no device_copy needed.
        # - %z still on CPU "scopeA", needs device_copy to "scopeB".
        # - result still on CPU "scopeB", needs device_copy from "scopeA".
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%x {virtual_device=meta[VirtualDevice][1]}: Tensor[(128, 128), float32],
                      %y {virtual_device=meta[VirtualDevice][2]}: Tensor[(128, 128), float32],
                      %z {virtual_device=meta[VirtualDevice][1]}: Tensor[(128, 128), float32],
                      virtual_device=meta[VirtualDevice][2]) {
              %0 = device_copy(%z, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][2]);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][2], constrain_result=True);
              %2 = call_lowered(@gem, (%x, %y, %1));
              %3 = on_device(%2, virtual_device=meta[VirtualDevice][1], constrain_result=True);
              device_copy(%3, src_virtual_device=meta[VirtualDevice][1], dst_virtual_device=meta[VirtualDevice][2])
            }
            """,
            "from_string",
            mod,
            metatable,
        )

    # Structural check only; memory-scoped TIR is not executable here.
    exercise(input(), expected(), None, None)
def test_stack_overflow():
    """Regression test: PlanDevices must survive a very deep dataflow chain
    (~1555 chained `add`s) without blowing the recursion/stack limit.

    Only checks that parsing, InferType and PlanDevices complete; there is no
    expected module to compare against.
    """
    metatable = {"VirtualDevice": [CPU, GPU]}

    # Everything defaults to GPU
    def input():
        # Build %i = add(%(i-1), %(i-2)) for i in [2, end) as a flat string.
        # Collect the pieces in a list and join once instead of repeated
        # string +=, which is quadratic in the worst case.
        # (The unused `tmp` filename local from the original has been removed.)
        pieces = [
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = add(%c, %d);
            """
        ]
        end = 1555
        for i in range(2, end):
            pieces.append(f"\n\t%{i} = add(%{i - 1}, %{i - 2});")
        pieces.append(f"\n\tadd(%{end - 1}, %{end - 2})")
        pieces.append("\n\t}")
        return tvm.relay.parse(
            "".join(pieces),
            "from_string",
            None,
            metatable,
        )

    config = tvm.target.make_compilation_config(CTXT, TARGETS)
    actual_mod = relay.transform.InferType()(input())
    actual_mod = relay.transform.PlanDevices(config)(actual_mod)
    # A final InferType proves the planned module is still well-typed.
    relay.transform.InferType()(actual_mod)
def test_primitive():
    """Annotations on Primitive functions should be accepted, even though the body
    of the Primitive function is not considered during PlanDevices."""
    global_virtual_device = tvm.target.VirtualDevice(memory_scope="global")
    texture_virtual_device = tvm.target.VirtualDevice(memory_scope="global.texture")
    metatable = {
        "VirtualDevice": [
            global_virtual_device,
            texture_virtual_device,
        ]
    }

    # Three Primitive closures: an unannotated layout_transform, a fully
    # annotated add (global params, texture result), and a result-annotated
    # layout_transform back to NCHW.
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data1: Tensor[(1, 32, 40, 40), float32],
                  %data2: Tensor[(1, 32, 40, 40), float32]) {
          %0 = fn (%a, Primitive=1) {
            layout_transform(%a, src_layout="NCHW", dst_layout="NCHW4c")
          };
          %1 = %0(%data1);
          %3 = %0(%data2);
          %5 = fn (%a {virtual_device=meta[VirtualDevice][0]},  // global
                   %b {virtual_device=meta[VirtualDevice][0]},  // global
                   virtual_device=meta[VirtualDevice][1],  // texture
                   Primitive=1) {
            add(%a, %b)
          };
          %6 = %5(%1, %3);
          %10 = fn (%a,
                    virtual_device=meta[VirtualDevice][0],  // global
                    Primitive=1) {
            layout_transform(%a, src_layout="NCHW4c", dst_layout="NCHW")
          };
          %10(%6)
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # Print before/after for easier debugging of planning failures.
    print(mod)

    config = tvm.target.make_compilation_config(CTXT, GPU_TARGET)
    mod = relay.transform.InferType()(mod)
    # PlanDevices should succeed.
    mod = relay.transform.PlanDevices(config)(mod)
    print(mod)
def test_conflicated_inputs():
    """PlanDevices when one input (%b) is needed on both devices: it is read
    by a CPU add and a GPU add, so a device_copy of %b must be inserted."""
    metatable = {"VirtualDevice": [CPU, GPU]}

    def input():
        # %0 (add of %a, %b) is pinned to the CPU, %2 (add of %b, %c) to the GPU.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0]);
              %2 = add(%b, %c);
              %3 = on_device(%2, virtual_device=meta[VirtualDevice][1]);
              subtract(%1, %3)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def expected():
        # %b stays on the CPU and is copied to the GPU for the second add;
        # the CPU add result is also copied across for the GPU subtract.
        return tvm.relay.parse(
            """
            #[version = "0.0.5"]
            def @main(%a {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %b {virtual_device=meta[VirtualDevice][0]}: Tensor[(5, 7), float32],
                      %c {virtual_device=meta[VirtualDevice][1]}: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
              %2 = device_copy(%b, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %3 = device_copy(%1, src_virtual_device=meta[VirtualDevice][0], dst_virtual_device=meta[VirtualDevice][1]);
              %4 = add(%2, %c);
              subtract(%3, %4)
            }
            """,
            "from_string",
            None,
            metatable,
        )

    def ref(a, b, c):
        # NumPy reference for the graph's numeric result.
        return np.subtract(np.add(a, b), np.add(b, c))

    exercise(input(), expected(), ref, rands((5, 7), 3))
# Allow running this test file directly; tvm.testing.main discovers and runs
# all the test_* functions above.
if __name__ == "__main__":
    tvm.testing.main()
| 69,188 | 35.763549 | 228 | py |
tvm | tvm-main/tests/python/relay/test_tensor_array.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
import numpy as np
def vmobj_to_list(mod, o, dtype="float32"):
    """Recursively flatten an executor result object into a flat Python list.

    NDArrays become nested lists, ADTs/constructor values are walked field by
    field, and the `tensor_nil` constructor is encoded as a literal 0.
    """
    _, tensor_nil, _, _, _, _, _, _, _ = mod.get_type(f"tensor_{dtype}_t")

    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy().tolist()]

    if isinstance(o, tvm.runtime.container.ADT):
        if len(o) == 0:
            # An empty ADT is either the nil tensor (encoded as 0) or nothing.
            return [0] if tensor_nil.tag == o.tag else []
        flattened = []
        for field in o:
            flattened.extend(vmobj_to_list(mod, field, dtype))
        return flattened

    if isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
        ctor_name = o.constructor.name_hint
        if ctor_name == "Cons":
            # head ++ flattened tail
            flattened = vmobj_to_list(mod, o.fields[0], dtype)
            flattened.extend(vmobj_to_list(mod, o.fields[1], dtype))
            return flattened
        if ctor_name == "Nil":
            return []
        if "tensor_nil" in ctor_name:
            return [0]
        if "tensor" in ctor_name:
            return [o.fields[0].numpy()]
        raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)

    raise RuntimeError("Unknown object type: %s" % type(o))
def check_tensor_array(ta_mod, ref_res, *args, dtype="float32", rtol=1e-5):
    """Evaluate `ta_mod` under both the debug and vm executors on LLVM/CPU and
    compare the flattened result against `ref_res`."""
    targets = [("llvm", tvm.cpu(0))]  # testing.enabled_targets():
    for executor_kind in ("debug", "vm"):
        for target, dev in targets:
            # The debug executor only runs on the CPU.
            if executor_kind == "debug" and dev.device_type != tvm.cpu().device_type:
                continue
            func = relay.create_executor(
                executor_kind, mod=ta_mod, device=dev, target=target
            ).evaluate()
            got = vmobj_to_list(ta_mod, func(*args), dtype)
            tvm.testing.assert_allclose(ref_res, got, rtol=rtol, atol=rtol)
@tvm.testing.uses_gpu
def test_tensor_expand_dims():
    """tensor_expand_dims on a rank-1 tensor matches np.expand_dims(axis=0)."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        expand_dims = prelude.get_global_var("tensor_expand_dims", dtype)
        make_tensor1 = prelude.get_tensor_ctor("tensor1", dtype)
        x = relay.var("x")
        mod["main"] = relay.Function([x], expand_dims(make_tensor1(x)))
        data = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
        check_tensor_array(mod, [np.expand_dims(data, axis=0)], data)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_array_constructor():
    """A freshly constructed length-5 tensor array reads back as five empty
    (nil) slots, encoded as zeros."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        make_array = prelude.get_global_var("tensor_array", dtype)
        length = relay.var("x")
        mod["main"] = relay.Function([length], make_array(length))
        check_tensor_array(mod, np.array([0, 0, 0, 0, 0]), 5, dtype=dtype)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_array_read():
    """Reading any slot of an unwritten tensor array yields the nil tensor
    (encoded as 0), for arrays of different lengths."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        l = relay.var("l")
        i = relay.var("i")
        make_array = prelude.get_global_var("tensor_array", dtype)
        read = prelude.get_global_var("tensor_array_read", dtype)
        mod["main"] = relay.Function([l, i], read(make_array(l), i))
        for length, index in ((1, 0), (5, 1)):
            check_tensor_array(mod, [0], *(length, index), dtype=dtype)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_array_write():
    """Writing two rank-1 tensors into slots 0 and 1 of a length-2 tensor
    array reads back as [v1, v2].

    (Removed the unused local `tensor_t = p.get_type(...)` from the original.)
    """

    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        tensor_array = p.get_global_var("tensor_array", dtype)
        init_tensor_array = tensor_array(relay.const(2))
        write_func = p.get_global_var("tensor_array_write", dtype)
        tensor1 = p.get_tensor_ctor("tensor1", dtype)
        # Chain two writes; each write returns the updated array.
        tensor_array1 = write_func(init_tensor_array, relay.const(0), tensor1(v1))
        tensor_array2 = write_func(tensor_array1, relay.const(1), tensor1(v2))
        mod["main"] = relay.Function([v1, v2], tensor_array2)
        expected = [3, 7]
        check_tensor_array(mod, expected, *(3, 7), dtype=dtype)

    run("float32")
    run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_stack():
    """Stacking a length-3 tensor array of identical rank-1 tensors matches
    np.stack.

    (Removed the unused local `rlist = p.mod.get_global_type_var(f"List")`
    from the original — it was never referenced and its f-string had no
    placeholders.)
    """

    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)
        tensor_t = p.get_type("tensor_t", dtype)
        tensor_array = p.get_global_var("tensor_array", dtype)
        tensor1 = p.get_tensor_ctor("tensor1", dtype)
        write = p.get_global_var("tensor_array_write", dtype)
        stack = p.get_global_var("tensor_array_stack", dtype)
        # TODO extract test case from inference failures
        # setting this wrong causes crashes
        v = relay.var("v", shape=(1,), dtype=dtype)
        init_tensor_array = tensor_array(relay.const(3))
        tensor_array1 = write(init_tensor_array, relay.const(0), tensor1(v))
        tensor_array2 = write(tensor_array1, relay.const(1), tensor1(v))
        tensor_array3 = write(tensor_array2, relay.const(2), tensor1(v))
        tensor_array4 = stack(tensor_array3)
        # The explicit tensor_t() return annotation is load-bearing (see TODO).
        mod["main"] = relay.Function([v], tensor_array4, tensor_t())
        t = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
        expected = [np.stack([t, t, t])]
        check_tensor_array(mod, expected, t, dtype=dtype)

    run("float32")
    run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_unstack():
    """Unstacking a rank-1 tensor reproduces its elements unchanged."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        unstack = prelude.get_global_var("tensor_array_unstack_tensor1", dtype)
        v = relay.var("v")
        mod["main"] = relay.Function([v], unstack(v))
        data = np.random.uniform(low=0.0, high=8.0, size=(1,)).astype(dtype)
        check_tensor_array(mod, data, data, dtype=dtype)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_take():
    """tensor_take selects the row range [lower, upper) like np.take on
    axis 0, for two different slice ranges."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        take = prelude.get_global_var("tensor_take", dtype)
        make_tensor2 = prelude.get_tensor_ctor("tensor2", dtype)
        v = relay.var("v")
        lower = relay.var("lower")
        upper = relay.var("upper")
        mod["main"] = relay.Function([v, lower, upper], take(make_tensor2(v), lower, upper))
        data = np.random.uniform(low=0.0, high=8.0, size=(10, 10)).astype(dtype)
        for lo, hi in ((2, 5), (0, 9)):
            expected = [np.take(data, range(lo, hi), axis=0)]
            check_tensor_array(mod, expected, *(data, lo, hi), dtype=dtype)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_concatenate():
    """Concatenating two dynamically-shaped rank-1 tensors matches
    np.concatenate."""

    def run(dtype):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        concat = prelude.get_global_var("tensor_concatenate", dtype)
        make_tensor1 = prelude.get_tensor_ctor("tensor1", dtype)
        v1 = relay.var("v1", shape=(tvm.tir.Any(),), dtype=dtype)
        v2 = relay.var("v2", shape=(tvm.tir.Any(),), dtype=dtype)
        mod["main"] = relay.Function([v1, v2], concat(make_tensor1(v1), make_tensor1(v2)))
        lhs = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)
        rhs = np.random.uniform(low=0.0, high=8.0, size=(5,)).astype(dtype)
        check_tensor_array(mod, [np.concatenate((lhs, rhs))], *(lhs, rhs), dtype=dtype)

    for dtype in ("float32", "int32"):
        run(dtype)
@tvm.testing.uses_gpu
def test_tensor_array_concat():
    """Concatenating a tensor array of two rank-2 tensors matches
    np.concatenate along axis 0.

    (Fixed a misleading local name: the original bound the "tensor2"
    constructor to a variable called `tensor1`.)
    """

    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        tensor_array = p.get_global_var("tensor_array", dtype)
        tensor_array1 = tensor_array(relay.const(2))
        write_func = p.get_global_var("tensor_array_write", dtype)
        concat_func = p.get_global_var("tensor_array_concat", dtype)
        # Rank-2 tensor constructor (local name now matches what it holds).
        tensor2 = p.get_tensor_ctor("tensor2", dtype)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
        tensor_array_concat = concat_func(tensor_array1)
        mod["main"] = relay.Function([v1, v2], tensor_array_concat)
        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
        expected = [np.concatenate((v1_data, v2_data), axis=0)]
        check_tensor_array(mod, expected, *(v1_data, v2_data), dtype=dtype)

    run("float32")
    run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_scatter():
    """Scattering two values at indices [0, 1] of a length-3 tensor array
    overwrites those slots and leaves slot 2 untouched.

    (Fixed a copy-paste typo: `v3` was created as `relay.var("v2")`,
    duplicating the name hint; it is now `relay.var("v3")`.)
    """

    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)

        # Destination tensor array with three rank-2 slots.
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        v3 = relay.var("v3")
        tensor_array = p.get_global_var("tensor_array", dtype)
        tensor_array1 = tensor_array(relay.const(3))
        write_func = p.get_global_var("tensor_array_write", dtype)
        scatter_func = p.get_global_var("tensor_array_scatter", dtype)
        tensor2 = p.get_tensor_ctor("tensor2", dtype)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))

        # indices array
        index = relay.var("index")

        # values array
        value_0 = relay.var("value_0")
        value_1 = relay.var("value_1")
        values_array = tensor_array(relay.const(2))
        values_array = write_func(values_array, relay.const(0), tensor2(value_0))
        values_array = write_func(values_array, relay.const(1), tensor2(value_1))

        # create the scatter function
        tensor_array_scatter = scatter_func(tensor_array1, index, values_array)
        mod["main"] = relay.Function([v1, v2, v3, index, value_0, value_1], tensor_array_scatter)

        # initialize and check: slots 0 and 1 are replaced, slot 2 survives.
        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        index_data = np.array([0, 1], dtype="int32")
        val1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        val2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        expected = [val1_data, val2_data, v3_data]
        check_tensor_array(
            mod,
            expected,
            *(v1_data, v2_data, v3_data, index_data, val1_data, val2_data),
            dtype=dtype,
        )

    run("float32")
    run("int32")
@tvm.testing.uses_gpu
def test_tensor_array_split():
    """Splitting a (4, 3) value tensor with lengths [2, 2] into a length-3
    tensor array fills slots 0 and 1 and leaves slot 2 untouched.

    (Fixed a copy-paste typo: `v3` was created as `relay.var("v2")`,
    duplicating the name hint; it is now `relay.var("v3")`.)
    """

    def run(dtype):
        mod = tvm.IRModule()
        p = Prelude(mod)

        # Destination tensor array with three rank-2 slots.
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        v3 = relay.var("v3")
        tensor_array = p.get_global_var("tensor_array", dtype)
        tensor_array1 = tensor_array(relay.const(3))
        write_func = p.get_global_var("tensor_array_write", dtype)
        split_func = p.get_global_var("tensor_array_split", dtype)
        tensor2 = p.get_tensor_ctor("tensor2", dtype)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor2(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor2(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor2(v3))

        # value tensor
        value = relay.var("value")

        # lengths tensor
        ta_len = relay.var("length")

        # create the split function
        tensor_array_split = split_func(tensor_array1, tensor2(value), ta_len)
        mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)

        # initialize and check: value rows land in slots 0 and 1, slot 2 survives.
        v1_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        value_data = np.random.uniform(low=0.0, high=8.0, size=(4, 3)).astype(dtype)
        length_data = np.array([2, 2], dtype="int32")
        expected = np.concatenate([value_data, v3_data])
        expected = np.split(expected, indices_or_sections=[2, 4])
        check_tensor_array(
            mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
        )

    run("float32")
    run("int32")
@tvm.testing.uses_gpu
def test_static_tensor_take():
    """Static-shape tensor_take selects rows [lower, upper) like np.take on
    axis 0, for two different slice ranges."""

    def run(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        take = prelude.get_global_var_static("tensor_take", dtype, shape)
        make_tensor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        v = relay.var("v")
        lower = relay.var("lower")
        upper = relay.var("upper")
        mod["main"] = relay.Function([v, lower, upper], take(make_tensor(v), lower, upper))
        data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        for lo, hi in ((2, 5), (0, 9)):
            expected = [np.take(data, range(lo, hi), axis=0)]
            check_tensor_array(mod, expected, *(data, lo, hi), dtype=dtype)

    run("float32", [10, 10])
    run("int32", [15, 11])
@tvm.testing.uses_gpu
def test_static_tensor_concatenate():
    """Static-shape concatenation of two same-shaped tensors matches
    np.concatenate."""

    def run(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        concat = prelude.get_global_var_static("tensor_concatenate", dtype, shape)
        make_tensor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        mod["main"] = relay.Function([v1, v2], concat(make_tensor(v1), make_tensor(v2)))
        lhs = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        rhs = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        check_tensor_array(mod, [np.concatenate((lhs, rhs))], *(lhs, rhs), dtype=dtype)

    run("float32", [5])
    run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_expand_dims():
    """Static-shape tensor_expand_dims matches np.expand_dims(axis=0),
    including the scalar (rank-0) case."""

    def run(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        expand_dims = prelude.get_global_var_static("tensor_expand_dims", dtype, shape)
        make_tensor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        x = relay.var("x")
        mod["main"] = relay.Function([x], expand_dims(make_tensor(x)))
        data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        check_tensor_array(mod, [np.expand_dims(data, axis=0)], data)

    run("float32", [])
    run("int32", [2])
@tvm.testing.uses_gpu
def test_static_tensor_array_constructor():
    """Registering static tensor-array ops must expose a tensor_constructor
    name for the given dtype/shape."""

    def run(dtype, shape):
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        tensor_constructor = p.get_name_static("tensor_constructor", dtype, shape)
        # PEP 8 (E711): compare against None with identity, not `!= None`.
        assert tensor_constructor is not None

    run("float32", [1, 1])
@tvm.testing.uses_gpu
def test_static_tensor_array_read():
    """Reading each slot of a fully written static tensor array returns the
    value written to that slot."""

    def run(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()

        ta_length = 3
        np_data_list = [
            np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)
        ]

        v0 = relay.var("v0")
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        n = relay.var("n")
        make_tensor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        tensor_array = prelude.get_global_var_static("tensor_array", dtype, shape)
        read_func = prelude.get_global_var_static("tensor_array_read", dtype, shape)
        write_func = prelude.get_global_var_static("tensor_array_write", dtype, shape)

        # Fill slots 0..2, then read back the slot selected by %n.
        ta = tensor_array(relay.const(ta_length))
        for slot, var in enumerate((v0, v1, v2)):
            ta = write_func(ta, relay.const(slot), make_tensor(var))
        mod["main"] = relay.Function([v0, v1, v2, n], read_func(ta, n))

        for slot in range(ta_length):
            check_tensor_array(
                mod, [np_data_list[slot]], *list(np_data_list + [slot]), dtype=dtype
            )

    run("float32", [])
    run("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_write():
    """Writing every slot of a static tensor array stores the given tensors in order."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        ta_length = 2
        np_data_list = [
            np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)
        ]
        arg_vars = [relay.var("v%d" % i) for i in range(ta_length)]
        ta_ctor = prelude.get_global_var_static("tensor_array", dtype, shape)
        write_fn = prelude.get_global_var_static("tensor_array_write", dtype, shape)
        tensor_ctor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        ta = ta_ctor(relay.const(ta_length))
        for pos, var in enumerate(arg_vars):
            ta = write_fn(ta, relay.const(pos), tensor_ctor(var))
        mod["main"] = relay.Function(arg_vars, ta)
        check_tensor_array(mod, np_data_list, *np_data_list, dtype=dtype)

    check("float32", [])
    check("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_unstack():
    """tensor_array_unstack splits a tensor along axis 0 into array elements."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        unstack_fn = prelude.get_global_var_static("tensor_array_unstack", dtype, shape)
        inp = relay.var("v")
        mod["main"] = relay.Function([inp], unstack_fn(inp))
        inp_np = np.random.uniform(low=0, high=10, size=shape).astype(dtype)
        # One expected element per slice along the leading axis.
        check_tensor_array(mod, list(inp_np), inp_np, dtype=dtype)

    check("float32", [4])
    check("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_scatter():
    """tensor_array_scatter overwrites the slots listed in `indices` with the
    corresponding elements of a values array; untouched slots keep their data."""

    def run(dtype, shape, indices_shape=None):
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        if indices_shape is not None:
            # Re-register scatter with a statically known indices shape.
            static_tensor_array_ops.define_tensor_array_scatter(indices_shape, True)
        # tensor array with three initial elements
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        # Fixed: this variable's name hint was mistakenly "v2" (copy-paste);
        # the hint now matches the Python binding.
        v3 = relay.var("v3")
        tensor_array = p.get_global_var_static("tensor_array", dtype, shape)
        tensor_array0 = tensor_array(relay.const(3))
        write_func = p.get_global_var_static("tensor_array_write", dtype, shape)
        scatter_func = p.get_global_var_static("tensor_array_scatter", dtype, shape)
        tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        tensor_array1 = write_func(tensor_array0, relay.const(0), tensor(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))
        # indices array
        index = relay.var("index")
        # values array
        value_0 = relay.var("value_0")
        value_1 = relay.var("value_1")
        values_array = tensor_array(relay.const(2))
        values_array = write_func(values_array, relay.const(0), tensor(value_0))
        values_array = write_func(values_array, relay.const(1), tensor(value_1))
        # create the scatter function
        tensor_array_scatter = scatter_func(tensor_array1, index, values_array)
        mod["main"] = relay.Function([v1, v2, v3, index, value_0, value_1], tensor_array_scatter)
        # initialize and check: slots 0 and 1 are replaced, slot 2 is untouched
        v1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        index_data = np.array([0, 1], dtype="int32")
        val1_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        val2_data = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        expected = [val1_data, val2_data, v3_data]
        check_tensor_array(
            mod,
            expected,
            *(v1_data, v2_data, v3_data, index_data, val1_data, val2_data),
            dtype=dtype,
        )

    run("float32", [2, 3])
    run("int32", [2, 3])
    run(
        "float32",
        [2, 3],
        [
            2,
        ],
    )
@tvm.testing.uses_gpu
def test_static_tensor_array_split():
    """tensor_array_split scatters row-chunks of `value` (chunk sizes given by
    the lengths tensor) into consecutive slots of the target tensor array."""

    def run(dtype, shape, value_shape=None, lengths_shape=None):
        mod = tvm.IRModule()
        p = Prelude(mod)
        static_tensor_array_ops = StaticTensorArrayOps(p, dtype, shape)
        static_tensor_array_ops.register()
        if value_shape is not None or lengths_shape is not None:
            # Re-register split with statically known value/lengths shapes.
            static_tensor_array_ops.define_tensor_array_split(value_shape, lengths_shape, False)
        # tensor array whose element type has a dynamic first dimension
        v1 = relay.var("v1")
        v2 = relay.var("v2")
        # Fixed: this variable's name hint was mistakenly "v2" (copy-paste);
        # the hint now matches the Python binding.
        v3 = relay.var("v3")
        adt_shape = [
            relay.Any(),
        ] + shape[1:]
        test_ops = StaticTensorArrayOps(p, dtype, adt_shape)
        test_ops.register()
        tensor_array = test_ops.get_global_var("tensor_array")
        tensor_array1 = tensor_array(relay.const(3))
        write_func = test_ops.get_global_var("tensor_array_write")
        split_ops = StaticTensorArrayOps(p, dtype, shape)
        split_ops.register()
        split_func = split_ops.get_global_var("tensor_array_split")
        tensor = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)
        tensor_array1 = write_func(tensor_array1, relay.const(0), tensor(v1))
        tensor_array1 = write_func(tensor_array1, relay.const(1), tensor(v2))
        tensor_array1 = write_func(tensor_array1, relay.const(2), tensor(v3))
        # value tensor
        value = relay.var("value")
        # lengths tensor
        ta_len = relay.var("length")
        # create the split function
        if value_shape is None:
            tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        else:
            static_tensor_array_ops = StaticTensorArrayOps(p, dtype, value_shape)
            static_tensor_array_ops.register()
            tensor1 = p.get_tensor_ctor_static("tensor_constructor", dtype, test_ops.shape)
        tensor_array_split = split_func(tensor_array1, tensor1(value), ta_len)
        mod["main"] = relay.Function([v1, v2, v3, value, ta_len], tensor_array_split)
        # initialize and check: slots 0/1 receive the two chunks of `value`,
        # slot 2 keeps v3's data
        v1_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        v2_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        v3_data = np.random.uniform(low=0.0, high=8.0, size=[2, 3]).astype(dtype)
        value_data = np.random.uniform(low=0.0, high=8.0, size=value_shape or shape).astype(dtype)
        length_data = np.array([2, 2], dtype="int32")
        expected = np.concatenate([value_data, v3_data])
        expected = np.split(expected, indices_or_sections=[2, 4])
        check_tensor_array(
            mod, expected, *(v1_data, v2_data, v3_data, value_data, length_data), dtype=dtype
        )

    run("float32", [4, 3])
    run("int32", [4, 3])
    run(
        "int32",
        [relay.Any(), 3],
        [4, 3],
        [
            2,
        ],
    )
@tvm.testing.uses_gpu
def test_static_tensor_array_concat():
    """tensor_array_concat joins all array elements along axis 0."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        lhs = relay.var("v1")
        rhs = relay.var("v2")
        ta_ctor = prelude.get_global_var_static("tensor_array", dtype, shape)
        write_fn = prelude.get_global_var_static("tensor_array_write", dtype, shape)
        concat_fn = prelude.get_global_var_static("tensor_array_concat", dtype, shape)
        tensor_ctor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        ta = ta_ctor(relay.const(2))
        ta = write_fn(ta, relay.const(0), tensor_ctor(lhs))
        ta = write_fn(ta, relay.const(1), tensor_ctor(rhs))
        mod["main"] = relay.Function([lhs, rhs], concat_fn(ta))
        # Elements may have different leading-axis extents (shape uses Any).
        lhs_np = np.random.uniform(low=0.0, high=8.0, size=(2, 3)).astype(dtype)
        rhs_np = np.random.uniform(low=0.0, high=8.0, size=(1, 3)).astype(dtype)
        expected = [np.concatenate((lhs_np, rhs_np), axis=0)]
        check_tensor_array(mod, expected, *(lhs_np, rhs_np), dtype=dtype)

    check("float32", [relay.Any(), 3])
    check("int32", [relay.Any(), 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_gather():
    """tensor_array_gather stacks the elements selected by an index tensor."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        ta_ctor = prelude.get_global_var_static("tensor_array", dtype, shape)
        tensor_ctor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        write_fn = prelude.get_global_var_static("tensor_array_write", dtype, shape)
        gather_fn = prelude.get_global_var_static("tensor_array_gather", dtype, shape)
        val = relay.var("v")
        indices = relay.var("indice")
        # Fill all three slots with the same tensor.
        ta = ta_ctor(relay.const(3))
        for pos in range(3):
            ta = write_fn(ta, relay.const(pos), tensor_ctor(val))
        mod["main"] = relay.Function([val, indices], gather_fn(ta, indices))
        val_np = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        indices_np = np.array([0, 2], dtype="int32")
        check_tensor_array(mod, [np.stack([val_np, val_np])], *(val_np, indices_np), dtype=dtype)

    check("float32", [])
    check("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_array_stack():
    """tensor_array_stack stacks every element along a new leading axis."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        ta_ctor = prelude.get_global_var_static("tensor_array", dtype, shape)
        tensor_ctor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        write_fn = prelude.get_global_var_static("tensor_array_write", dtype, shape)
        stack_fn = prelude.get_global_var_static("tensor_array_stack", dtype, shape)
        val = relay.var("v")
        # Fill all three slots with the same tensor.
        ta = ta_ctor(relay.const(3))
        for pos in range(3):
            ta = write_fn(ta, relay.const(pos), tensor_ctor(val))
        mod["main"] = relay.Function([val], stack_fn(ta))
        val_np = np.random.uniform(low=0.0, high=8.0, size=shape).astype(dtype)
        check_tensor_array(mod, [np.stack([val_np, val_np, val_np])], val_np, dtype=dtype)

    check("float32", [])
    check("int32", [2, 3])
@tvm.testing.uses_gpu
def test_static_tensor_get_data():
    """tensor_get_data unwraps the tensor read back from a tensor array."""

    def check(dtype, shape):
        mod = tvm.IRModule()
        prelude = Prelude(mod)
        StaticTensorArrayOps(prelude, dtype, shape).register()
        ta_length = 3
        np_data_list = [
            np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(ta_length)
        ]
        arg_vars = [relay.var("v%d" % i) for i in range(ta_length)]
        idx = relay.var("n")
        tensor_ctor = prelude.get_tensor_ctor_static("tensor_constructor", dtype, shape)
        ta_ctor = prelude.get_global_var_static("tensor_array", dtype, shape)
        read_fn = prelude.get_global_var_static("tensor_array_read", dtype, shape)
        write_fn = prelude.get_global_var_static("tensor_array_write", dtype, shape)
        get_data_fn = prelude.get_global_var_static("tensor_get_data", dtype, shape)
        ta = ta_ctor(relay.const(ta_length))
        for pos, var in enumerate(arg_vars):
            ta = write_fn(ta, relay.const(pos), tensor_ctor(var))
        mod["main"] = relay.Function(arg_vars + [idx], get_data_fn(read_fn(ta, idx)))
        for pos in range(ta_length):
            check_tensor_array(mod, [np_data_list[pos]], *(np_data_list + [pos]), dtype=dtype)

    check("float32", [])
    check("int32", [2, 3])
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 31,293 | 38.814249 | 99 | py |
tvm | tvm-main/tests/python/relay/test_op_qnn_conv2_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay, te
from tvm.contrib import graph_executor
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
from tvm.relay.testing.temp_op_attr import TempOpAttr
def get_ref_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups,
    channels=None,
):
    """Reference graph: upcast both operands to int32, subtract their zero
    points, then run a regular nn.conv2d_transpose. The scale arguments are
    accepted only for signature parity with the qnn path and are unused here.
    """
    data_i32 = relay.op.cast(data, "int32")
    kernel_i32 = relay.op.cast(kernel, "int32")
    data_shifted = relay.op.subtract(data_i32, relay.const(input_zero_point, "int32"))
    kernel_shifted = relay.op.subtract(kernel_i32, relay.const(kernel_zero_point, "int32"))
    conv = relay.op.nn.conv2d_transpose(
        data_shifted,
        kernel_shifted,
        padding=padding,
        strides=strides,
        dilation=dilation,
        groups=groups,
        channels=channels,
        kernel_size=kernel_size,
        out_dtype=out_dtype,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    return relay.Function(relay.analysis.free_vars(conv), conv)
def get_qnn_func(
    data,
    kernel,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    channels,
    groups,
):
    """Build an IRModule whose main is a qnn.conv2d_transpose with the given
    quantization parameters (zero points/scales wrapped as relay constants)."""
    conv = relay.qnn.op.conv2d_transpose(
        data,
        kernel,
        input_zero_point=relay.const(input_zero_point, "int32"),
        kernel_zero_point=relay.const(kernel_zero_point, "int32"),
        input_scale=relay.const(input_scale, "float32"),
        kernel_scale=relay.const(kernel_scale, "float32"),
        kernel_size=kernel_size,
        strides=strides,
        dilation=dilation,
        padding=padding,
        out_dtype=out_dtype,
        groups=groups,
        channels=channels,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    return tvm.IRModule.from_expr(func)
def get_funcs(
    data_shape,
    data_dtype,
    kernel_shape,
    kernel_dtype,
    input_zero_point,
    kernel_zero_point,
    input_scale,
    kernel_scale,
    kernel_size,
    padding,
    strides,
    dilation,
    data_layout,
    kernel_layout,
    out_dtype,
    groups=1,
    channels=None,
):
    """Return (reference module, qnn module) built from the same placeholders."""
    data = relay.var("data", shape=data_shape, dtype=data_dtype)
    kernel = relay.var("kernel", shape=kernel_shape, dtype=kernel_dtype)
    # Both builders take the same quantization/conv attributes; pass them by
    # keyword since their positional orders differ.
    shared = dict(
        input_zero_point=input_zero_point,
        kernel_zero_point=kernel_zero_point,
        input_scale=input_scale,
        kernel_scale=kernel_scale,
        kernel_size=kernel_size,
        padding=padding,
        strides=strides,
        dilation=dilation,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
        out_dtype=out_dtype,
        groups=groups,
        channels=channels,
    )
    ref_func = get_ref_func(data, kernel, **shared)
    ref_func = run_infer_type(ref_func)
    ref_func = tvm.IRModule.from_expr(ref_func)
    qnn_func = get_qnn_func(data, kernel, **shared)
    return (ref_func, qnn_func)
def verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype):
    """Run both modules on the same random inputs and require bit-exact equality."""

    def random_tensor(shape, dtype):
        # Keeping inputs multiple of 4 because of a bug in Average Pool2d
        # https://discuss.tvm.apache.org/t/pool2d-gives-bad-output-for-integer-inputs/3377
        lo, hi = (0, 255) if dtype == "uint8" else (-128, 127)
        return np.random.randint(low=lo, high=hi, size=shape).astype(dtype)

    def execute(func, golden_data, golden_weight):
        with tvm.transform.PassContext(opt_level=2):
            params = {"kernel": golden_weight}
            libs = relay.build(func, "llvm", params=params)
            rt_mod = graph_executor.create(libs.graph_json, libs.lib, device=tvm.cpu(0))
            rt_mod.set_input("data", golden_data)
            rt_mod.set_input(**libs.params)
            rt_mod.run()
            return rt_mod.get_output(0).numpy()

    golden_data = random_tensor(data_shape, data_dtype)
    golden_weight = random_tensor(kernel_shape, kernel_dtype)
    golden_output = execute(ref_func, golden_data, golden_weight)
    qnn_output = execute(qnn_func, golden_data, golden_weight)
    np.testing.assert_equal(qnn_output, golden_output)
def test_no_zero_point():
    """With both zero points at 0 the qnn op must match a plain conv2d_transpose."""
    data_shape = (2, 1, 2, 4)
    kernel_shape = (1, 3, 2, 2)
    for dtype in ("uint8", "int8"):
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=dtype,
            input_zero_point=0,
            kernel_zero_point=0,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="IOHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_kernel_zero_point():
    """Only the kernel carries a non-zero zero point."""
    cases = [
        # (dtype, data_shape, kernel_shape, kernel_zero_point)
        ("uint8", (2, 4, 2, 4), (4, 3, 2, 2), 1),
        ("int8", (2, 1, 2, 4), (1, 3, 2, 2), 5),
    ]
    for dtype, data_shape, kernel_shape, kernel_zp in cases:
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=dtype,
            input_zero_point=0,
            kernel_zero_point=kernel_zp,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="IOHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_input_zero_point():
    """Only the input carries a non-zero zero point."""
    data_shape = (2, 4, 2, 4)
    kernel_shape = (4, 3, 2, 2)
    for dtype in ("uint8", "int8"):
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=dtype,
            input_zero_point=5,
            kernel_zero_point=0,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="IOHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_both_zero_point():
    """Both input and kernel carry non-zero zero points."""
    data_shape = (2, 4, 2, 4)
    kernel_shape = (4, 3, 2, 2)
    for dtype in ("uint8", "int8"):
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="IOHW",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, dtype, kernel_shape, dtype)
def test_different_dtype():
    """Mixed input/kernel dtypes (uint8 vs int8) in either order."""
    data_shape = (2, 4, 2, 4)
    kernel_shape = (4, 3, 2, 2)
    for data_dtype, kernel_dtype in (("uint8", "int8"), ("int8", "uint8")):
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype=data_dtype,
            kernel_shape=kernel_shape,
            kernel_dtype=kernel_dtype,
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NCHW",
            kernel_layout="IOHW",
            out_dtype="int32",
            channels=kernel_shape[1],
        )
        verify(ref_func, qnn_func, data_shape, data_dtype, kernel_shape, kernel_dtype)
def test_layout():
    """NHWC data with HWOI kernels."""
    cases = [
        # (data_shape NHWC, kernel_shape HWOI)
        ((2, 2, 4, 4), (2, 2, 3, 4)),
        ((2, 2, 4, 3), (2, 2, 1, 3)),
    ]
    for data_shape, kernel_shape in cases:
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=5,
            kernel_zero_point=3,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
            data_layout="NHWC",
            kernel_layout="HWOI",
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_padding():
    """Symmetric and asymmetric padding across both layouts."""
    cases = [
        # (data_shape, kernel_shape, data_layout, kernel_layout, kernel_zp, padding)
        ((1, 4, 2, 2), (4, 3, 2, 2), "NCHW", "IOHW", 5, (1, 1)),
        # Try different layout
        ((2, 2, 4, 4), (2, 2, 3, 4), "NHWC", "HWOI", 3, (1, 1)),
        # Try asymmetric padding
        ((2, 8, 6, 4), (2, 2, 3, 4), "NHWC", "HWOI", 3, (1, 1, 2, 2)),
    ]
    for data_shape, kernel_shape, data_layout, kernel_layout, kernel_zp, padding in cases:
        ref_func, qnn_func = get_funcs(
            data_shape=data_shape,
            data_dtype="uint8",
            kernel_shape=kernel_shape,
            kernel_dtype="uint8",
            input_zero_point=8,
            kernel_zero_point=kernel_zp,
            input_scale=1.0,
            kernel_scale=1.0,
            kernel_size=(2, 2),
            padding=padding,
            strides=(1, 1),
            dilation=(1, 1),
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_dtype="int32",
        )
        verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_const_folding():
    """FoldConstant should eliminate reshapes around a constant kernel."""
    data_shape = (2, 4, 2, 4)
    kernel_shape = (4, 3, 2, 2)
    kernel_dtype = "uint8"
    weight_np = np.random.randint(low=0, high=255, size=kernel_shape).astype(kernel_dtype)
    data = relay.var("data", shape=data_shape, dtype="uint8")
    qnn_mod = get_qnn_func(
        data,
        relay.const(weight_np),
        input_zero_point=8,
        kernel_zero_point=3,
        kernel_size=(2, 2),
        input_scale=1.0,
        kernel_scale=1.0,
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NCHW",
        kernel_layout="IOHW",
        out_dtype="int32",
        channels=kernel_shape[1],
        groups=1,
    )
    folded_mod = transform.FoldConstant()(qnn_mod)
    assert "reshape" not in folded_mod["main"].astext()
def test_broadcast_layout():
    """NHWC qnn conv2d_transpose output broadcasts with vector and full-rank biases."""
    data_shape = (1, 229, 229, 3)  # NHWC
    kernel_shape = (7, 7, 64, 3)  # HWOI
    _, qnn_func = get_funcs(
        data_shape=data_shape,
        data_dtype="uint8",
        kernel_shape=kernel_shape,
        kernel_dtype="int8",
        input_zero_point=8,
        kernel_zero_point=3,
        input_scale=1.0,
        kernel_scale=1.0,
        kernel_size=(7, 7),
        padding=(1, 1),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype="int32",
    )
    conv = qnn_func["main"].body
    vec_bias = relay.var("bias", shape=(64,), dtype="int32")
    full_bias = relay.var("bias2", shape=(1, 233, 233, 64), dtype="int32")
    # Check broadcast support on both lhs and rhs of each add.
    expr = relay.add(conv, full_bias)
    expr = relay.add(full_bias, expr)
    expr = relay.add(vec_bias, expr)
    expr = relay.add(expr, vec_bias)
    func = relay.Function(relay.analysis.free_vars(expr), expr)
    mod = tvm.IRModule.from_expr(func)
    with tvm.transform.PassContext(opt_level=3):
        libs = relay.build(mod, "llvm -mcpu=skylake-avx512")
def test_non_scalar_input_scale_zp():
    """One-element lists for the input scale/zero-point are accepted like scalars."""
    data_shape = (2, 1, 2, 4)
    kernel_shape = (1, 3, 2, 2)
    ref_func, qnn_func = get_funcs(
        data_shape=data_shape,
        data_dtype="uint8",
        kernel_shape=kernel_shape,
        kernel_dtype="uint8",
        input_zero_point=[0],
        kernel_zero_point=0,
        input_scale=[1.0],
        kernel_scale=1.0,
        kernel_size=(2, 2),
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NCHW",
        kernel_layout="IOHW",
        out_dtype="int32",
    )
    verify(ref_func, qnn_func, data_shape, "uint8", kernel_shape, "uint8")
def test_per_channel_kernel_scale():
    """A per-channel kernel-scale vector is accepted by qnn.conv2d_transpose."""
    data_shape = (2, 1, 2, 4)
    kernel_shape = (1, 3, 2, 2)
    data = relay.var("data", shape=data_shape, dtype="uint8")
    kernel = relay.var("kernel", shape=kernel_shape, dtype="uint8")
    scales = relay.const(np.array([2, 2, 2]).astype("float32"))
    conv = relay.qnn.op.conv2d_transpose(
        data,
        kernel,
        input_zero_point=relay.const(0, "int32"),
        kernel_zero_point=relay.const(0, "int32"),
        input_scale=relay.const(2.0, "float32"),
        kernel_scale=scales,
        kernel_size=(2, 2),
        channels=kernel_shape[0],
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_layout="NCHW",
        kernel_layout="IOHW",
        out_dtype="int32",
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    # Only checks that module construction succeeds.
    mod = tvm.IRModule.from_expr(func)
if __name__ == "__main__":
    test_no_zero_point()
    test_input_zero_point()
    test_kernel_zero_point()
    test_both_zero_point()
    test_different_dtype()
    test_layout()
    test_padding()
    test_const_folding()
    test_broadcast_layout()
    # Fixed: this test was missing from the manual runner (pytest collected
    # it anyway, but running the file as a script silently skipped it).
    test_non_scalar_input_scale_zp()
    test_per_channel_kernel_scale()
| 20,481 | 27.606145 | 95 | py |
tvm | tvm-main/tests/python/relay/test_vm_serialization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, missing-docstring, no-else-return
"""Unit tests for the Relay VM serialization and deserialization."""
import pytest
import numpy as np
import tvm
from tvm.runtime import vm as _vm
from tvm.relay import vm as rly_vm
from tvm import relay
from tvm.relay.scope_builder import ScopeBuilder
from tvm.relay import transform
from tvm.relay.prelude import Prelude
from tvm.contrib import utils
from tvm.relay import testing
def create_exec(f, target="llvm", params=None):
    """Compile `f` (a relay.Expr or tvm.IRModule) to a VM executable."""
    if isinstance(f, relay.Expr):
        # Wrap a bare expression into a fresh module as "main".
        mod = tvm.IRModule()
        mod["main"] = f
    else:
        assert isinstance(f, tvm.IRModule), "expected mod as tvm.IRModule"
        mod = f
    return rly_vm.compile(mod, target=target, params=params)
def get_serialized_output(mod, *data, params=None, target="llvm", device=tvm.cpu()):
    """Compile `mod`, round-trip it through save()/load_exec, then run on `data`."""
    executable = create_exec(mod, target, params=params)
    code, lib = executable.save()
    deserialized = _vm.Executable.load_exec(code, lib)
    return _vm.VirtualMachine(deserialized, device).run(*data)
def run_network(mod, params, dtype="float32"):
    """Check serialized-VM output against direct VM evaluation on random data."""

    def direct_eval(data, device):
        out = relay.create_executor("vm", mod=mod, device=device).evaluate()(data, **params)
        return out.numpy().astype(dtype)

    data_shape = [int(dim) for dim in mod["main"].checked_type.arg_types[0].shape]
    data = tvm.nd.array(np.random.uniform(size=data_shape).astype(dtype))
    dev = tvm.cpu(0)
    expected = direct_eval(data, dev)
    actual = get_serialized_output(mod, data, params=params, target="llvm", device=dev)
    tvm.testing.assert_allclose(actual.numpy().astype(dtype), expected, rtol=1e-5, atol=1e-5)
def test_serializer():
    """Check the introspection surface of a compiled executable: global
    function names, fused primitive-op names, disassembled bytecode text,
    and the types returned by ``save()``.
    """
    mod = tvm.IRModule({})
    # f1(x) = x + 1
    a = relay.const(1.0, "float32")
    x = relay.var("x", shape=(10, 10), dtype="float32")
    f1 = relay.Function([x], x + a)
    glb_f1 = relay.GlobalVar("f1")
    mod[glb_f1] = f1
    # TODO(@jroesch): look into optimizing away the need to do this
    mod = transform.InferType()(mod)
    # f2(y) = y - 2
    b = relay.const(2.0, "float32")
    y = relay.var("y", shape=(10, 10), dtype="float32")
    f2 = relay.Function([y], y - b)
    glb_f2 = relay.GlobalVar("f2")
    mod[glb_f2] = f2
    # TODO(@jroesch): look into optimizing away the need to do this
    mod = transform.InferType()(mod)
    # main(x1, y1) = f1(x1) * f2(y1)
    x1 = relay.var("x1", shape=(10, 10), dtype="float32")
    y1 = relay.var("y1", shape=(10, 10), dtype="float32")
    main = relay.Function([x1, y1], glb_f1(x1) * glb_f2(y1))
    mod["main"] = main
    exe = create_exec(mod)
    # All three Relay-level functions must appear as VM globals.
    glbs = exe.globals
    assert len(glbs) == 3
    assert "f1" in glbs
    assert "f2" in glbs
    assert "main" in glbs
    # Fusion produces one kernel per arithmetic op used above.
    prim_ops = exe.primitive_ops
    assert any(item.startswith("vm_mod_fused_add") for item in prim_ops)
    assert any(item.startswith("vm_mod_fused_subtract") for item in prim_ops)
    assert any(item.startswith("vm_mod_fused_multiply") for item in prim_ops)
    # Disassembly mentions every function signature.
    code = exe.bytecode
    assert "main(x1, y1)" in code
    assert "f1(x)" in code
    assert "f2(y)" in code
    code, lib = exe.save()
    assert isinstance(code, bytearray)
    assert isinstance(lib, tvm.runtime.Module)
def test_save_load():
    """Round-trip an executable through the filesystem: export the kernel
    library as a shared object, dump the bytecode blob, reload both, and
    check the reloaded VM computes x + x.
    """
    x = relay.var("x", shape=(10, 10))
    f = relay.Function([x], x + x)
    x_data = np.random.rand(10, 10).astype("float32")
    # serialize.
    vm = create_exec(f)
    code, lib = vm.save()
    assert isinstance(code, bytearray)
    # save and load the code and lib file.
    tmp = utils.tempdir()
    path_lib = tmp.relpath("lib.so")
    lib.export_library(path_lib)
    with open(tmp.relpath("code.ro"), "wb") as fo:
        fo.write(code)
    loaded_lib = tvm.runtime.load_module(path_lib)
    # Close the file deterministically; the original relied on the GC to
    # release the handle returned by a bare open().
    with open(tmp.relpath("code.ro"), "rb") as fi:
        loaded_code = bytearray(fi.read())
    # deserialize.
    des_exec = _vm.Executable.load_exec(loaded_code, loaded_lib)
    des_vm = _vm.VirtualMachine(des_exec, tvm.cpu())
    res = des_vm.run(x_data)
    tvm.testing.assert_allclose(res.numpy(), x_data + x_data)
def test_const():
    """A constant embedded in the function body survives serialization."""
    inp = relay.var("x", shape=(10, 10), dtype="float32")
    one = relay.const(1.0, "float32")
    func = relay.Function([inp], inp + one)
    data = np.random.rand(10, 10).astype("float32")
    out = get_serialized_output(func, data)
    tvm.testing.assert_allclose(out.numpy(), data + 1)
def test_if():
    """If-node through the serialized VM: return x when x == y elementwise,
    otherwise y.  ``min`` over the flattened equality mask is true only when
    every element matches, so identical inputs take the then-branch and
    distinct random inputs take the else-branch.
    """
    x = relay.var("x", shape=(10, 10))
    y = relay.var("y", shape=(10, 10))
    equal = relay.op.equal(x, y)
    equal = relay.op.nn.batch_flatten(equal)
    f = relay.Function([x, y], relay.If(relay.op.min(equal, axis=[0, 1]), x, y))
    x_data = np.random.rand(10, 10).astype("float32")
    y_data = np.random.rand(10, 10).astype("float32")
    # same
    res = get_serialized_output(f, x_data, x_data)
    tvm.testing.assert_allclose(res.numpy(), x_data)
    # diff
    res = get_serialized_output(f, x_data, y_data)
    tvm.testing.assert_allclose(res.numpy(), y_data)
def test_loop():
    """Recursive global function through the serialized VM.

    ``sum_up(i, accum)`` returns ``accum`` when i == 0, otherwise recurses
    with (i - 1, accum + i), i.e. it sums 1..i into accum.  With
    ``loop_bound = 0`` the recursion terminates immediately and returns 0,
    matching ``sum(range(1, 1))``.
    """
    mod = tvm.IRModule({})
    sum_up = relay.GlobalVar("sum_up")
    i = relay.var("i", shape=[], dtype="int32")
    accum = relay.var("accum", shape=[], dtype="int32")
    sb = ScopeBuilder()
    with sb.if_scope(relay.equal(i, relay.const(0, "int32"))):
        sb.ret(accum)
    with sb.else_scope():
        one_less = relay.subtract(i, relay.const(1, "int32"))
        new_accum = relay.add(accum, i)
        # Recursive call through the GlobalVar, not the Python function.
        sb.ret(relay.Call(sum_up, [one_less, new_accum]))
    func = relay.Function([i, accum], sb.get())
    mod[sum_up] = func
    mod = transform.InferType()(mod)
    loop_bound = 0
    i_data = np.array(loop_bound, dtype="int32")
    accum_data = np.array(0, dtype="int32")
    iarg = relay.var("i", shape=[], dtype="int32")
    aarg = relay.var("accum", shape=[], dtype="int32")
    mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg))
    result = get_serialized_output(mod, i_data, accum_data)
    tvm.testing.assert_allclose(result.numpy(), sum(range(1, loop_bound + 1)))
def test_tuple():
    """TupleGetItem through the serialized VM: select field 1 of a
    (Tensor[(1,)], Tensor[(10,)]) tuple argument.

    Fix: the first tuple field is annotated as shape (1,), but the original
    supplied a 41-element array for it; the data now matches the annotation.
    """
    ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))])
    tup = relay.var("tup", type_annotation=ttype)
    f = relay.Function([tup], relay.TupleGetItem(tup, 1))
    i_data = np.random.rand(1).astype("float32")
    j_data = np.random.rand(10).astype("float32")
    result = get_serialized_output(f, (i_data, j_data))
    tvm.testing.assert_allclose(result.numpy(), j_data)
def test_adt_list():
    """ADT construction through the serialized VM: build the Prelude list
    cons(3, cons(2, cons(1, nil))) and check the returned ADT object
    unpacks to [3, 2, 1].

    Each cons cell comes back as a 2-field ADT value: (head, tail).
    """
    mod = tvm.IRModule()
    p = Prelude(mod)
    _, cons, nil = mod.get_type("List")
    l1 = cons(relay.const(1), nil())
    l21 = cons(relay.const(2), l1)
    l321 = cons(relay.const(3), l21)
    f = relay.Function([], l321)
    mod["main"] = f
    result = get_serialized_output(mod)
    # Every cons cell has exactly two fields; walk the spine.
    assert len(result) == 2
    assert len(result[1]) == 2
    assert len(result[1][1]) == 2
    res = []
    res.append(result[0].numpy().tolist())
    res.append(result[1][0].numpy().tolist())
    res.append(result[1][1][0].numpy().tolist())
    tvm.testing.assert_allclose(res, np.array([3, 2, 1]))
def test_adt_compose():
    """Higher-order function through the serialized VM: use the Prelude's
    ``compose`` to build add_two = add_one . add_one and check that
    main(x) == x + 2.
    """
    mod = tvm.IRModule()
    p = Prelude(mod)
    compose = mod.get_global_var("compose")
    # add_one = fun x -> x + 1
    sb = relay.ScopeBuilder()
    x = relay.var("x", "float32")
    x1 = sb.let("x1", x)
    xplusone = x1 + relay.const(1.0, "float32")
    sb.ret(xplusone)
    body = sb.get()
    add_one = relay.GlobalVar("add_one")
    add_one_func = relay.Function([x], body)
    # add_two = compose(add_one, add_one)
    sb = relay.ScopeBuilder()
    y = relay.var("y", "float32")
    add_two_func = sb.let("add_two", compose(add_one_func, add_one_func))
    add_two_res = add_two_func(y)
    sb.ret(add_two_res)
    add_two_body = sb.get()
    mod[add_one] = add_one_func
    f = relay.Function([y], add_two_body)
    mod["main"] = f
    x_data = np.array(np.random.rand()).astype("float32")
    result = get_serialized_output(mod, x_data)
    tvm.testing.assert_allclose(result.numpy(), x_data + 2.0)
def test_closure():
    """A closure capturing an outer variable survives serialization:
    ((fun y -> fun x -> x + y) 1.0) 2.0 == 3.0.
    """
    x = relay.var("x", shape=())
    y = relay.var("y", shape=())
    inner = relay.Function([x], x + y)  # y is free here, captured below
    outer = relay.Function([y], inner)
    closure = outer(relay.const(1.0))  # binds y = 1.0
    call = closure(relay.const(2.0))  # binds x = 2.0
    result = get_serialized_output(call)
    tvm.testing.assert_allclose(result.numpy(), 3.0)
def test_synthetic():
    """End-to-end serialized-VM check on the synthetic testing workload."""
    module, parameters = testing.synthetic.get_workload()
    run_network(module, parameters)
def test_mobilenet():
    """End-to-end serialized-VM check on MobileNet with batch size 1."""
    module, parameters = testing.mobilenet.get_workload(batch_size=1)
    run_network(module, parameters)
def test_vm_shape_of():
    """Dynamic shapes through the serialized VM: relu a fully-dynamic-rank-3
    input, then reshape it with a runtime-supplied newshape of (1, -1).
    The flattened result must equal the flattened input (relu of uniform
    [0, 1) data is the identity here since all values are non-negative).
    """
    x = relay.var("x", shape=(relay.Any(), relay.Any(), relay.Any()), dtype="float32")
    relu_x = relay.nn.relu(x)
    data = np.random.uniform(size=(2, 3, 4)).astype("float32")
    args = [data]
    # newshape arrives as a runtime tensor argument, not a compile-time attr.
    newshape_var = relay.var("newshape", shape=(2,), dtype="int64")
    args.append(np.array((1, -1), dtype="int64"))
    main = relay.Function([x, newshape_var], relay.reshape(relu_x, newshape=newshape_var))
    res = get_serialized_output(main, *args).numpy()
    tvm.testing.assert_allclose(res.flatten(), data.flatten())
def test_dynamic_bcast():
    """Broadcasting with a dynamic batch dimension through the serialized
    VM: add a (Any, 2) input to a (3, 2) input and compare against numpy
    broadcasting, on every enabled target.
    """
    dtype = "float32"
    x = relay.var("x", shape=(relay.Any(), 2), dtype=dtype)
    y = relay.var("y", shape=(3, 2), dtype=dtype)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([x, y], relay.add(x, y))
    x_data = np.random.uniform(size=(1, 2)).astype(dtype)
    y_data = np.random.uniform(size=(3, 2)).astype(dtype)
    res_np = np.add(x_data, y_data)
    for target, dev in testing.enabled_targets():
        res = get_serialized_output(mod, *(x_data, y_data), target=target, device=dev)
        tvm.testing.assert_allclose(res.numpy(), res_np)
if __name__ == "__main__":
tvm.testing.main()
| 10,642 | 32.259375 | 95 | py |
tvm | tvm-main/tests/python/relay/test_pass_canonicalize_cast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.relay as relay
import tvm.relay.transform as _transform
def test_canonicalize_cast():
    """CanonicalizeCast must duplicate a cast that feeds multiple consumers,
    so each add gets its own cast of the conv output.  Build the shared-cast
    form, run the pass, and structurally compare against the hand-written
    duplicated-cast form.
    """

    def before(data, conv_weight, bias1, bias2):
        # One cast (x1) shared by both adds -- the input to the pass.
        x = relay.nn.conv2d(
            data, conv_weight, channels=16, kernel_size=(3, 3), padding=(1, 1), out_dtype="int8"
        )
        x1 = relay.cast(x, dtype="int32")
        y1 = relay.add(x1, bias1)
        y2 = relay.add(x1, bias2)
        y = relay.add(y1, y2)
        return relay.Function([data, conv_weight, bias1, bias2], y)

    def expected(data, conv_weight, bias1, bias2):
        # Two independent casts (x1, x2) -- the expected output of the pass.
        x = relay.nn.conv2d(
            data, conv_weight, channels=16, kernel_size=(3, 3), padding=(1, 1), out_dtype="int8"
        )
        x1 = relay.cast(x, dtype="int32")
        x2 = relay.cast(x, dtype="int32")
        y1 = relay.add(x1, bias1)
        y2 = relay.add(x2, bias2)
        y = relay.add(y1, y2)
        return relay.Function([data, conv_weight, bias1, bias2], y)

    def check(shape):
        data = relay.var("data", shape=shape, dtype="int8")
        conv_weight = relay.var("weight")
        bias1 = relay.var("bias1", shape=(16, 1, 1), dtype="int32")
        bias2 = relay.var("bias2", shape=(16, 1, 1), dtype="int32")
        y = before(data, conv_weight, bias1, bias2)
        mod = tvm.IRModule.from_expr(y)
        seq = tvm.transform.Sequential(
            [_transform.InferType(), _transform.CanonicalizeCast(), _transform.InferType()]
        )
        with tvm.transform.PassContext(opt_level=3):
            mod = seq(mod)
        y = mod["main"]
        # Type-infer the expected function through the module so both sides
        # carry checked types before the structural comparison.
        y_expected = expected(data, conv_weight, bias1, bias2)
        gv = relay.GlobalVar("expected")
        mod[gv] = y_expected
        mod = _transform.InferType()(mod)
        y_expected = mod["expected"]
        assert tvm.ir.structural_equal(y, y_expected)

    check((1, 16, 7, 7))
if __name__ == "__main__":
test_canonicalize_cast()
| 2,695 | 36.971831 | 96 | py |
tvm | tvm-main/tests/python/relay/test_pass_annotate_spans_defuse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotating spans."""
import tvm
import tvm.relay as relay
from tvm.relay import testing
import tvm.testing
def test_annotate_spans_compatibility():
    """AnnotateSpans followed by DefuseOps must run without error on an
    already-optimized (fused) conv+batch_norm module -- a smoke test that
    span annotation does not break later passes.  No numerical check.
    """
    data = relay.var("data", relay.TensorType((1, 3, 64, 64), "float32"))
    weight = relay.var("weight")
    bn_gamma = relay.var("bn_gamma")
    bn_beta = relay.var("bn_beta")
    bn_mmean = relay.var("bn_mean")
    bn_mvar = relay.var("bn_var")
    simple_net = relay.nn.conv2d(
        data=data, weight=weight, kernel_size=(3, 3), channels=3, padding=(1, 1)
    )
    # batch_norm returns a tuple; [0] selects the normalized output.
    simple_net = relay.nn.batch_norm(simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar)[0]
    simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
    module, params = testing.create_workload(simple_net)
    # Apply some simple passes to legalize the IR.
    with tvm.transform.PassContext(opt_level=0):
        module, params = relay.optimize(
            module, target=tvm.testing.enabled_targets()[0][0], params=params
        )
    seq = tvm.transform.Sequential([relay.transform.AnnotateSpans(), relay.transform.DefuseOps()])
    with tvm.transform.PassContext(opt_level=3):
        module = seq(module)
if __name__ == "__main__":
tvm.testing.main()
| 2,017 | 35.035714 | 98 | py |
tvm | tvm-main/tests/python/relay/benchmarking/benchmark_vm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Benchmarking Relay VM using models from MXNet."""
import numpy as np
import tvm
from tvm import te
from tvm.contrib import graph_executor
from tvm import relay
from tvm.runtime import container
from tvm.runtime import vm as vm_rt
from tvm.relay import testing
from tvm.relay import vm
def benchmark_execution(
    mod,
    params,
    measure=True,
    data_shape=(1, 3, 224, 224),
    out_shape=(1, 1000),
    dtype="float32",
    model="unknown",
):
    """Run `mod` through both the graph executor and the Relay VM on every
    enabled target, assert the outputs agree, and (when `measure` is True)
    print timing statistics for each executor.

    `model` is only used for labeling the printed measurements.
    """

    def get_graph_executor_output(
        mod, data, params, target, dev, dtype="float32", number=2, repeat=20
    ):
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target, params=params)

        m = graph_executor.GraphModule(lib["default"](dev))
        # set inputs
        m.set_input("data", data)
        m.run()
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))

        if measure:
            print("Evaluate graph executor inference cost of {} on " "{}".format(model, repr(dev)))
            ftimer = m.module.time_evaluator("run", dev, number=1, repeat=20)
            # Measure in millisecond.
            prof_res = np.array(ftimer().results) * 1000
            print(
                "Mean graph executor inference time (std dev): %.2f ms (%.2f ms)"
                % (np.mean(prof_res), np.std(prof_res))
            )

        return out.numpy()

    def get_vm_output(mod, data, params, target, dev, dtype="float32", number=2, repeat=20):
        with tvm.transform.PassContext(opt_level=3):
            exe = vm.compile(mod, target, params=params)
            rly_vm = vm_rt.VirtualMachine(exe, dev)
            result = rly_vm.run(data)

        if measure:
            print("Evaluate vm inference cost of {} on {}".format(model, repr(dev)))
            ftimer = rly_vm.module.time_evaluator("invoke", dev, number=number, repeat=repeat)
            # Measure in millisecond.
            prof_res = np.array(ftimer("main", data).results) * 1000
            print(
                "Mean vm inference time (std dev): %.2f ms (%.2f ms)"
                % (np.mean(prof_res), np.std(prof_res))
            )

        return result.numpy().astype(dtype)

    # random input
    data = np.random.uniform(size=data_shape).astype(dtype)

    for target, dev in testing.enabled_targets():
        tvm_out = get_graph_executor_output(
            mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype
        )
        vm_out = get_vm_output(mod, tvm.nd.array(data.astype(dtype)), params, target, dev, dtype)
        tvm.testing.assert_allclose(vm_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_mlp():
    """Benchmark the MLP workload (MNIST-sized input, 10-way output)."""
    shape = (1, 1, 28, 28)
    module, parameters = testing.mlp.get_workload(1)
    benchmark_execution(module, parameters, data_shape=shape, out_shape=(1, 10), model="mlp")
def test_vgg():
    """Benchmark VGG at depths 11 and 16."""
    for depth in (11, 16):
        module, parameters = testing.vgg.get_workload(1, num_layers=depth)
        benchmark_execution(module, parameters, model="vgg" + str(depth))
def test_resnet():
    """Benchmark ResNet at depths 18 and 50."""
    for depth in (18, 50):
        module, parameters = testing.resnet.get_workload(batch_size=1, num_layers=depth)
        benchmark_execution(module, parameters, model="resnet" + str(depth))
def test_squeezenet():
    """Benchmark both SqueezeNet variants."""
    for version in ("1.0", "1.1"):
        module, parameters = testing.squeezenet.get_workload(version=version)
        benchmark_execution(module, parameters, model="squeezenet" + version)
def test_inception_v3():
    """Benchmark Inception-V3 with its native 299x299 input size."""
    shape = (3, 299, 299)
    module, parameters = testing.inception_v3.get_workload(image_shape=shape)
    benchmark_execution(module, parameters, data_shape=(1, 3, 299, 299), model="inception_v3")
def test_dqn():
    """Benchmark the DQN workload (Atari-style 84x84x4 input, 18 actions)."""
    shape = (1, 4, 84, 84)
    module, parameters = testing.dqn.get_workload(batch_size=1, image_shape=shape)
    benchmark_execution(module, parameters, data_shape=shape, out_shape=(1, 18))
def test_dcgan():
    """Benchmark the DCGAN generator: 100-dim latent vector to 3x64x64 image."""
    latent_shape = (1, 100)
    module, parameters = testing.dcgan.get_workload(batch_size=1)
    benchmark_execution(module, parameters, data_shape=latent_shape, out_shape=(1, 3, 64, 64))
def test_mobilenet():
    """Benchmark MobileNet with batch size 1."""
    module, parameters = testing.mobilenet.get_workload(batch_size=1)
    benchmark_execution(module, parameters, model="mobilenet")
# TODO: enable when the low building performance (several minutes) fixed.
def test_mobilenet_nhwc():
    """MobileNet in NHWC layout; compiled but not timed (measure=False)
    because NHWC builds are currently very slow -- see the TODO above.
    """
    image_shape = (1, 224, 224, 3)
    mod, params = testing.mobilenet.get_workload(
        batch_size=1, image_shape=image_shape[1:], layout="NHWC"
    )
    benchmark_execution(mod, params, measure=False, data_shape=image_shape)
def test_densenet():
    """Benchmark DenseNet with batch size 1."""
    module, parameters = testing.densenet.get_workload(batch_size=1)
    benchmark_execution(module, parameters, model="densenet")
if __name__ == "__main__":
test_resnet()
test_vgg()
test_squeezenet()
test_mobilenet()
test_densenet()
test_inception_v3()
test_mlp()
test_dqn()
test_dcgan()
| 5,661 | 33.108434 | 99 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_depthwise_conv2d_nhwc_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from utils.adreno_utils import build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1(remote, target, dtype):
    """Depthwise 3x3 conv2d (NHWC data / HWOI kernel) on a 1x129x129x144 input.

    The original also constructed an add(bias)+relu epilogue and then
    immediately rebound `mod` to the bare convolution, so only the
    convolution was ever compiled; that dead construction is removed here.
    `bias` remains in the function signature (unused in the body) so the
    supplied parameter dictionary still binds.
    """
    input_shape = (1, 129, 129, 144)
    filter_shape = (3, 3, 144, 1)
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    mod = relay.Function([A, B, bias], conv)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_4_35_35_576x3_3_576_1(remote, target, dtype):
    """Depthwise 3x3 conv2d (NHWC/HWOI), batch 4, 576 channels."""
    input_shape = (4, 35, 35, 576)
    filter_shape = (3, 3, 576, 1)
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # NOTE(review): this rebinding discards the bias+relu epilogue built
    # above -- only the bare convolution is compiled; confirm intended.
    mod = relay.Function([A, B, bias], conv)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_deeplabv3_1_129_129_144x3_3_144_1_with_padding(remote, target, dtype):
    """Depthwise 3x3 conv2d (NHWC/HWOI) with padding [3,3,3,3] and stride 2,
    followed by bias-add + relu.
    """
    input_shape = (1, 129, 129, 144)
    filter_shape = (3, 3, 144, 1)
    kernel_size = (filter_shape[0], filter_shape[1])
    bias_shape = (filter_shape[2],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        groups=filter_shape[2],
        channels=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_1_513_513_7x3_3_7_1(remote, target, dtype):
    """Depthwise 3x3 conv2d (NHWC/HWOI) with 7 channels -- a channel count
    that is not a multiple of 4, exercising non-vectorizable tails --
    followed by bias-add + relu.
    """
    input_shape = (1, 513, 513, 7)
    filter_shape = (3, 3, 7, 1)
    bias_shape = (filter_shape[2],)
    kernel_size = (filter_shape[0], filter_shape[1])
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        channels=filter_shape[2],
        groups=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    # np.ones is just a buffer allocation; the Xavier initializer below
    # overwrites the contents in place.
    filter_data = np.ones(filter_shape).astype(dtype)
    bias_data = np.ones(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_1_513_513_3x3_3_3_1(remote, target, dtype):
    """Depthwise 3x3 conv2d (NHWC/HWOI) with only 3 channels (below the
    texture vector width of 4), followed by bias-add + relu.
    """
    input_shape = (1, 513, 513, 3)
    filter_shape = (3, 3, 3, 1)
    bias_shape = (filter_shape[2],)
    kernel_size = (filter_shape[0], filter_shape[1])
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype=dtype,
        channels=filter_shape[2],
        groups=filter_shape[2],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    # np.ones is just a buffer allocation; Xavier overwrites it in place.
    filter_data = np.ones(filter_shape).astype(dtype)
    bias_data = np.ones(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_to_3_channels(remote, target, dtype):
    """1x1 depthwise conv2d (groups == channels == 3) with no epilogue;
    the trailing [] disables the default texture-layout output check in
    build_run_compare.
    """
    input_shape = (1, 200, 200, 3)
    filter_shape = (1, 1, 3, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    D = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWOI",
        padding=[0, 0, 0, 0],
        out_dtype=dtype,
        channels=3,
        groups=3,
        kernel_size=(1, 1),
    )
    mod = relay.Function([A, B], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    initializer("weight", filter_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, [])
if __name__ == "__main__":
tvm.testing.main()
| 8,639 | 32.103448 | 96 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import rpc
import pytest
@pytest.fixture(scope="session")
def remote():
    """Session-scoped RPC session to a tracker-managed device.

    Returns None when any of the tracker/device environment variables is
    unset, so tests can skip or fall back to local execution.
    """
    required = ("TVM_TRACKER_HOST", "TVM_TRACKER_PORT", "RPC_DEVICE_KEY")
    if not all(name in os.environ for name in required):
        return None
    host = os.environ["TVM_TRACKER_HOST"]
    port = int(os.environ["TVM_TRACKER_PORT"])
    device_key = os.environ["RPC_DEVICE_KEY"]
    tracker = rpc.connect_tracker(host, port)
    return tracker.request(device_key, priority=0, session_timeout=600)
| 1,419 | 34.5 | 81 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_conv2d_nhwc_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
import pytest
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_257_257_32x1_1_32_16(remote, target, dtype):
    """1x1 conv2d (NHWC/HWIO), 32 -> 16 channels, with bias-add + relu."""
    input_shape = (1, 257, 257, 32)
    filter_shape = (1, 1, 32, 16)
    bias_shape = (filter_shape[-1],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=dtype,
        channels=filter_shape[-1],
        kernel_size=(1, 1),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_257_257_32x1_1_32_16_with_padding(remote, target, dtype):
    """1x1 conv2d (NHWC/HWIO) with padding [3,3,3,3] and stride 2,
    followed by bias-add + relu.
    """
    input_shape = (1, 257, 257, 32)
    filter_shape = (1, 1, 32, 16)
    bias_shape = (filter_shape[-1],)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        channels=filter_shape[-1],
        kernel_size=(1, 1),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4_35_35_32x3_3_144_16(remote, target, dtype):
    """3x3 conv2d (NHWC/HWIO), batch 4, 32 -> 16 channels, bias-add + relu."""
    input_shape = (4, 35, 35, 32)
    filter_shape = (3, 3, 32, 16)
    bias_shape = (filter_shape[-1],)
    kernel_size = (filter_shape[0], filter_shape[1])
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=dtype,
        channels=filter_shape[-1],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_deeplabv3_1_513_513_3x3_3_3_32(remote, target, dtype):
    """3x3 conv2d (NHWC/HWIO) from a 3-channel (RGB-like) input to 32
    channels, followed by bias-add + relu.
    """
    input_shape = (1, 513, 513, 3)
    filter_shape = (3, 3, 3, 32)
    bias_shape = (filter_shape[-1],)
    kernel_size = (filter_shape[0], filter_shape[1])
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype=dtype,
        channels=filter_shape[-1],
        kernel_size=kernel_size,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    # np.ones is just a buffer allocation; Xavier overwrites it in place.
    filter_data = np.ones(filter_shape).astype(dtype)
    bias_data = np.ones(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad(remote, target, dtype):
    """3x3 stride-2 conv2d (NHWC/HWIO) without padding, bias-add + relu;
    run through the gpu_preprocess hook before compilation.
    """
    input_shape = (1, 42, 42, 32)
    filter_shape = (3, 3, 32, 96)
    bias_shape = (1, 1, 1, 96)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad_pass(remote, target, dtype):
    """NHWC/HWIO 2x2 stride-2 conv2d without padding, followed by bias-add and relu."""
    data_shape = (1, 40, 40, 32)
    weight_shape = (2, 2, 32, 96)
    bias_shape = (1, 1, 1, 96)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(2, 2),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_35_35_strides(remote, target, dtype):
    """NHWC/HWIO 5x5 stride-1 conv2d with symmetric padding 2, bias-add and relu."""
    data_shape = (1, 35, 35, 48)
    weight_shape = (5, 5, 48, 64)
    bias_shape = (1, 1, 1, 64)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[2, 2, 2, 2],
        strides=[1, 1],
        out_dtype=dtype,
        channels=64,
        kernel_size=(5, 5),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_resnet50_v2_nhwc_3c(remote, target, dtype):
    """ResNet50-v2 stem: NHWC 7x7 stride-2 conv2d over 3-channel input, bias-add and relu."""
    data_shape = (1, 224, 224, 3)
    weight_shape = (7, 7, 3, 64)
    bias_shape = (1, 1, 1, 64)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        kernel_size=(7, 7),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters (this test seeds with 1).
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_nhwc_3c(remote, target, dtype):
    """InceptionV3 stem: NHWC 3x3 stride-2 conv2d over 3-channel input, bias-add and relu."""
    data_shape = (1, 299, 299, 3)
    weight_shape = (3, 3, 3, 64)
    bias_shape = (1, 1, 1, 64)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        kernel_size=(3, 3),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_1x1_16c16spatial(remote, target, dtype):
    """NHWC stride-2 conv2d with bias-add and relu.

    NOTE(review): the test name says 1x1 but the kernel is actually 4x4 — confirm
    whether the name or the shapes are intended.
    """
    data_shape = (1, 128, 128, 16)
    weight_shape = (4, 4, 16, 32)
    bias_shape = (1, 1, 1, 32)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(4, 4),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4_16c16pad(remote, target, dtype):
    """NHWC 4x4 stride-2 conv2d with asymmetric padding [3, 3, 0, 0], bias-add and relu."""
    data_shape = (1, 256, 256, 32)
    weight_shape = (4, 4, 32, 32)
    bias_shape = (1, 1, 1, 32)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(4, 4),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4x4_16c16pad(remote, target, dtype):
    """NHWC 4x4 stride-2 conv2d down to 4 output channels, bias-add and relu."""
    data_shape = (1, 256, 256, 32)
    weight_shape = (4, 4, 32, 4)
    bias_shape = (1, 1, 1, 4)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[3, 3, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=4,
        kernel_size=(4, 4),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_yolov3_v2_nhwc_3c(remote, target, dtype):
    """YOLOv3 head: NHWC 1x1 conv2d to 255 channels, no bias or activation."""
    data_shape = (1, 13, 13, 1024)
    weight_shape = (1, 1, 1024, 255)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=255,
        kernel_size=(1, 1),
    )
    mod = relay.Function([data, weight], conv_out)
    # Deterministic Xavier-initialized weight.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    xavier("weight", weight_np)
    params = {"weight": tvm.nd.array(weight_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_vgg16_winograd_4d(remote, target, dtype):
    """VGG16-style NHWC 3x3 conv2d forced onto the winograd schedule via a tuning log.

    A pre-baked autotvm record selecting ``conv2d_nhwc_winograd.image2d`` is written
    to a temporary stat file; the compiled graph is then checked to contain a
    winograd kernel.
    """
    data_shape = (1, 28, 28, 512)
    weight_shape = (3, 3, 512, 512)
    bias_shape = (1, 1, 1, 512)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=512,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    # Hand-written tuning record that pins this workload to the winograd template.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 28, 28, 512], "{dtype}"], ["TENSOR", [3, 3, 512, 512], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    assert len(re.findall("winograd", graph)) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_vgg16_winograd_4d_expand_spatial_dims(remote, target, dtype):
    """Single-input-channel NHWC 3x3 conv2d forced onto the winograd schedule.

    Same stat-file mechanism as ``test_conv2d_vgg16_winograd_4d`` but with a
    1-channel input, exercising the spatial-dims expansion path.
    """
    data_shape = (1, 28, 28, 1)
    weight_shape = (3, 3, 1, 64)
    bias_shape = (1, 1, 1, 64)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    # Hand-written tuning record that pins this workload to the winograd template.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 28, 28, 1], "{dtype}"], ["TENSOR", [3, 3, 1, 64], "{dtype}"], [1, 1], [0, 0, 0, 0], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    assert len(re.findall("winograd", graph)) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_conv(remote, target, dtype):
    """Two chained NHWC 3x3 conv2d ops where the first is pinned to winograd.

    A pre-baked autotvm record selecting ``conv2d_nhwc_winograd.image2d`` for the
    first convolution is written to a temporary stat file; the compiled graph is
    then checked to contain a winograd kernel.

    Fix: the original version also created and Xavier-initialized bias arrays
    (``bias_data3``/``bias_data4``) that were never used — the model has no bias
    and ``params1`` only contained the weights. The dead locals and their
    initializer calls are removed. (This shifts the numpy RNG stream for
    ``weight4``, which is harmless: the test compares the compiled output against
    a reference computed from the same parameters.)
    """
    input_shape = (1, 3, 3, 4)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    filter_shape3 = (3, 3, 4, 8)
    B3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    D = relay.nn.conv2d(
        A,
        B3,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=8,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    filter_shape4 = (3, 3, 8, 8)
    B4 = relay.var("weight4", shape=filter_shape4, dtype=dtype)
    D = relay.nn.conv2d(
        D,
        B4,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=8,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    mod = relay.Function([A, B3, B4], D)
    # Deterministic Xavier-initialized weights.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    filter_data4 = np.zeros(filter_shape4).astype(dtype)
    initializer("weight", filter_data3)
    initializer("weight", filter_data4)
    params1 = {
        "weight3": tvm.nd.array(filter_data3),
        "weight4": tvm.nd.array(filter_data4),
    }
    # Hand-written tuning record that pins the first conv to the winograd template.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 3, 3, 4], "{dtype}"], ["TENSOR", [3, 3, 4, 8], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_non_rect(remote, target, dtype):
    """NHWC 3x3 conv2d with a non-square (36x64) spatial extent on the winograd schedule."""
    data_shape = (1, 36, 64, 771)
    weight_shape = (3, 3, 771, 128)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[1, 1, 1, 1],
        channels=128,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    mod = relay.Function([data, weight], conv_out)
    # Deterministic Xavier-initialized weight.
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    xavier("weight", weight_np)
    params = {"weight": tvm.nd.array(weight_np)}
    # Hand-written tuning record that pins this workload to the winograd template.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256 -texture_spatial_limit=16384 -thread_warp_size=1", "conv2d_nhwc_winograd.image2d", [["TENSOR", [1, 36, 64, 771], "{dtype}"], ["TENSOR", [3, 3, 771, 128], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 5399, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 16], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 8]], ["tile_rc", "sp", [-1, 193]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    assert len(re.findall("winograd", graph)) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_to_3_channels(remote, target, dtype):
    """NHWC 1x1 conv2d reducing 256 channels to 3 (non-multiple-of-4 output depth)."""
    data_shape = (1, 200, 200, 256)
    weight_shape = (1, 1, 256, 3)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NHWC",
        kernel_layout="HWIO",
        padding=[0, 0, 0, 0],
        out_dtype=dtype,
        channels=3,
        kernel_size=(1, 1),
    )
    mod = relay.Function([data, weight], conv_out)
    # Deterministic Xavier-initialized weight.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    xavier("weight", weight_np)
    params = {"weight": tvm.nd.array(weight_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [])
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 26,384 | 33.177461 | 608 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_injection_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import pytest
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_layout_transform_to_block_nchw4c(remote, target, dtype):
    """Layout transform from NCHW into the blocked NCHW4c layout."""
    shape = (1, 32, 720, 1280)
    data = relay.var("data", shape=shape, dtype=dtype)
    transformed = relay.layout_transform(data, "NCHW", "NCHW4c")
    mod = relay.Function([data], transformed)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_layout_transform_to_block_nchw(remote, target, dtype):
    """Layout transform from the blocked NCHW4c layout back to NCHW."""
    shape = (1, 36, 1, 1, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    transformed = relay.layout_transform(data, "NCHW4c", "NCHW")
    mod = relay.Function([data], transformed)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_layout_transform_to_block_nhwc4c(remote, target, dtype):
    """Layout transform from NHWC into the blocked NHWC4c layout."""
    shape = (1, 1, 1, 144)
    data = relay.var("data", shape=shape, dtype=dtype)
    transformed = relay.layout_transform(data, "NHWC", "NHWC4c")
    mod = relay.Function([data], transformed)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@pytest.mark.skipif(
    tvm.testing.utils.IS_IN_CI, reason="Skip because GPU in CI doesn't support FP16"
)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_layout_transform_to_block_nhwc(remote, target, dtype):
    """Mean-reduce + fp16 cast followed by a NHWC4c -> NHWC layout transform."""
    shape = (1, 80, 80, 36, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    reduced = relay.mean(data, axis=[1, 2], keepdims=True)
    half = relay.cast(reduced, "float16")
    transformed = relay.layout_transform(half, "NHWC4c", "NHWC")
    mod = relay.Function([data], transformed)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
# Allow running the FP16 case directly (it is skipped in CI via the marker above);
# note this calls one test explicitly instead of tvm.testing.main().
if __name__ == "__main__":
    test_layout_transform_to_block_nhwc(None, "opencl -device=adreno", "float16")
| 3,226 | 36.523256 | 86 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.contrib import utils
from tvm.relay import testing
from tvm.relay.op import register_mixed_precision_conversion
from utils.adreno_utils import build_run_compare, get_model, gpu_preprocess
def _test_mobilenet_v1(remote, target, calc_dtype, acc_dtype):
    """Build and run MobileNet-EdgeTPU from TFLite, optionally in mixed precision.

    When ``calc_dtype`` is float16, conv2d/dense are converted to mixed precision
    with ``acc_dtype`` accumulation before compilation.
    """
    mod, params, inputs, dtypes = get_model(
        "https://github.com/mlcommons/mobile_models/raw/main/v0_7/tflite/mobilenet_edgetpu_224_1.0_float.tflite",
        "mobilenet_edgetpu_224_1.0_float.tflite",
        "tflite",
    )
    if calc_dtype == "float16":
        from tvm.driver.tvmc.transform import apply_graph_transforms

        transform_config = {
            "mixed_precision": True,
            "mixed_precision_ops": ["nn.conv2d", "nn.dense"],
            "mixed_precision_calculation_type": calc_dtype,
            "mixed_precision_acc_type": acc_dtype,
        }
        mod = apply_graph_transforms(mod, transform_config)
    build_run_compare(remote, mod, params, inputs, dtypes, target, [])
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/13443")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
@pytest.mark.skipif(tvm.testing.utils.IS_IN_CI, reason="CI doesn't support fp16(half datatypes)")
def test_mobilenet_v1_fp16(remote, target):
    """MobileNet-EdgeTPU with fp16 calculation and fp16 accumulation."""
    _test_mobilenet_v1(remote, target, "float16", "float16")
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/13443")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mobilenet_v1_fp32(remote, target):
    """MobileNet-EdgeTPU in plain fp32 (no mixed-precision conversion)."""
    _test_mobilenet_v1(remote, target, "float32", "float32")
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/13443")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mobilenet_v1_fp16_acc32(remote, target):
    """MobileNet-EdgeTPU with fp16 calculation but fp32 accumulation."""
    _test_mobilenet_v1(remote, target, "float16", "float32")
# Allow running this test file directly (outside of pytest collection).
if __name__ == "__main__":
    tvm.testing.main()
| 2,815 | 36.052632 | 113 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_pool_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from utils.adreno_utils import build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nchw_wide(remote, target, dtype):
    """NCHW global average pooling with a large spatial extent."""
    shape = (1, 32, 160, 160)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data)
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nchw4c_wide(remote, target, dtype):
    """Blocked NCHW4c global average pooling with a large spatial extent."""
    shape = (1, 8, 160, 160, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data, layout="NCHW4c")
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nchw_deep(remote, target, dtype):
    """NCHW global average pooling with a deep (2048) channel dimension."""
    shape = (1, 2048, 20, 20)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data)
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nchw4c_deep(remote, target, dtype):
    """Blocked NCHW4c global average pooling with a deep channel dimension."""
    shape = (1, 512, 20, 20, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data, layout="NCHW4c")
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nhwc(remote, target, dtype):
    """NHWC global average pooling with a large spatial extent."""
    shape = (1, 160, 160, 32)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data, layout="NHWC")
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_pool2d_nhwc4c(remote, target, dtype):
    """Blocked NHWC4c global average pooling with a large spatial extent."""
    shape = (1, 160, 160, 8, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_avg_pool2d(data, layout="NHWC4c")
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_max_pool2d_nchw_wide(remote, target, dtype):
    """NCHW global max pooling with a large spatial extent."""
    shape = (1, 32, 160, 160)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_max_pool2d(data)
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_global_max_pool2d_nchw4c_wide(remote, target, dtype):
    """Blocked NCHW4c global max pooling with a large spatial extent."""
    shape = (1, 8, 160, 160, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.nn.global_max_pool2d(data, layout="NCHW4c")
    mod = relay.Function([data], pooled)
    build_run_compare(remote, mod, {}, {"data": shape}, {"data": dtype}, target)
| 4,781 | 34.161765 | 86 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_conv2d_nchw_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
import pytest
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad(remote, target, dtype):
    """NCHW/OIHW 3x3 stride-2 conv2d without padding, followed by bias-add and relu."""
    data_shape = (1, 32, 42, 42)
    weight_shape = (96, 32, 3, 3)
    bias_shape = (1, 96, 1, 1)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(3, 3),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_64x35x35_96x64x3x3_nopad_pass(remote, target, dtype):
    """NCHW/OIHW 2x2 stride-2 conv2d without padding, followed by bias-add and relu."""
    data_shape = (1, 32, 40, 40)
    weight_shape = (96, 32, 2, 2)
    bias_shape = (1, 96, 1, 1)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(2, 2),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_35_35_strides(remote, target, dtype):
    """NCHW/OIHW 5x5 stride-1 conv2d with symmetric padding 2, bias-add and relu."""
    data_shape = (1, 48, 35, 35)
    weight_shape = (64, 48, 5, 5)
    bias_shape = (1, 64, 1, 1)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[2, 2, 2, 2],
        strides=[1, 1],
        out_dtype=dtype,
        channels=64,
        kernel_size=(5, 5),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters.
    np.random.seed(0)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(
        remote, mod, params, {"data": data_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_resnet50_v2_nchw_3c(remote, target, dtype):
    """ResNet50-v2 stem: NCHW 7x7 stride-2 conv2d over 3-channel input, bias-add and relu."""
    data_shape = (1, 3, 224, 224)
    weight_shape = (64, 3, 7, 7)
    bias_shape = (1, 64, 1, 1)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    bias_var = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv_out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[3, 3, 3, 3],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        kernel_size=(7, 7),
    )
    result = relay.op.nn.relu(relay.op.add(conv_out, bias_var))
    mod = relay.Function([data, weight, bias_var], result)
    # Deterministic Xavier-initialized parameters (this test seeds with 1).
    np.random.seed(1)
    xavier = relay.testing.init.Xavier()
    weight_np = np.zeros(weight_shape).astype(dtype)
    bias_np = np.zeros(bias_shape).astype(dtype)
    xavier("weight", weight_np)
    xavier("bias", bias_np)
    params = {"weight": tvm.nd.array(weight_np), "bias": tvm.nd.array(bias_np)}
    build_run_compare(remote, mod, params, {"data": data_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_inceptionv3_nchw_3c(remote, target, dtype):
    """Stem convolution of InceptionV3: 3 -> 64 channels, 3x3 kernel,
    stride 2, no padding, followed by bias add and relu.
    """
    input_shape = (1, 3, 299, 299)
    filter_shape = (64, 3, 3, 3)
    bias_shape = (1, 64, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_1x1_16c16spatial(remote, target, dtype):
    """Conv2d 16 -> 32 channels over a 256x256 input, 4x4 kernel, stride 2,
    followed by bias add and relu.

    NOTE(review): the test name says "1x1" but the kernel here is (4, 4) —
    looks like a stale name; what is actually tested is this 4x4 config.
    """
    input_shape = (1, 16, 256, 256)
    filter_shape = (32, 16, 4, 4)
    bias_shape = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(4, 4),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4_16c16pad(remote, target, dtype):
    """Conv2d 32 -> 32 channels, 4x4 kernel, stride 2 with asymmetric
    padding [3, 3, 0, 0], followed by bias add and relu.
    """
    input_shape = (1, 32, 256, 256)
    filter_shape = (32, 32, 4, 4)
    bias_shape = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[3, 3, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(4, 4),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_4x4x4_16c16pad(remote, target, dtype):
    """Conv2d 32 -> 4 channels, 4x4 kernel, stride 2 with asymmetric
    padding [3, 3, 0, 0], followed by bias add and relu.

    Covers the small (4) output-channel count.
    """
    input_shape = (1, 32, 256, 256)
    filter_shape = (4, 32, 4, 4)
    bias_shape = (1, 4, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[3, 3, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=4,
        kernel_size=(4, 4),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_yolov3_v2_nchw_3c(remote, target, dtype):
    """Final 1x1 detection conv of YOLOv3: 1024 -> 255 channels, no bias or
    activation. 255 output channels is not a multiple of 4.
    """
    input_shape = (1, 1024, 13, 13)
    filter_shape = (255, 1024, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=255,
        kernel_size=(1, 1),
    )
    mod = relay.Function([A, B], conv)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    initializer("weight", filter_data)
    params = {
        "weight": tvm.nd.array(filter_data),
    }
    build_run_compare(remote, mod, params, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_vgg16_winograd_4d(remote, target, dtype):
    """VGG16-style 3x3 conv (512 -> 512) + bias + relu, steered onto the
    conv2d_nchw_winograd.image2d schedule via a hand-written AutoTVM log.

    Asserts that "winograd" actually appears in the compiled graph JSON.
    """
    input_shape = (1, 512, 28, 28)
    filter_shape = (512, 512, 3, 3)
    bias_shape = (1, 512, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        channels=512,
        kernel_size=[3, 3],
        out_dtype=dtype,
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    # Pre-baked tuning record: forces the winograd implementation regardless
    # of the fallback heuristics. The record must match the workload exactly.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 512, 28, 28], "{dtype}"], ["TENSOR", [512, 512, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_conv(remote, target, dtype):
    """Two chained 3x3 convolutions; the first is steered onto the
    conv2d_nchw_winograd.image2d schedule via a hand-written AutoTVM log.

    Asserts that "winograd" actually appears in the compiled graph JSON.
    """
    input_shape = (1, 4, 3, 3)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    filter_shape3 = (8, 4, 3, 3)
    B3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    D = relay.nn.conv2d(
        A, B3, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], out_dtype=dtype
    )
    filter_shape4 = (8, 8, 3, 3)
    B4 = relay.var("weight4", shape=filter_shape4, dtype=dtype)
    D = relay.nn.conv2d(
        D, B4, padding=[1, 1, 1, 1], channels=8, kernel_size=[3, 3], out_dtype=dtype
    )
    mod = relay.Function([A, B3, B4], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    # The network has no bias terms, so only the conv weights are filled
    # (the previous version also created and initialized unused bias buffers).
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    filter_data4 = np.zeros(filter_shape4).astype(dtype)
    initializer("weight", filter_data3)
    initializer("weight", filter_data4)
    params1 = {
        "weight3": tvm.nd.array(filter_data3),
        "weight4": tvm.nd.array(filter_data4),
    }
    # Pre-baked tuning record matching the first conv's workload exactly.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 4, 3, 3], "{dtype}"], ["TENSOR", [8, 4, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 1591, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 4], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 2]], ["tile_rc", "sp", [-1, 8]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_residual_block(remote, target, dtype):
    """
    - some kind of residual block followed by convolution to have texture after residual block
    - scalar data type verification which should be mapped to global memory scope
    layout_transform (NCHW->NCHW4c)
    | <- buffer
    conv2d (1) <- to get textures as output
    / \
    conv2d (2) |
    \ /
    add <- add should be fused into conv2d (2)
    multiply to scalar <- buffer to the input of multiply scalar value
    relu
    | <- texture in intermediate tensor
    conv2d (3)
    relu
    | <- buffer
    layout_transform (NCHW4c->NCHW)
    """
    input_shape = (1, 32, 40, 40)
    filter_shape1 = (32, 32, 2, 2)
    filter_shape2 = (32, 32, 1, 1)
    filter_shape3 = (32, 32, 2, 2)
    bias_shape1 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    D = relay.op.add(conv1, B1)
    D = relay.op.nn.relu(D)
    conv2 = relay.nn.conv2d(
        D,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    # Residual connection + scalar multiply (the scalar path is the one that
    # must land in global memory scope per the docstring diagram).
    D = relay.op.add(conv2, D)
    D = D * relay.const(0.15, dtype)
    D = relay.op.nn.relu(D)
    conv3 = relay.nn.conv2d(
        D,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    D = relay.op.nn.relu(conv3)
    mod = relay.Function([A, W1, B1, W2, W3], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "weight3": tvm.nd.array(filter_data3),
    }
    # Expected memory scopes differ between fp16 and fp32 builds.
    if dtype == "float16":
        static_memory_scope = [
            "",
            "global.texture",
            "global.texture-weight",
            "global.texture-weight",
            "global.texture",
            "global.texture-weight",
            "global",
            "global.texture",
            "global.texture-weight",
            "",
            "",
        ]
    else:
        static_memory_scope = [
            "",
            "global.texture",
            "global",
            "global.texture-weight",
            "global.texture",
            "global.texture-weight",
            "global.texture",
            "global",
            "",
            "",
        ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_concat(remote, target, dtype):
    """
    layout_transform (NCHW->NCHW4c)
    | <- buffer
    conv2d (1) <- to get textures as output
    / \
    conv2d (2) conv2d (3)
    \ / <- concat does not support textures, there we should have buffers
    concatenation
    | <- buffer
    layout_transform (NCHW4c->NCHW)
    """
    input_shape = (1, 32, 40, 40)
    filter_shape1 = (96, 32, 2, 2)
    filter_shape2 = (32, 96, 2, 2)
    filter_shape3 = (5, 96, 2, 2)
    bias_shape1 = (1, 96, 1, 1)
    bias_shape2 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    B2 = relay.var("bias2", shape=bias_shape2, dtype=dtype)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=96,
        kernel_size=(2, 2),
    )
    D = relay.op.add(conv1, B1)
    D = relay.op.nn.relu(D)
    conv2 = relay.nn.conv2d(
        D,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv2 = relay.op.add(conv2, B2)
    conv2 = relay.op.nn.relu(conv2)
    conv3 = relay.nn.conv2d(
        D,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[2, 2],
        out_dtype=dtype,
        channels=5,
        kernel_size=(2, 2),
    )
    t = relay.Tuple([conv2, conv3])
    c = relay.op.concatenate(t, axis=1)
    mod = relay.Function([A, W1, B1, W2, B2, W3], c)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    bias_data2 = np.zeros(bias_shape2).astype(dtype)
    initializer("weight", filter_data2)
    initializer("bias", bias_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "bias2": tvm.nd.array(bias_data2),
        "weight3": tvm.nd.array(filter_data3),
    }
    # Scope verification is skipped for this test: an empty expected-scope
    # list tells build_run_compare not to check memory scopes. (A fully
    # populated list used to be built here but was immediately overwritten
    # with [] — that dead code has been removed.)
    static_memory_scope = []
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_pooling_branching_texture_params(remote, target, dtype):
    """
    Verification of the pooling and many branches having textures
    layout_transform (NCHW->NCHW4c)
    | <- buffer
    conv2d (0) <- to get textures
    | <- textures
    pooling
    / \ \ <- textures
    conv2d (1) conv2d (2) conv2d (3)
    \ / |
    add | <- to have the only one output, will be fused
    \ /
    add <- to have the only one output, will be fused
    | <- buffer
    layout_transform (NCHW4c->NCHW)
    """
    input_shape = (1, 32, 40, 40)
    filter_shape0 = (32, 32, 1, 1)
    filter_shape1 = (32, 32, 2, 2)
    filter_shape2 = (32, 32, 1, 1)
    filter_shape3 = (32, 32, 2, 2)
    bias_shape1 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W0 = relay.var("weight0", shape=filter_shape0, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    conv0 = relay.nn.conv2d(
        A,
        W0,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    pool = relay.nn.avg_pool2d(conv0, pool_size=(2, 2), strides=(2, 2))
    conv1 = relay.nn.conv2d(
        pool,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv1 = relay.op.add(conv1, B1)
    conv1 = relay.op.nn.relu(conv1)
    conv2 = relay.nn.conv2d(
        pool,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    conv3 = relay.nn.conv2d(
        pool,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 1, 1, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv3 = relay.op.nn.relu(conv3)
    res = relay.op.add(conv1, conv2)
    res = relay.op.add(res, conv3)
    mod = relay.Function([A, W0, W1, B1, W2, W3], res)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data0 = np.zeros(filter_shape0).astype(dtype)
    # Fix: weight0 was previously left all-zero (never passed through the
    # initializer), which zeroed out every downstream tensor and made the
    # numeric comparison trivially pass.
    initializer("weight", filter_data0)
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight0": tvm.nd.array(filter_data0),
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "weight3": tvm.nd.array(filter_data3),
    }
    static_memory_scope = [
        "",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "global.texture",
        "global",
        "global.texture-weight",
        "global.texture-weight",
        "global.texture",
        "global",
        "global.texture",
        "",
        "",
    ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_branching_texture_params(remote, target, dtype):
    """
    Verification of passing texture to several consumers markup of relay variables in
    primary functions + on_device
    layout_transform (NCHW->NCHW4c)
    | <- buffer
    conv2d (0) <- to get textures
    / \ \ <- here should be textures and textures in params
    conv2d (1) conv2d (2) conv2d (3)
    \ / |
    add | <- to have the only one output
    \ /
    add <- to have the only one output
    | <- buffer
    layout_transform (NCHW4c->NCHW)
    """
    input_shape = (1, 32, 40, 40)
    filter_shape0 = (32, 32, 1, 1)
    filter_shape1 = (32, 32, 2, 2)
    filter_shape2 = (32, 32, 1, 1)
    filter_shape3 = (32, 32, 2, 2)
    bias_shape1 = (1, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W0 = relay.var("weight0", shape=filter_shape0, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    B1 = relay.var("bias1", shape=bias_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    W3 = relay.var("weight3", shape=filter_shape3, dtype=dtype)
    conv0 = relay.nn.conv2d(
        A,
        W0,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    conv1 = relay.nn.conv2d(
        conv0,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv1 = relay.op.add(conv1, B1)
    conv1 = relay.op.nn.relu(conv1)
    conv2 = relay.nn.conv2d(
        conv0,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    conv3 = relay.nn.conv2d(
        conv0,
        W3,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 1, 1, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(2, 2),
    )
    conv3 = relay.op.nn.relu(conv3)
    res = relay.op.add(conv1, conv2)
    res = relay.op.add(res, conv3)
    mod = relay.Function([A, W0, W1, B1, W2, W3], res)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data0 = np.zeros(filter_shape0).astype(dtype)
    # Fix: weight0 was previously left all-zero (never passed through the
    # initializer), which zeroed out every downstream tensor and made the
    # numeric comparison trivially pass.
    initializer("weight", filter_data0)
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    bias_data1 = np.zeros(bias_shape1).astype(dtype)
    initializer("weight", filter_data1)
    initializer("bias", bias_data1)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data2)
    filter_data3 = np.zeros(filter_shape3).astype(dtype)
    initializer("weight", filter_data3)
    params1 = {
        "weight0": tvm.nd.array(filter_data0),
        "weight1": tvm.nd.array(filter_data1),
        "bias1": tvm.nd.array(bias_data1),
        "weight2": tvm.nd.array(filter_data2),
        "weight3": tvm.nd.array(filter_data3),
    }
    static_memory_scope = [
        "",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "global",
        "global.texture-weight",
        "global.texture-weight",
        "global.texture",
        "global",
        "global.texture",
        "",
        "",
    ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_different_lowering_same_op(remote, target, dtype):
    """
    Use case for verification of caching compiled functions
    Three convolutions following by each other in this case should be
    compiled in three different entities and lowered differently because
    they are differ in input param memory scopes and in output memory scope
    layout_transform (NCHW->NCHW4c)
    | <- buffer
    conv2d (1) <- buffer as input tensor and texture as output
    | <- texture
    conv2d (2) <- texture as input and texture as output
    | <- texture
    conv2d (3) <- texture as input and buffer as output
    | <- buffer
    layout_transform (NCHW4c->NCHW)
    """
    input_shape = (1, 32, 40, 40)
    filter_shape1 = (32, 32, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    conv2 = relay.nn.conv2d(
        conv1,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    conv3 = relay.nn.conv2d(
        conv2,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        strides=[1, 1],
        out_dtype=dtype,
        channels=32,
        kernel_size=(1, 1),
    )
    mod = relay.Function([A, W1], conv3)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    # Fix: the initializer was previously created but never applied, leaving
    # weight1 all-zero and making the numeric comparison trivially pass.
    initializer("weight", filter_data1)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
    }
    static_memory_scope = [
        "",
        "global.texture",
        "global.texture-weight",
        "global.texture",
        "global.texture",
        "",
        "",
    ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_winograd_non_rect(remote, target, dtype):
    """3x3 conv with a non-square 36x64 spatial input and 771 input channels
    (not a multiple of 4), steered onto the winograd schedule via a
    hand-written AutoTVM log. Asserts "winograd" appears in the graph JSON.
    """
    input_shape = (1, 771, 36, 64)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    filter_shape = (128, 771, 3, 3)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    D = relay.nn.conv2d(
        A, B, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3], out_dtype=dtype
    )
    mod = relay.Function([A, B], D)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    initializer("weight", filter_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
    }
    # Pre-baked tuning record matching this workload exactly.
    temp = utils.tempdir()
    stat_file = temp.relpath("stat.log")
    with open(stat_file, "w") as f:
        f.write(
            f'{{"input": ["opencl -keys=adreno,opencl,gpu -device=adreno -max_num_threads=256 -texture_spatial_limit=16384 -thread_warp_size=1", "conv2d_nchw_winograd.image2d", [["TENSOR", [1, 771, 36, 64], "{dtype}"], ["TENSOR", [128, 771, 3, 3], "{dtype}"], [1, 1], [1, 1, 1, 1], [1, 1], "{dtype}"], {{}}], "config": {{"index": 5399, "code_hash": null, "entity": [["auto_unroll_max_step", "ot", 16], ["tile_y", "sp", [-1, 1, 32]], ["tile_x", "sp", [-1, 4, 8]], ["tile_rc", "sp", [-1, 193]]]}}, "result": [[0.0037244], 0, 7.06374192237854, 1653898629.7427933], "version": 0.2, "tvm_version": "0.8.dev0"}}\n'
        )
    graph = build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, stat_file=stat_file
    )
    matches = re.findall("winograd", graph)
    assert len(matches) > 0
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_injective_nwo_inputs1(remote, target, dtype):
    """
    Use case for verification of stability of annotation primary functions
    having several ops accepting data outside of Primary function
    The visiting of ops during traversing of graph inside primary function
    can depend on order of relay graph creation. Thus the annotation mechanism
    should be reliable for graph traversal order
    The current decision if Prim Function support textures or not depend on
    *any* op accepting input of the function and if op support textures
    Input
    / \
    layout_transform (NCHW->NCHW4c) |
    | /
    conv2d (1) /
    | /
    conv2d (2) mean /
    / \ / <- Primary function several head ops
    (1)add (2)layout_transform |
    | (NCHW4c->NCHW) |
    | | \ /
    | | (3) add
    | | |
    layout_transform \ /
    (NCHW4c->NCHW) \ /
    \ mul
    \ /
    add
    This test verifies a case when the latest op which is visited is (3) and does not
    support textures, but there is (1) supporting textures, thus the whole func will
    support textures
    """
    input_shape = (1, 4, 40, 40)
    filter_shape1 = (4, 4, 3, 3)
    filter_shape2 = (4, 4, 3, 3)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    mean = relay.mean(A, axis=1, keepdims=True)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=4,
        kernel_size=(3, 3),
    )
    conv2 = relay.nn.conv2d(
        conv1,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=4,
        kernel_size=(3, 3),
    )
    ad3 = relay.op.add(conv1, conv2)
    ad1 = relay.op.add(mean, conv1)
    ad2 = relay.op.multiply(ad1, conv2)
    ad4 = relay.op.add(ad3, ad2)
    mod = relay.Function([A, W1, W2], ad4)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data1)
    initializer("weight", filter_data2)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "weight2": tvm.nd.array(filter_data2),
    }
    static_memory_scope = [
        "",
        "global.texture",
        "global",
        "global.texture",
        "global",
        "global.texture",
        "global",
        "global",
    ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
# function repeat, params scope are different in reused functions
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_injective_nwo_inputs2(remote, target, dtype):
    """
    Use case for verification of stability of annotation primary functions
    having several ops accepting data outside of Primary function
    The visiting of ops during traversing of graph inside primary function
    can depend on order of relay graph creation. Thus the annotation mechanism
    should be reliable for graph traversal order
    The current decision if Prim Function support textures or not depend on
    *any* op accepting input of the function and if op support textures
    Input
    / \
    layout_transform (NCHW->NCHW4c) |
    | /
    conv2d (1) /
    | /
    conv2d (2) mean /
    / \ / <- Primary function several head ops
    (1)add (2)layout_transform |
    | (NCHW4c->NCHW) |
    | | \ /
    | | (3) add
    | | |
    layout_transform \ /
    (NCHW4c->NCHW) \ /
    \ mul
    \ /
    add
    This test verifies a case when the latest op which is (1), it supports textures
    an whole prim function is considered as a func working with textures
    """
    input_shape = (1, 4, 40, 40)
    filter_shape1 = (4, 4, 3, 3)
    filter_shape2 = (4, 4, 3, 3)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    W1 = relay.var("weight1", shape=filter_shape1, dtype=dtype)
    W2 = relay.var("weight2", shape=filter_shape2, dtype=dtype)
    mean = relay.mean(A, axis=1, keepdims=True)
    conv1 = relay.nn.conv2d(
        A,
        W1,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=4,
        kernel_size=(3, 3),
    )
    conv2 = relay.nn.conv2d(
        conv1,
        W2,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[1, 1],
        out_dtype=dtype,
        channels=4,
        kernel_size=(3, 3),
    )
    ad3 = relay.op.add(conv1, conv2)
    ad1 = relay.op.add(mean, conv1)
    ad2 = relay.op.multiply(ad1, conv2)
    # Note: operand order of the final add is swapped relative to
    # test_injective_nwo_inputs1 to change the graph-creation order.
    ad4 = relay.op.add(ad2, ad3)
    mod = relay.Function([A, W1, W2], ad4)
    # Deterministic Xavier init so TVM and the reference share parameters.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    filter_data1 = np.zeros(filter_shape1).astype(dtype)
    filter_data2 = np.zeros(filter_shape2).astype(dtype)
    initializer("weight", filter_data1)
    initializer("weight", filter_data2)
    params1 = {
        "weight1": tvm.nd.array(filter_data1),
        "weight2": tvm.nd.array(filter_data2),
    }
    static_memory_scope = [
        "",
        "global.texture",
        "global",
        "global.texture",
        "global",
        "global",
        "global.texture",
        "global",
    ]
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, static_memory_scope
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_to_3_channels(remote, target, dtype):
    """1x1 convolution reducing 256 channels down to 3 (an output-channel
    count that is not a multiple of 4), compared against the reference.
    """
    data_shape = (1, 256, 200, 200)
    weight_shape = (3, 256, 1, 1)
    data = relay.var("data", shape=data_shape, dtype=dtype)
    weight = relay.var("weight", shape=weight_shape, dtype=dtype)
    out = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        out_dtype=dtype,
        channels=3,
        kernel_size=(1, 1),
    )
    func = relay.Function([data, weight], out)
    # Deterministic Xavier-filled weights shared by TVM and the reference.
    np.random.seed(0)
    filler = relay.testing.init.Xavier()
    weight_data = np.zeros(weight_shape).astype(dtype)
    filler("weight", weight_data)
    params = {"weight": tvm.nd.array(weight_data)}
    build_run_compare(remote, func, params, {"data": data_shape}, {"data": dtype}, target, [])
# Entry point so the file can be executed directly, outside of pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 44,121 | 32.324773 | 608 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_reduction_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from utils.adreno_utils import gpu_preprocess, build_run_compare
# Single dtype parametrization shared by every test in this file.
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean(remote, target, dtype):
    """Channel-axis mean (keepdims) over an NCHW tensor on Adreno OpenCL."""
    shape = (1, 3, 720, 1280)
    data = relay.var("data", shape=shape, dtype=dtype)
    func = relay.Function([data], relay.mean(data, axis=1, keepdims=True))
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_argmax(remote, target, dtype):
    """argmax over the channel axis of an NCHW tensor, compared against CPU."""
    shape = (1, 3, 720, 1280)  # NCHW
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.argmax(data, axis=[1])
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_reduction_max(remote, target, dtype):
    """max reduction over the channel axis of an NCHW tensor, compared against CPU."""
    shape = (1, 3, 720, 1280)  # NCHW
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.max(data, axis=[1])  # renamed from misleading local `argmax`
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean_nd4(remote, target, dtype):
    """Channel-axis mean on spatial dims not divisible by 4 (729x729)."""
    shape = (1, 3, 729, 729)  # NCHW
    data = relay.var("data", shape=shape, dtype=dtype)
    reduced = relay.mean(data, axis=1, keepdims=True)
    func = relay.Function([data], reduced)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_argmax_nd4(remote, target, dtype):
    """Channel-axis argmax on spatial dims not divisible by 4 (729x729)."""
    shape = (1, 3, 729, 729)  # NCHW
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.argmax(data, axis=[1])
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_reduction_max_nd4(remote, target, dtype):
    """Channel-axis max on spatial dims not divisible by 4 (729x729)."""
    shape = (1, 3, 729, 729)  # NCHW
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.max(data, axis=[1])  # renamed from misleading local `argmax`
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean_b4(remote, target, dtype):
    """Mean over axis 1 of a 5D (blocked, innermost-4) input."""
    shape = (1, 3, 720, 320, 4)  # 5D blocked input, innermost factor 4
    data = relay.var("data", shape=shape, dtype=dtype)
    reduced = relay.mean(data, axis=1, keepdims=True)
    func = relay.Function([data], reduced)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_argmax_b4(remote, target, dtype):
    """argmax over axis 1 of a 5D (blocked, innermost-4) input."""
    shape = (1, 3, 720, 320, 4)  # 5D blocked input, innermost factor 4
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.argmax(data, axis=[1])
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_reduction_max_b4(remote, target, dtype):
    """max over axis 1 of a 5D (blocked, innermost-4) input."""
    shape = (1, 3, 720, 320, 4)  # 5D blocked input, innermost factor 4
    data = relay.var("data", shape=shape, dtype=dtype)
    out = relay.op.max(data, axis=[1])  # renamed from misleading local `argmax`
    func = relay.Function([data], out)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean_global_pooling(remote, target, dtype):
    """Global-pooling-style mean over large spatial dimensions (4D input)."""
    shape = (1, 160, 160, 32)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.mean(data, axis=[1, 2], keepdims=True)
    func = relay.Function([data], pooled)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_mean_global_pooling_block4(remote, target, dtype):
    """Global-pooling-style mean on a blocked 5D input with big spatial dims."""
    shape = (1, 160, 160, 8, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.mean(data, axis=[1, 2], keepdims=True)
    func = relay.Function([data], pooled)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_max_global_pooling_block4(remote, target, dtype):
    """Global-pooling-style max on a blocked 5D input with big spatial dims."""
    shape = (1, 160, 160, 8, 4)
    data = relay.var("data", shape=shape, dtype=dtype)
    pooled = relay.max(data, axis=[1, 2], keepdims=True)  # was misleadingly named `mean`
    func = relay.Function([data], pooled)
    build_run_compare(remote, func, {}, {"data": shape}, {"data": dtype}, target)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 6,241 | 33.296703 | 86 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/test_depthwise_conv2d_nchw_texture.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from utils.adreno_utils import gpu_preprocess, build_run_compare
dtype = tvm.testing.parameter("float32")
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_bias_nchwc(remote, target, dtype):
    """Depthwise conv2d (groups == channels) + bias + relu, routed through the
    blocked-layout transform (gpu_preprocess) and compared against a CPU run.

    Improvement: removed commented-out dead code from the original body.
    """
    input_shape = (1, 64, 112, 112)
    filter_shape = (64, 1, 3, 3)
    bias_shape = (1, 64, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        groups=64,  # groups == channels -> depthwise convolution
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Fixed seed + Xavier init so the CPU reference sees identical weights.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_nchwc(remote, target, dtype):
    """Depthwise conv2d without bias through the blocked-layout transform.

    Improvements: removed unused locals (`bias_shape`, `bias_data`) and
    commented-out dead code from the original body.
    """
    input_shape = (1, 64, 112, 112)
    filter_shape = (64, 1, 3, 3)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        groups=64,  # groups == channels -> depthwise convolution
        kernel_size=(3, 3),
    )
    mod = relay.Function([A, B], conv)
    # Fixed seed + Xavier init so the CPU reference sees identical weights.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    initializer("weight", filter_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
    }
    build_run_compare(
        remote, mod, params1, {"data": input_shape}, {"data": dtype}, target, [], gpu_preprocess
    )
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_bias_nchw(remote, target, dtype):
    """Depthwise conv2d + bias + relu in plain NCHW layout, compared against CPU.

    Improvement: removed commented-out dead code from the original body.
    """
    input_shape = (1, 64, 112, 112)
    filter_shape = (64, 1, 3, 3)
    bias_shape = (1, 64, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[2, 2],
        out_dtype=dtype,
        channels=64,
        groups=64,  # groups == channels -> depthwise convolution
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Fixed seed + Xavier init so the CPU reference sees identical weights.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_depthwise_conv2d_repack_bias_nchw(remote, target, dtype):
    """Depthwise conv2d + bias + relu with 63 channels (not a multiple of 4),
    exercising the channel-repack path. Compared against a CPU run.

    Improvement: removed commented-out dead code from the original body.
    """
    input_shape = (1, 63, 112, 112)
    filter_shape = (63, 1, 3, 3)
    bias_shape = (1, 63, 1, 1)
    A = relay.var("data", shape=input_shape, dtype=dtype)
    B = relay.var("weight", shape=filter_shape, dtype=dtype)
    bias = relay.var("bias", shape=bias_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        A,
        B,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[1, 1, 1, 1],
        strides=[2, 2],
        out_dtype=dtype,
        channels=63,
        groups=63,  # groups == channels -> depthwise convolution
        kernel_size=(3, 3),
    )
    D = relay.op.add(conv, bias)
    D = relay.op.nn.relu(D)
    mod = relay.Function([A, B, bias], D)
    # Fixed seed + Xavier init so the CPU reference sees identical weights.
    np.random.seed(1)
    initializer = relay.testing.init.Xavier()
    filter_data = np.zeros(filter_shape).astype(dtype)
    bias_data = np.zeros(bias_shape).astype(dtype)
    initializer("weight", filter_data)
    initializer("bias", bias_data)
    params1 = {
        "weight": tvm.nd.array(filter_data),
        "bias": tvm.nd.array(bias_data),
    }
    build_run_compare(remote, mod, params1, {"data": input_shape}, {"data": dtype}, target)
@tvm.testing.requires_opencl
@tvm.testing.parametrize_targets("opencl -device=adreno")
def test_conv2d_to_3_channels(remote, target, dtype):
    """1x1 depthwise conv over only 3 channels (not a multiple of 4)."""
    input_shape = (1, 3, 200, 200)
    filter_shape = (3, 1, 1, 1)
    data = relay.var("data", shape=input_shape, dtype=dtype)
    weight = relay.var("weight", shape=filter_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        data,
        weight,
        data_layout="NCHW",
        kernel_layout="OIHW",
        padding=[0, 0, 0, 0],
        out_dtype=dtype,
        channels=3,
        groups=3,
        kernel_size=(1, 1),
    )
    func = relay.Function([data, weight], conv)
    # Deterministic Xavier initialization of the weights.
    np.random.seed(0)
    initializer = relay.testing.init.Xavier()
    weight_np = np.zeros(filter_shape).astype(dtype)
    initializer("weight", weight_np)
    params = {
        "weight": tvm.nd.array(weight_np),
    }
    build_run_compare(remote, func, params, {"data": input_shape}, {"data": dtype}, target, [])
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 7,157 | 30.394737 | 96 | py |
tvm | tvm-main/tests/python/relay/opencl_texture/utils/adreno_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utils for adreno compute/schedules"""
import os
import tvm
import numpy as np
from tvm import relay
from tvm import autotvm
from tvm import rpc
from tvm.contrib import utils, ndk
from tvm.relay import testing
from tvm.relay.transform import recast
from tvm.contrib import graph_runtime
import json
def get_cpu_reference(mod, params1, input_shape, inputs):
    """Build *mod* for LLVM/CPU, run it once, and return its outputs as a list.

    The module is recast to float32 so reduced-precision GPU results are
    compared against a full-precision CPU baseline.

    Parameters
    ----------
    mod : relay.Function or IRModule
        Module under test.
    params1 : dict
        Parameter name -> tvm.nd.array bindings.
    input_shape : dict or tuple
        Input name -> shape mapping (or a single shape for the "data" input).
    inputs : list of numpy.ndarray
        Input tensors, in the order the keys of *input_shape* were iterated
        by build_run_compare().

    Returns
    -------
    list of numpy.ndarray
        The first output of the module.
    """
    mod_fp32 = recast(mod, "float32", "float32", ops=["nn.conv2d", "add", "nn.relu"])
    # tvm.transform.PassContext supersedes the deprecated relay.build_config
    # and matches how build_run_compare() in this file configures its builds.
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(mod_fp32, "llvm", params=params1)
    ctx = tvm.cpu()
    m = graph_runtime.create(graph, lib, ctx)
    if isinstance(input_shape, dict):
        # Pair each input name with its tensor. The previous code fed
        # inputs[-1] to every key, which was only correct for single-input
        # models; key order matches how build_run_compare() built `inputs`.
        for i, key in enumerate(input_shape):
            m.set_input(key, inputs[i])
    else:
        m.set_input("data", inputs[-1])
    m.set_input(**params)
    m.run()
    return [
        m.get_output(0).asnumpy(),
    ]
# build module run with opencl and cpu, compare results
def build_run_compare(
    remote,
    tvm_mod,
    params1,
    input_shape,
    dtypes,
    target="llvm",
    static_mem_scopes=None,
    gpu_preprocess=None,
    stat_file=None,
):
    """Build *tvm_mod* for *target*, execute it (locally or on an RPC remote),
    and compare its outputs against a CPU reference with loose tolerances.

    Parameters
    ----------
    remote : RPCSession or None
        When None, run on the local OpenCL device; otherwise cross-compile
        for arm64 Android and run over RPC.
    tvm_mod : relay.Function or IRModule
        Module under test.
    params1 : dict
        Parameter name -> tvm.nd.array bindings.
    input_shape : dict
        Input name -> shape.
    dtypes : dict
        Input name -> dtype string.
    target : str or tvm.target.Target
        Device target.
    static_mem_scopes : list of str, optional
        Expected per-tensor storage scopes in the built graph; an empty list
        (or None) skips the element-wise check.
    gpu_preprocess : callable, optional
        Optional module transform (e.g. blocked-layout conversion) applied
        before building.
    stat_file : str, optional
        AutoTVM tuning log applied during the build.

    Returns
    -------
    str
        The serialized graph JSON of the built module.
    """
    # None default avoids the shared-mutable-default-argument pitfall; the
    # previous signature used `static_mem_scopes=[]`.
    if static_mem_scopes is None:
        static_mem_scopes = []
    if remote is None:
        target_host = "llvm"
    else:
        target_host = "llvm -mtriple=arm64-linux-android"
    if gpu_preprocess:
        tvm_mod_nchwc = gpu_preprocess(tvm_mod)
    else:
        tvm_mod_nchwc = tvm_mod
    if stat_file is not None:
        with autotvm.apply_history_best(stat_file):
            with tvm.transform.PassContext(opt_level=3):
                graph, lib, params = relay.build(
                    tvm_mod_nchwc, target_host=target_host, target=target, params=params1
                )
    else:
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(
                tvm_mod_nchwc, target_host=target_host, target=target, params=params1
            )
    # verification that storage_scope has expected textures scopes
    graph_json = json.loads(graph)
    if "storage_scope" in graph_json["attrs"]:
        assert (
            len(static_mem_scopes) == len(graph_json["attrs"]["storage_scope"][1])
            or len(static_mem_scopes) == 0
        )
    else:
        assert len(static_mem_scopes) == 0
    for i in range(0, len(static_mem_scopes)):
        assert static_mem_scopes[i] == graph_json["attrs"]["storage_scope"][1][i]
    if remote is None:
        ctx = tvm.opencl()
        m = graph_runtime.create(graph, lib, ctx)
    else:
        # Cross-compiled flow: export a shared library with the NDK toolchain,
        # upload it over RPC, and load it back on the device.
        temp = utils.tempdir()
        dso_binary = "dev_lib_cl.so"
        dso_binary_path = temp.relpath(dso_binary)
        ctx = remote.cl(0)
        lib.export_library(dso_binary_path, ndk.create_shared)
        remote.upload(dso_binary_path)
        rlib = remote.load_module(dso_binary)
        m = graph_runtime.create(graph, rlib, ctx)
    m.set_input(**params)
    inputs = []
    for key in input_shape:
        inputs.append(np.random.normal(size=input_shape[key]).astype(dtypes[key]))
        m.set_input(key, inputs[-1])
    m.run()
    ref_outputs = get_cpu_reference(tvm_mod, params1, input_shape, inputs)
    for i, ref_output in enumerate(ref_outputs):
        tvm_output = m.get_output(i)
        output = tvm_output.asnumpy()
        # Loose tolerances: GPU math (and possible fp16 recasts) need slack.
        np.testing.assert_allclose(output, ref_output, rtol=1e-1, atol=1e-1)
    return graph
def gpu_preprocess(tvm_mod):
    """Convert every nn.conv2d in *tvm_mod* to the blocked NCHW4c/OIHW4o layout."""
    target_layouts = {"nn.conv2d": ["NCHW4c", "OIHW4o"]}
    with relay.transform.LayoutConfig():
        convert = tvm.transform.Sequential([relay.transform.ConvertLayout(target_layouts)])
        with tvm.transform.PassContext(opt_level=3):
            converted = convert(tvm.IRModule.from_expr(tvm_mod))
    return converted
def get_model(url, local_file, module):
    """Load a TFLite model (downloading it if *url* is given) into Relay.

    Bug fix: the output-dtype loop previously read
    ``subgraph.Tensors(model_input).Type()`` — the loop variable left over
    from the *input* loop — so every output dtype was wrong.

    Parameters
    ----------
    url : str or None
        Download URL; when None, *local_file* is opened directly.
    local_file : str
        Local filename (or download cache name).
    module : str
        Testdata cache subdirectory for the download.

    Returns
    -------
    tuple
        (mod, params, shape_dict, dtype_dict) — the Relay module, its
        parameters, and the input shape/dtype mappings.
    """
    def get_tensor_type_str(tensor_type):
        """Get tensor type string representation when given TFLite tensor type"""
        try:
            from tflite.TensorType import TensorType
        except ImportError:
            raise ImportError("The tflite package must be installed")
        if tensor_type == TensorType.INT8:
            return "int8"
        if tensor_type == TensorType.INT16:
            return "int16"
        if tensor_type == TensorType.UINT8:
            return "uint8"
        if tensor_type == TensorType.FLOAT16:
            return "float16"
        if tensor_type == TensorType.FLOAT32:
            return "float32"
        if tensor_type == TensorType.INT32:
            return "int32"
        if tensor_type == TensorType.INT64:
            return "int64"
        if tensor_type == TensorType.BOOL:
            return "bool"
        raise NotImplementedError(
            "Tensor type {} is currently not supported".format(str(tensor_type))
        )
    if url is None:
        model_path = local_file
    else:
        model_path = tvm.contrib.download.download_testdata(url, local_file, module=module)
    with open(model_path, "rb") as f:
        tflite_model_buf = f.read()
    try:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except ImportError:
        raise ImportError("The tflite package must be installed")
    # keep the same as tflite
    assert tflite_model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)"
    subgraph = tflite_model.Subgraphs(0)
    # model inputs
    model_inputs = subgraph.InputsAsNumpy()
    shape_dict = {}
    dtype_dict = {}
    for model_input in model_inputs:
        model_input_name = subgraph.Tensors(model_input).Name().decode("utf-8")
        model_shape_length = subgraph.Tensors(model_input).ShapeLength()
        model_input_shape = [
            subgraph.Tensors(model_input).Shape(i) for i in range(model_shape_length)
        ]
        shape_dict[model_input_name] = model_input_shape
        dtype_dict[model_input_name] = get_tensor_type_str(subgraph.Tensors(model_input).Type())
    # model outputs (currently collected but not returned)
    model_outputs = subgraph.OutputsAsNumpy()
    shape_dict_out = {}
    dtype_dict_out = {}
    for model_output in model_outputs:
        model_output_name = subgraph.Tensors(model_output).Name().decode("utf-8")
        model_shape_length = subgraph.Tensors(model_output).ShapeLength()
        model_output_shape = [
            subgraph.Tensors(model_output).Shape(i) for i in range(model_shape_length)
        ]
        shape_dict_out[model_output_name] = model_output_shape
        # Fixed: read the dtype of the *output* tensor (was `model_input`).
        dtype_dict_out[model_output_name] = get_tensor_type_str(
            subgraph.Tensors(model_output).Type()
        )
    mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict
    )
    # Normalize conv2d layouts to NCHW for the Adreno schedules.
    layout_config = relay.transform.LayoutConfig(skip_layers=[])
    desired_layouts = {"nn.conv2d": ["NCHW", "default"]}
    seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
    with tvm.transform.PassContext(opt_level=3):
        mod = seq(mod)
    return mod, params, shape_dict, dtype_dict
| 7,915 | 34.026549 | 96 | py |
tvm | tvm-main/tests/python/relay/backend/test_pass_lower_te.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Exercises the LowerTE pass.
import tvm
import tvm.testing
import logging
logging.basicConfig()
logger = logging.getLogger("test_pass_lower_te")
logger.setLevel(logging.INFO)
# Since the TE compiler needs a good refactor it has not been exposed as a 'standard' pass
# in relay.transform. For testing grab it directly.
LowerTE = tvm._ffi.get_global_func("relay.tec.LowerTE")
def transform(mod):
    """Plan devices, lower Relay primitives via LowerTE, and re-infer types."""
    logger.info("Starting module:\n%s", mod)
    host = tvm.target.Target("llvm")
    config = tvm.target.make_compilation_config(
        tvm.transform.PassContext(), tvm.target.Target("llvm", host=host)
    )
    # Apply the pipeline in order; each pass returns a new module.
    pipeline = [
        tvm.relay.transform.PlanDevices(config),
        tvm.relay.transform.InferType(),
        LowerTE("test", config),
        tvm.relay.transform.InferType(),
    ]
    for current_pass in pipeline:
        mod = current_pass(mod)
    logger.info("After LowerTE:\n%s", mod)
    return mod
# All attempts to use structural equalty tests against an expected IRModule parsed from
# Relay text were thwarted by the difficulty of setting up the expected call_lower attributes
# with the right GlobalVar instances. So the following assert structural correctness the hard way.
def test_lower_primitive():
    """LowerTE on an inline Primitive=1 function: the call must be rewritten to
    call_lowered of a generated PrimFunc with matching call metadata."""
    input_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1) -> Tensor[(5, 7), float32] {
                add(%x, %y)
            };
            %0(%a, %a)
        }
        """,
        "from_string",
        None,
        None,
    )
    actual_mod = transform(input_mod)
    # Expected:
    # def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
    #   %0 = (%a, %a);
    #   call_lowered(@test_fused_add, %0, metadata={relay_attrs={Primitive=1},all_prim_fn_vars=[@test_fused_add]})
    # }
    # def @test_fused_add = <lowered PrimFunc>
    main = actual_mod["main"]
    call = main.body
    assert call.op.name == "call_lowered"
    assert len(call.args) == 2
    assert call.args[0].name_hint == "test_fused_add"
    assert len(call.args[1].fields) == 2
    assert call.args[1].fields[0].name_hint == "a"
    assert call.args[1].fields[1].name_hint == "a"
    assert call.attrs.metadata["relay_attrs"].Primitive == 1
    assert len(call.attrs.metadata["all_prim_fn_vars"]) == 1
    assert call.attrs.metadata["all_prim_fn_vars"][0].name_hint == "test_fused_add"
    test_fused_add = actual_mod["test_fused_add"]
    assert isinstance(test_fused_add, tvm.tir.PrimFunc)
def test_lower_compiler():
    """LowerTE on a Compiler-annotated (BYOC) function: the call becomes
    call_lowered of an extern Relay function rather than a PrimFunc."""
    # Register a no-op external codegen so the "test_pass_lower_te" Compiler
    # annotation below resolves during lowering.
    @tvm._ffi.register_func("relay.ext.test_pass_lower_te")
    def relay_ext_test_pass_lower_te(func):
        return None
    input_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = fn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Primitive=1, Compiler="test_pass_lower_te", global_symbol="test_add") -> Tensor[(5, 7), float32] {
                add(%x, %y)
            };
            %0(%a, %a)
        }
        """,
        "from_string",
        None,
        None,
    )
    actual_mod = transform(input_mod)
    # Expected:
    # def @main(%a : Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
    #   %0 = (%a, %a)
    #   call_lowered(@test_add , %0, metadata={relay_attrs={Primitive=1, Compiler="test_pass_lower_te", global_symbol="test_add"}}, all_prim_fn_vars=[]})
    # }
    # def @test_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
    #   add(%x, %y)
    # }
    main = actual_mod["main"]
    call = main.body
    assert call.op.name == "call_lowered"
    assert len(call.args) == 2
    assert call.args[0].name_hint == "test_add"
    assert len(call.args[1].fields) == 2
    assert call.args[1].fields[0].name_hint == "a"
    assert call.args[1].fields[1].name_hint == "a"
    assert call.attrs.metadata["relay_attrs"].Primitive == 1
    assert call.attrs.metadata["relay_attrs"].Compiler == "test_pass_lower_te"
    assert call.attrs.metadata["relay_attrs"].global_symbol == "test_add"
    assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
    test_add = actual_mod["test_add"]
    assert isinstance(test_add, tvm.relay.Function)
    assert test_add.attrs["Extern"] == 1
def test_lower_extern():
    """LowerTE on an Extern=1 global function: the call is rewritten to
    call_lowered while the extern function itself stays a Relay function."""
    input_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            @my_add(%a, %a)
        }
        def @my_add(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
            add(%x, %y)
        }
        """,
        "from_string",
        None,
        None,
    )
    actual_mod = transform(input_mod)
    # Expected:
    # def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
    #   %0 = (%a, %a);
    #   call_lowered(@my_add, %0, metadata={relay_attrs={Extern=1}}, all_prim_fn_vars=[]})
    # }
    # def @my_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32], Extern=1) -> Tensor[(5, 7), float32] {
    #   add(%x, %y)
    # }
    main = actual_mod["main"]
    call = main.body
    assert call.op.name == "call_lowered"
    assert len(call.args) == 2
    assert call.args[0].name_hint == "my_add"
    assert len(call.args[1].fields) == 2
    assert call.args[1].fields[0].name_hint == "a"
    assert call.args[1].fields[1].name_hint == "a"
    assert call.attrs.metadata["relay_attrs"].Extern == 1
    assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
    test_add = actual_mod["my_add"]
    assert isinstance(test_add, tvm.relay.Function)
    assert test_add.attrs["Extern"] == 1
def test_lower_extern_with_dynamic_shape():
    """LowerTE on an extern function with a dynamic result shape: lowering must
    also generate and reference a shape PrimFunc in the call metadata."""
    input_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(?, ?), float32] {
            @my_dyn(%a, %a)
        }
        def @my_dyn(%x : Tensor[(5, 7), float32], %y : Tensor[(5, 7), float32], Extern=1) -> Tensor[(?, ?), float32] {
            add(%x, %y)
        }
        """,
        "from_string",
        None,
        None,
    )
    actual_mod = transform(input_mod)
    # Expected:
    # def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(?, ?), float32] {
    #   %0 = (%a, %a);
    #   call_lowered(@my_dyn, %0, metadata={prim_shape_fn_var='test_shape_func_add', relay_attrs={Extern=1}, prim_shape_fn_states=[2, 2], prim_shape_fn_num_inputs=2, all_prim_shape_fn_vars=['shape_func_add'], prim_shape_fn_num_outputs=1, all_prim_fn_vars=[]})
    # }
    # def @my_dyn(%x: Tensor[(5, 7), float32] , %y: Tensor[(5, 7), float32] , Extern=1) -> Tensor[(?, ?), float32] {
    #   add(%x, %y)
    # }
    # def @test_shape_func_add = <shape PrimFunc>
    main = actual_mod["main"]
    call = main.body
    assert call.op.name == "call_lowered"
    assert len(call.args) == 2
    assert call.args[0].name_hint == "my_dyn"
    assert len(call.args[1].fields) == 2
    assert call.args[1].fields[0].name_hint == "a"
    assert call.args[1].fields[1].name_hint == "a"
    assert call.attrs.metadata["prim_shape_fn_var"].name_hint == "test_shape_func_add"
    assert call.attrs.metadata["relay_attrs"].Extern == 1
    assert len(call.attrs.metadata["prim_shape_fn_states"]) == 2
    assert call.attrs.metadata["prim_shape_fn_states"][0] == 2
    assert call.attrs.metadata["prim_shape_fn_states"][1] == 2
    assert call.attrs.metadata["prim_shape_fn_num_inputs"] == 2
    assert len(call.attrs.metadata["all_prim_shape_fn_vars"]) == 1
    assert call.attrs.metadata["all_prim_shape_fn_vars"][0].name_hint == "test_shape_func_add"
    assert call.attrs.metadata["prim_shape_fn_num_outputs"] == 1
    assert len(call.attrs.metadata["all_prim_fn_vars"]) == 0
    my_dyn = actual_mod["my_dyn"]
    assert isinstance(my_dyn, tvm.relay.Function)
    assert my_dyn.attrs["Extern"] == 1
    shape_func_add = actual_mod["test_shape_func_add"]
    assert isinstance(shape_func_add, tvm.tir.PrimFunc)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 8,959 | 36.024793 | 259 | py |
tvm | tvm-main/tests/python/relay/backend/test_pass_remove_standalone_reshapes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Exercises the RemoveStandaloneReshapes pass.
import tvm
from tvm import relay
from tvm.relay.expr_functor import ExprMutator
import tvm.testing
from tvm.script import tir as T
# CPU/LLVM host and device targets used to build the VirtualDevice that the
# Relay text in the tests below references via meta[VirtualDevice][0].
HOST_DEVICE = tvm.device("cpu")
HOST_TARGET = tvm.target.Target("llvm")
CPU_DEVICE = tvm.device("cpu")
CPU_TARGET = tvm.target.Target("llvm").with_host(HOST_TARGET)
CPU = tvm.target.VirtualDevice(CPU_DEVICE, CPU_TARGET)  # device_type=1
# The pass under test is not exposed via relay.transform; fetch it over FFI.
RemoveStandaloneReshapes = tvm._ffi.get_global_func("relay._transform.RemoveStandaloneReshapes")
class MarkReshapeOnlyMutator(ExprMutator):
    """A pass for marking call_lowered as ReshapeOnly where reshapes exist unfused"""
    def __init__(self):
        ExprMutator.__init__(self)
    def visit_call(self, call):
        # call_lowered carries the lowered GlobalVar as its first argument;
        # match lowered functions whose name contains "reshape".
        if isinstance(call.args[0], tvm.ir.GlobalVar) and "reshape" in call.args[0].name_hint:
            # attrs = {"relay_attrs" : {"relay.reshape_only" : 1}}
            # Rebuild CallLoweredAttrs with relay.reshape_only=1 so that the
            # RemoveStandaloneReshapes pass treats this call as a pure reshape.
            dict_attrs = tvm.ir.make_node("DictAttrs", **{"relay.reshape_only": 1})
            attrs = tvm.ir.make_node(
                "relay.attrs.CallLoweredAttrs", **{"metadata": {"relay_attrs": dict_attrs}}
            )
            return relay.Call(call.op, call.args, attrs)
        return super().visit_call(call)
# Reshape should not be removed if its the first layer in the network
def test_first_reshape():
    """A reshape that consumes a network input must NOT be removed by
    RemoveStandaloneReshapes.

    Bug fix: the original assertion message ("Reshape should have been
    removed.") contradicted the asserted condition.
    """
    mod = tvm.ir.IRModule()
    @T.prim_func
    def reshape_primfunc(a: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        D = T.match_buffer(d, [128, 128])
        for i, j in T.grid(128, 128):
            D[i, j] = A[i, j]
    metatable = {"VirtualDevice": [CPU]}
    reshape_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
    mod[reshape_gv] = reshape_primfunc
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  virtual_device=meta[VirtualDevice][0]) {
            %1 = call_lowered(@reshape, (%x,) );
            let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
            %x_14
        }
        """,
        "from_string",
        mod,
        metatable,
    )
    # Mark the lowered reshape call as reshape-only, then run the pass.
    mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
    mod = RemoveStandaloneReshapes()(mod)
    reshapes_present = any("reshape" in gv.name_hint for gv in mod.get_global_vars())
    assert reshapes_present, "Reshape should not have been removed."
# When reshape layer is the last one in the network
def test_last_reshape():
    """A reshape-only call that terminates the network SHOULD be removed by
    RemoveStandaloneReshapes (its producer's output is used directly)."""
    mod = tvm.ir.IRModule()
    @T.prim_func
    def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        D = T.match_buffer(d, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                D[vi, vj] = A[vi, vk] * B[vj, vk]
    @T.prim_func
    def reshape_primfunc(a: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        D = T.match_buffer(d, [128, 128])
        for i, j in T.grid(128, 128):
            D[i, j] = A[i, j]
    metatable = {"VirtualDevice": [CPU]}
    mul_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
    mod[mul_gv] = mul_primfunc
    reshape_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    reshape_gv = relay.GlobalVar("reshape", type_annot=reshape_ty)
    mod[reshape_gv] = reshape_primfunc
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  %z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  virtual_device=meta[VirtualDevice][0]) {
            %0 = call_lowered(@multiply, (%x, %y, %z));
            let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
            %1 = call_lowered(@reshape, (%x_12,) );
            let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
            %x_14
        }
        """,
        "from_string",
        mod,
        metatable,
    )
    # Expected main:
    ##[version = "0.0.5"]
    # def @main(%x /* ty=Tensor[(128, 128), float32] */) -> Tensor[(128, 128), float32] {
    #  %0 = (%x, %y, %z);
    #  %1 = call_lowered(@multiply, %0);
    #  let %x_12: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
    #  let %x_14: Tensor[(128, 128), float32] = on_device(%1, constrain_result=True);
    #  %x_14
    # }
    mod["main"] = MarkReshapeOnlyMutator().visit(mod["main"])
    mod = RemoveStandaloneReshapes()(mod)
    reshapes_present = any(["reshape" in gv.name_hint for gv in mod.get_global_vars()])
    assert not reshapes_present, "Reshape should have been removed."
    return
# When reshape layer is not marked as reshape_only
def test_fused_reshape():
    """A reshape whose call is NOT marked relay.reshape_only must survive
    RemoveStandaloneReshapes.

    Bug fix: the original assertion message ("Reshape should have been
    removed.") contradicted the asserted condition; a stale copy-pasted
    "Expected main" comment was also dropped.
    """
    mod = tvm.ir.IRModule()
    @T.prim_func
    def mul_primfunc(a: T.handle, b: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        B = T.match_buffer(b, [128, 128])
        D = T.match_buffer(d, [128, 128])
        for i, j, k in T.grid(128, 128, 128):
            with T.block("update"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                D[vi, vj] = A[vi, vk] * B[vj, vk]
    @T.prim_func
    def fused_reshape_primfunc(a: T.handle, d: T.handle) -> None:
        A = T.match_buffer(a, [128, 128])
        D = T.match_buffer(d, [128, 128])
        for i, j in T.grid(128, 128):
            D[i, j] = A[i, j]
    metatable = {"VirtualDevice": [CPU]}
    mul_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    mul_gv = relay.GlobalVar("multiply", type_annot=mul_ty)
    mod[mul_gv] = mul_primfunc
    reshape_ty = relay.FuncType(
        [
            relay.TensorType((128, 128), "float32"),
        ],
        relay.TensorType((128, 128), "float32"),
    )
    reshape_gv = relay.GlobalVar("fused_reshape", type_annot=reshape_ty)
    mod[reshape_gv] = fused_reshape_primfunc
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  %y {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  %z {virtual_device=meta[VirtualDevice][0]}: Tensor[(128, 128), float32],
                  virtual_device=meta[VirtualDevice][0]) {
            %0 = call_lowered(@multiply, (%x, %y, %z));
            let %x_12: Tensor[(128, 128), float32] = on_device(%0, virtual_device=meta[VirtualDevice][0], constrain_result=True);
            %1 = call_lowered(@fused_reshape, (%x_12,) );
            let %x_14: Tensor[(128, 128), float32] = on_device(%1, virtual_device=meta[VirtualDevice][0], constrain_result=True);
            %x_14
        }
        """,
        "from_string",
        mod,
        metatable,
    )
    # No MarkReshapeOnlyMutator here: the call carries no relay.reshape_only
    # attribute, so the pass must leave @fused_reshape in place.
    mod = RemoveStandaloneReshapes()(mod)
    reshapes_present = any("reshape" in gv.name_hint for gv in mod.get_global_vars())
    assert reshapes_present, "Fused reshape should not have been removed."
if __name__ == "__main__":
    # Discover and run every test in this file via TVM's pytest wrapper.
    tvm.testing.main()
| 9,340 | 34.789272 | 127 | py |
tvm | tvm-main/tests/python/relay/qnn/test_clip_legalization.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test that do-nothing requantize -> clip operators are removed during legalization."""
import numpy as np
import pytest
import tvm
from tvm import nd, relay
from tvm.relay import transform
def run_opt_pass(expr, passes):
    """Apply *passes* (a pass or list of passes) to *expr* at opt_level=3.

    Returns the optimized "main" function when *expr* is a relay.Function,
    otherwise the body of the optimized "main".
    """
    if not isinstance(passes, list):
        passes = [passes]
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        module = tvm.transform.Sequential(passes)(module)
    main_fn = module["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def tvm_const(obj):
    """Wrap a host value in a relay.Constant backed by a TVM NDArray."""
    array = nd.array(obj)
    return relay.Constant(array)
@pytest.mark.parametrize(
    "dtype,min_val,max_val,is_redundant",
    [
        ("int8", -128, 127, True),
        ("int8", -127, 127, False),
        ("int16", -128, 127, False),
        ("int32", -2147483648, 2147483647, True),
    ],
)
def test_removes_redundant_requantize_clip_ops(dtype, min_val, max_val, is_redundant):
    """A qnn.requantize -> clip pair is removed during legalization exactly when
    the clip bounds coincide with the min/max representable values of ``dtype``."""
    data = relay.var("input", shape=(1, 3, 3, 4), dtype="int32")
    requantized = relay.qnn.op.requantize(
        data,
        tvm_const(np.float32(1.0)),
        tvm_const(np.int32(0)),
        tvm_const(np.float32(1.0)),
        tvm_const(np.int32(-128)),
        axis=3,
        out_dtype=dtype,
    )
    clipped = relay.clip(requantized, a_min=min_val, a_max=max_val)
    func = relay.Function([data], clipped)

    unmodified = run_opt_pass(func, transform.InferType())
    legalized = run_opt_pass(func, transform.Legalize())

    if is_redundant:
        # The clip was folded away, leaving requantize as the output op.
        assert legalized.body.op.name == "qnn.requantize"
        assert not tvm.ir.structural_equal(unmodified, legalized)
    else:
        # The clip is load-bearing: it must survive, and nothing else changes.
        assert legalized.body.op.name == "clip"
        tvm.ir.assert_structural_equal(unmodified, legalized)
def test_ignores_standalone_clip_ops():
    """Legalization only targets qnn.requantize -> clip sequences; a clip that is
    not fed by a requantize must be left untouched."""
    data = relay.var("x", shape=(1, 3, 3, 4), dtype="int8")
    func = relay.Function([data], relay.clip(data, a_min=-128, a_max=127))
    before = run_opt_pass(func, transform.InferType())
    after = run_opt_pass(func, transform.Legalize())
    tvm.ir.assert_structural_equal(before, after)
| 3,282 | 36.306818 | 98 | py |
tvm | tvm-main/tests/python/relay/qnn/test_qnn_channel_stripping.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test QNN channel stripping legalization pass."""
import numpy as np
import tvm
from tvm import nd, relay
from tvm.relay import transform
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.testing.aot import generate_ref_data
from tvm.topi.arm_cpu.qnn_legalize import legalize_bias_add
def run_opt_pass(expr, passes):
    """Run *passes* over *expr* inside a fresh module at opt_level=3 and return
    the resulting "main" (or its body when *expr* is not a relay.Function)."""
    pass_seq = tvm.transform.Sequential(passes if isinstance(passes, list) else [passes])
    module = tvm.IRModule.from_expr(expr)
    with tvm.transform.PassContext(opt_level=3):
        module = pass_seq(module)
    main_fn = module["main"]
    return main_fn if isinstance(expr, relay.Function) else main_fn.body
def execute_relay_func(relay_func, in_data):
    """Evaluate *relay_func* with *in_data* bound to "input"; return "output"."""
    outputs = generate_ref_data(tvm.IRModule.from_expr(relay_func), {"input": in_data})
    return outputs["output"]
def tvm_const(obj):
    """Lift a host (numpy) value into a Relay constant expression."""
    nd_value = nd.array(obj)
    return relay.Constant(nd_value)
def make_test_conv_depthwise_conv():
    """Generates a convolution -> depthwise_convolution -> convolution pattern that can have
    channels stripped. The structure here mirrors MobileNetV1's layers 8-10."""
    input_var = relay.var("input", shape=(1, 12, 12, 4), dtype="int8")
    # 1x1 pointwise kernel in HWIO layout. Columns 0 and 2 are all-zero, so
    # output channels 0 and 2 depend only on the bias below — these are the
    # "empty" channels the stripping pass should detect.
    kernel_1 = np.array(
        [[0, 1, 0, -2], [0, 3, 0, 5], [0, 5, 0, -9], [0, 2, 0, 21]], dtype="int8"
    ).reshape((1, 1, 4, 4))
    input_scale_1 = np.float32(0.5)
    # Per-output-channel quantization scales (axis=3 below).
    output_scale_1 = np.array([0.5, 2.0, 0.25, 4.0], dtype="float32")
    out = relay.qnn.op.conv2d(
        input_var,
        tvm_const(kernel_1),
        tvm_const(np.int32(-128)),
        tvm_const(np.int32(0)),
        tvm_const(input_scale_1),
        tvm_const(output_scale_1),
        channels=4,
        kernel_size=(1, 1),
        padding=(0, 0),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    bias_1 = np.array([198, -2, 19, 10], dtype="int32")
    out = relay.nn.bias_add(
        out,
        tvm_const(bias_1),
        axis=3,
    )
    input_scale_2 = np.float32(0.25)
    # Requantize from per-channel (input_scale_1 * output_scale_1) back to a
    # single int8 scale feeding the depthwise layer.
    out = relay.qnn.op.requantize(
        out,
        tvm_const(input_scale_1 * output_scale_1),
        tvm_const(np.int32(0)),
        tvm_const(input_scale_2),
        tvm_const(np.int32(-128)),
        axis=3,
        out_dtype="int8",
    )
    # Outputs here will be fixed to {0: 70, 2: -118}
    # 3x3 depthwise kernel (groups=4, HWOI layout): one 3x3 filter per channel.
    kernel_2 = np.array(
        [
            [0, 6, 4, 2],
            [8, 6, -3, -1],
            [-2, -5, 3, -8],
            [-7, 5, 1, 9],
            [-4, -9, -8, -2],
            [-1, 4, -5, 3],
            [-4, -9, 2, 6],
            [9, -6, 0, 5],
            [-3, 8, 1, -7],
        ],
        dtype="int8",
    ).reshape((3, 3, 4, 1))
    output_scale_2 = np.array([0.25, 0.125, 2.0, 0.125], dtype="float32")
    out = relay.qnn.op.conv2d(
        out,
        tvm_const(kernel_2),
        tvm_const(np.int32(-128)),
        tvm_const(np.int32(0)),
        tvm_const(input_scale_2),
        tvm_const(output_scale_2),
        channels=4,
        groups=4,
        kernel_size=(3, 3),
        padding=(1, 1),
        data_layout="NHWC",
        kernel_layout="HWOI",
    )
    bias_2 = np.array([4582, 4, -12, 15], dtype="int32")
    out = relay.nn.bias_add(
        out,
        tvm_const(bias_2),
        axis=3,
    )
    input_scale_3 = np.float32(0.125)
    out = relay.qnn.op.requantize(
        out,
        tvm_const(input_scale_2 * output_scale_2),
        tvm_const(np.int32(0)),
        tvm_const(input_scale_3),
        tvm_const(np.int32(-128)),
        axis=3,
        out_dtype="int8",
    )
    # Outputs here will be fixed to {0: 127, 2: -128}
    # Final 1x1 conv consuming all four channels (rows 1 and 2 — the constant
    # input channels — are zero, so their contribution can be folded into bias).
    kernel_3 = np.array(
        [[4, -2, 9, 9], [0, 0, 0, 0], [0, 0, 0, 0], [-1, 1, -1, 1]], dtype="int8"
    ).reshape((1, 1, 4, 4))
    output_scale_3 = np.array([0.25, 0.125, 1.0, 0.5], dtype="float32")
    out = relay.qnn.op.conv2d(
        out,
        tvm_const(kernel_3),
        tvm_const(np.int32(-128)),
        tvm_const(np.int32(0)),
        tvm_const(input_scale_3),
        tvm_const(output_scale_3),
        channels=4,
        kernel_size=(1, 1),
        padding=(0, 0),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    bias_3 = np.array([1, -1, 4, 6], dtype="int32")
    out = relay.nn.bias_add(
        out,
        tvm_const(bias_3),
        axis=3,
    )
    return relay.Function([input_var], out)
def make_test_conv_pool_dense():
    """Generates a convolution -> pool -> dense pattern that can have channels stripped. The
    structure here mirrors MobileNetV1's final few layers."""
    input_var = relay.var("input", shape=(1, 3, 3, 4), dtype="int8")
    # 1x1 kernel in HWIO layout. Columns 0 and 2 are all-zero, so output
    # channels 0 and 2 carry only the bias — candidates for stripping.
    kernel = np.array(
        [[0, 1, 0, -2], [0, 3, 0, 5], [0, 5, 0, -9], [0, 2, 0, 21]], dtype="int8"
    ).reshape((1, 1, 4, 4))
    input_scale = np.float32(0.029626124)
    # Per-output-channel quantization scales (axis=3 below).
    output_scale = np.array([0.5, 2.0, 0.25, 4.0], dtype="float32")
    out = relay.qnn.op.conv2d(
        input_var,
        tvm_const(kernel),
        tvm_const(np.int32(-128)),
        tvm_const(np.int32(0)),
        tvm_const(input_scale),
        tvm_const(output_scale),
        channels=4,
        kernel_size=(1, 1),
        padding=(0, 0),
        data_layout="NHWC",
        kernel_layout="HWIO",
    )
    bias_1 = np.array([198, -2, 19, 10], dtype="int32")
    out = relay.nn.bias_add(
        out,
        tvm_const(bias_1),
        axis=3,
    )
    # Requantize the per-channel result to a single int8 scale.
    out = relay.qnn.op.requantize(
        out,
        tvm_const(input_scale * output_scale),
        tvm_const(np.int32(0)),
        tvm_const(np.float32(0.015656913)),
        tvm_const(np.int32(-128)),
        axis=3,
        out_dtype="int8",
    )
    # Global 3x3 average pool done in int32 to avoid int8 accumulator overflow.
    out = relay.cast(out, dtype="int32")
    out = relay.nn.avg_pool2d(
        out,
        pool_size=[3, 3],
        strides=[3, 3],
        layout="NHWC",
    )
    out = relay.cast(out, dtype="int8")
    # The channel stripping logic expects two reshape operators
    out = relay.reshape(out, newshape=[-1, 4])
    out = relay.reshape(out, newshape=[-1, 4])
    dense_weights = np.array([[15, -2, -3, 11], [12, -10, 13, -10]], dtype="int8")
    out = relay.qnn.op.dense(
        out,
        tvm_const(dense_weights),
        tvm_const(np.int32(-128)),
        tvm_const(np.int32(0)),
        tvm_const(np.float32(0.015656913)),
        tvm_const(np.float32(0.0047202893)),
        units=2,
        out_dtype="int32",
    )
    dense_bias = np.array([1463, -1463], dtype="int32")
    out = relay.nn.bias_add(
        out,
        tvm_const(dense_bias),
        axis=1,
    )
    return relay.Function([input_var], out)
def test_conv_depthwise_conv():
    """qnn_legalize.py must strip the empty output channels from the
    conv -> depthwise conv -> conv pattern by folding them into bias_add,
    without changing the function's interface or numerics."""
    func = make_test_conv_depthwise_conv()
    with TempOpAttr("nn.bias_add", "FTVMLegalize", legalize_bias_add):
        before = run_opt_pass(func, transform.InferType())
        after = run_opt_pass(func, transform.Legalize())

    # Channel stripping must not change the function's signature.
    assert before.checked_type == after.checked_type
    # Two of the four internal channels should have been removed.
    assert tuple(before.body.args[0].args[0].checked_type.shape) == (1, 12, 12, 4)
    assert tuple(after.body.args[0].args[0].checked_type.shape) == (1, 12, 12, 2)

    # Numerical behavior must be preserved exactly.
    np.random.seed(12402)  # fixed seed for repeatability
    input_data = np.random.randint(-128, 128, size=(1, 12, 12, 4), dtype="int8")
    expected = execute_relay_func(before, np.copy(input_data))
    actual = execute_relay_func(after, np.copy(input_data))
    np.testing.assert_array_equal(expected, actual)
def test_conv_pool_dense():
    """qnn_legalize.py must strip the empty output channels from the
    conv -> avg_pool2d -> dense pattern by folding them into bias_add,
    without changing the function's interface or numerics."""
    func = make_test_conv_pool_dense()
    with TempOpAttr("nn.bias_add", "FTVMLegalize", legalize_bias_add):
        before = run_opt_pass(func, transform.InferType())
        after = run_opt_pass(func, transform.Legalize())

    # Channel stripping must not change the function's signature.
    assert before.checked_type == after.checked_type
    # Two of the four internal channels should have been removed.
    assert tuple(before.body.args[0].args[0].checked_type.shape) == (1, 4)
    assert tuple(after.body.args[0].args[0].checked_type.shape) == (1, 2)

    # Numerical behavior must be preserved exactly.
    np.random.seed(12402)  # fixed seed for repeatability
    input_data = np.random.randint(-128, 128, size=(1, 3, 3, 4), dtype="int8")
    expected = execute_relay_func(before, np.copy(input_data))
    actual = execute_relay_func(after, np.copy(input_data))
    np.testing.assert_array_equal(expected, actual)
| 9,754 | 31.516667 | 97 | py |
tvm | tvm-main/tests/python/relay/qnn/test_canonicalizations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable
import numpy as np
from tvm import relay
from tvm.relay.qnn.op import canonicalizations
class TestIntegerTableLookupTable:
    """Consists of tests testing functionality of creating lookup tables for integer operations."""
    def fake_identity_func_numpy(self, arr: np.ndarray):
        """Reference floating-point function: identity (cast to float32)."""
        return arr.astype("float32")
    def fake_identity_func_relay(
        self,
        floating_point_func: Callable[[np.ndarray], np.ndarray],
        input_arg=None,
        in_scale=relay.const(1.0, dtype="float32"),
        in_zero_point=relay.const(0, dtype="int32"),
        out_scale=relay.const(1.0, dtype="float32"),
        out_zero_point=relay.const(0, dtype="int32"),
        in_axis=-1,
        out_axis=-1,
        in_dtype="uint8",
        out_dtype="uint8",
    ):
        """Build the integer lookup-table op for ``floating_point_func``.

        Returns a pair ``(relay_expr, input_ndarray)``. When ``input_arg`` is
        omitted, every possible 8-bit value (0..255 reinterpreted as
        ``in_dtype``) is used as the input, so the whole table is exercised.
        """
        if input_arg is None:
            input_arg = relay.const(np.arange(0, 256, dtype="uint8").view(in_dtype))
        return (
            canonicalizations.create_integer_lookup_op(
                input_arg=input_arg,
                floating_point_func=floating_point_func,
                in_scale=in_scale,
                in_zero_point=in_zero_point,
                out_scale=out_scale,
                out_zero_point=out_zero_point,
                in_axis=in_axis,
                out_axis=out_axis,
                in_dtype=in_dtype,
                out_dtype=out_dtype,
            ),
            input_arg.data.numpy(),
        )
    def dequantize_numpy(self, np_arr, np_scale=1.0, np_zero_point=0):
        """Affine dequantization: ``(q - zero_point) * scale`` in float."""
        return (np_arr.astype("int32") - np_zero_point) * np_scale
    def run_function_test(
        self,
        in_scale: float,
        in_zero_point: int,
        out_scale: float,
        out_zero_point: int,
        in_dtype: str,
        out_dtype: str,
        floating_point_func: Callable[[np.ndarray], np.ndarray],
        input_arg: relay.Expr = None,
        rtol=1e-7,
        atol=0,
    ):
        """Check the lookup-table op against applying ``floating_point_func``
        directly to the dequantized input, within ``rtol``/``atol``."""
        relay_lookup, input_arg = self.fake_identity_func_relay(
            input_arg=input_arg,
            floating_point_func=floating_point_func,
            in_scale=relay.const(in_scale, "float32"),
            in_zero_point=relay.const(in_zero_point, "int32"),
            out_scale=relay.const(out_scale, "float32"),
            out_zero_point=relay.const(out_zero_point, "int32"),
            in_dtype=in_dtype,
            out_dtype=out_dtype,
        )
        # Constant-fold the lookup expression to a concrete ndarray.
        result = canonicalizations.run_const_expr(relay_lookup)
        np.testing.assert_allclose(
            floating_point_func(
                self.dequantize_numpy(input_arg, np_scale=in_scale, np_zero_point=in_zero_point)
            ),
            self.dequantize_numpy(result, np_scale=out_scale, np_zero_point=out_zero_point),
            atol=atol,
            rtol=rtol,
        )
    """Test mapping between different input/output dtypes"""
    def test_int8_to_int8(self):
        self.run_function_test(
            in_scale=1.0,
            in_zero_point=0,
            out_scale=1.0,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=self.fake_identity_func_numpy,
        )
    def test_uint8_to_uint8(self):
        self.run_function_test(
            in_scale=1.0,
            in_zero_point=128,
            out_scale=1.0,
            out_zero_point=128,
            in_dtype="uint8",
            out_dtype="uint8",
            floating_point_func=self.fake_identity_func_numpy,
        )
    def test_int8_to_uint8(self):
        self.run_function_test(
            in_scale=1.0,
            in_zero_point=0,
            out_scale=1.0,
            out_zero_point=128,
            in_dtype="int8",
            out_dtype="uint8",
            floating_point_func=self.fake_identity_func_numpy,
        )
    def test_uint8_to_int8(self):
        self.run_function_test(
            in_scale=1.0,
            in_zero_point=128,
            out_scale=1.0,
            out_zero_point=0,
            in_dtype="uint8",
            out_dtype="int8",
            floating_point_func=self.fake_identity_func_numpy,
        )
    """Test different input shapes"""
    def test_keep_input_shapes(self):
        # input in floating point ~[-2, 2], final output ~[0, 8]
        self.run_function_test(
            input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 8, 8])),
            in_scale=0.015,
            in_zero_point=0,
            out_scale=16 / 256,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=self.fake_identity_func_numpy,
            atol=0.03,
            rtol=0.01,
        )
        self.run_function_test(
            input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 2, 64])),
            in_scale=0.015,
            in_zero_point=0,
            out_scale=16 / 256,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=self.fake_identity_func_numpy,
            atol=0.03,
            rtol=0.01,
        )
        self.run_function_test(
            input_arg=relay.const(np.arange(-128, 128).astype("int8").reshape([2, 128])),
            in_scale=0.015,
            in_zero_point=0,
            out_scale=16 / 256,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=self.fake_identity_func_numpy,
            atol=0.03,
            rtol=0.01,
        )
    """Test mapping with different in/out qparams works."""
    def test_different_in_out_qparams(self):
        self.run_function_test(
            in_scale=1.0,
            in_zero_point=128,
            out_scale=1.0,
            out_zero_point=128,
            in_dtype="uint8",
            out_dtype="uint8",
            floating_point_func=self.fake_identity_func_numpy,
            atol=1,  # numbers range from -128 -> 128 so not that big error
            rtol=0,
        )
    """Test some simple functions"""
    def test_tanh(self):
        # 1 / 64 in scale -- input range is ~ (-2, 2), tanh(+-2) ~= +-1
        # 1 / 128 out_scale -- output range is ~(-1, 1)
        self.run_function_test(
            input_arg=relay.const(np.arange(-128, 128).astype("int8")),
            in_scale=1 / 64,
            in_zero_point=0,
            out_scale=1 / 128,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=np.tanh,
            atol=0.01,
            rtol=0.01,
        )
    def test_exp(self):
        # input in floating point ~[-2, 2], final output ~[0, 8]
        self.run_function_test(
            input_arg=relay.const(np.arange(-128, 128).astype("int8")),
            in_scale=0.015,
            in_zero_point=0,
            out_scale=16 / 256,
            out_zero_point=0,
            in_dtype="int8",
            out_dtype="int8",
            floating_point_func=np.exp,
            atol=0.03,
            rtol=0.01,
        )
| 7,821 | 32.715517 | 99 | py |
tvm | tvm-main/tests/python/relay/collage/test_sub_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import logging
import tvm.testing
logging.basicConfig(level=logging.INFO)
# FFI handle to the C++ "relay.collage.PartitionForTesting" helper; `run` below
# invokes it with (max_outputs, allow_taps, compiler, indexes, labels) to build
# the partitioning pass under test.
partition_for_testing = tvm._ffi.get_global_func("relay.collage.PartitionForTesting")
def print_with_indexes(mod):
    """Print *mod* with each node's post-dfs index captured into its span."""
    annotated = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
    print(annotated)
def run(in_mod, expected_mod, max_outputs, allow_taps, compiler, map):
    """Partition *in_mod* at the post-dfs indexes given by *map* (label ->
    index list) and check the result is structurally equal to *expected_mod*."""
    expected_mod = tvm.relay.transform.InferType()(expected_mod)
    in_mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(
        tvm.relay.transform.InferType()(in_mod)
    )
    # Flatten the label -> indexes mapping into two parallel lists.
    indexes = []
    labels = []
    for label, index_list in map.items():
        for index in index_list:
            indexes.append(index)
            labels.append(label)
    partition = partition_for_testing(max_outputs, allow_taps, compiler, indexes, labels)
    actual_mod = partition(in_mod)
    if not tvm.ir.structural_equal(actual_mod, expected_mod, True):
        # Dump everything in full so failures are easy to diagnose.
        print("Input module:")
        print(in_mod)
        print("Expected module:")
        print(expected_mod)
        print("Actual module:")
        print(actual_mod)
        # Assert again so the disagreeing sub-expressions are reported.
        tvm.ir.assert_structural_equal(actual_mod, expected_mod, map_free_vars=True)
def test_single_op():
    """A single indexed node (node 7) is extracted into a "foo" function."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = add(%c, %d); // node 7
              subtract(%0, %1)
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);
              %1 = (fn(%x, %y, Compiler="foo") { add(%x, %y) })(%c, %d);
              subtract(%0, %1)
            }
            """
        )

    run(make_input(), make_expected(), 1, False, "foo", {"": [7]})
def test_multi_output():
    """Two nodes forming two outputs are only partitioned when max_outputs >= 2."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b); // node 6
              %1 = add(%c, %d); // node 7
              subtract(%0, %1)
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32],
                      %c: Tensor[(5, 7), float32], %d: Tensor[(5, 7), float32]) {
              %0 = (fn(%w, %x, %y, %z, Compiler="foo") { (add(%y, %z), add(%w, %x)) })(%c, %d, %a, %b);
              %1 = %0.0;
              %2 = %0.1;
              subtract(%1, %2)
            }
            """
        )

    # No rewrite since 2 outputs
    run(make_input(), make_input(), 1, False, "foo", {"": [6, 7]})
    # Rewrite
    run(make_input(), make_expected(), 2, False, "foo", {"": [6, 7]})
def test_classic_conv2d_add_relu():
    """The classic conv2d+add+relu chain is extracted into one "foo" function."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
                      %c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
              %0 = nn.conv2d(%a, %b); // node 8
              %1 = add(%0, %c); // node 9
              %2 = nn.relu(%1); // node 10
              subtract(%2, %d)
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32],
                      %c: Tensor[(5, 2, 28, 28), float32], %d: Tensor[(5, 2, 28, 28), float32]) {
              %2 = (fn(%x, %y, %z, Compiler="foo") {
                %0 = nn.conv2d(%x, %y);
                %1 = add(%0, %z);
                nn.relu(%1)
              })(%a, %b, %c);
              subtract(%2, %d)
            }
            """
        )

    run(make_input(), make_expected(), 1, False, "foo", {"": [8, 9, 10]})
def test_diamond_single_output():
    """A diamond whose join is included yields a single-output partition."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              %0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);  // node 5
              %1 = nn.relu(%0);                              // node 6
              %2 = nn.relu(%1);                              // node 7
              %3 = nn.leaky_relu(%0, alpha=0f);              // node 9
              add(%2, %3)                                    // node 10
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              (fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
                %0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
                %1 = nn.relu(%0);
                %2 = nn.relu(%1);
                %3 = nn.leaky_relu(%0, alpha=0f);
                add(%2, %3)
              })(%a, %b)
            }
            """
        )

    run(make_input(), make_expected(), 1, False, "foo", {"": [5, 6, 7, 9, 10]})
def test_diamond_multi_output():
    """A diamond without its join yields a two-output (tuple) partition."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              %0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);  // node 5
              %1 = nn.relu(%0);                              // node 6
              %2 = nn.relu(%1);                              // node 7
              %3 = nn.leaky_relu(%0, alpha=0f);              // node 9
              add(%2, %3)
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              %4 = (fn (%x: Tensor[(5, 3, 32, 32), float32], %y: Tensor[(2, 3, 5, 5), float32], Compiler="foo") {
                %0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
                %1 = nn.relu(%0);
                %2 = nn.relu(%1);
                %3 = nn.leaky_relu(%0, alpha=0f);
                (%2, %3)
              })(%a, %b);
              %5 = %4.0;
              %6 = %4.1;
              add(%5, %6)
            }
            """
        )

    run(make_input(), make_expected(), 2, False, "foo", {"": [5, 6, 7, 9]})
def test_with_tap():
    """An intermediate result also consumed outside the partition (a "tap")
    is only allowed when allow_taps is True."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              %0 = nn.conv2d(%a, %b, padding=[0, 0, 0, 0]);  // node 5
              %1 = nn.relu(%0);                              // node 6
              add(%1, %0)
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 3, 32, 32), float32], %b: Tensor[(2, 3, 5, 5), float32]) {
              %2 = (fn (%x, %y, Compiler="foo") {
                %0 = nn.conv2d(%x, %y, padding=[0, 0, 0, 0]);
                %1 = nn.relu(%0);
                (%0, %1)
              })(%a, %b);
              %3 = %2.1;
              %4 = %2.0;
              add(%3, %4)
            }
            """
        )

    # No rewrite since has tap
    run(make_input(), make_input(), 2, False, "foo", {"": [5, 6]})
    # Rewrite
    run(make_input(), make_expected(), 2, True, "foo", {"": [5, 6]})
def test_no_cycles():
    """Partitions that would create a dataflow cycle are rejected; including
    the intermediate node makes the partition legal."""

    def make_input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
              %0 = add(%a, %b);  // node 3
              %1 = add(%0, %b);
              add(%1, %b)        // node 5
            }
            """
        )

    def make_expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) {
              (fn(%x, %y, Compiler="foo") {
                %0 = add(%x, %y);
                %1 = add(%0, %y);
                add(%1, %y)
              })(%a, %b)
            }
            """
        )

    # No rewrite since would create cycle
    run(make_input(), make_input(), 2, False, "foo", {"": [3, 5]})
    # No cycle
    run(make_input(), make_expected(), 2, False, "foo", {"": [3, 4, 5]})
def test_labels_direct_connection():
    """Indexes sharing a non-empty label are wrapped in nested "Composite"
    functions inside the outer Compiler="foo" function."""
    def input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32]) {
              %0 = nn.relu(%a); // node 3
              %1 = nn.relu(%0); // node 4
              %2 = nn.relu(%1); // node 5
              %3 = nn.relu(%1); // node 6
              %4 = add(%2, %3); // node 7
              %5 = nn.relu(%4); // node 8
              %6 = nn.relu(%4); // node 9
              %7 = add(%5, %6); // node 10
              nn.relu(%7) // node 11
            }
            """
        )
    def expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32]) {
              (fn(%aa: Tensor[(5, 7), float32], Compiler="foo") {
                %0 = nn.relu(%aa);
                %4 = (fn(%y, Composite="a") {
                  %1 = nn.relu(%y);
                  %2 = nn.relu(%1);
                  %3 = nn.relu(%1);
                  add(%2, %3)
                })(%0);
                %7 = (fn(%z, Composite="b") {
                  %5 = nn.relu(%z);
                  %6 = nn.relu(%z);
                  add(%5, %6)
                })(%4);
                nn.relu(%7)
              })(%a)
            }
            """
        )
    # Label "" covers nodes 3 and 11; labels "a"/"b" become Composite groups.
    run(input(), expected(), 1, False, "foo", {"": [3, 11], "a": [4, 5, 6, 7], "b": [8, 9, 10]})
def test_labels_nested_tap():
    """A tapped value (%2, consumed both inside and after the partition) is
    threaded out through tuple outputs of the nested Composite functions."""
    def input():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32]) {
              %0 = nn.relu(%a); // node 3
              %1 = nn.relu(%0); // node 4
              %2 = nn.relu(%1); // node 5
              %3 = nn.relu(%1); // node 6
              %4 = add(%2, %3); // node 7
              %5 = nn.relu(%4); // node 8
              %6 = nn.relu(%4); // node 9
              %7 = add(%5, %6); // node 10
              add(%2, %7) // node 11
            }
            """
        )
    def expected():
        return tvm.relay.fromtext(
            """
            #[version = "0.0.5"]
            def @main(%a: Tensor[(5, 7), float32]) {
              %0 = nn.relu(%a);
              %9 = (fn(%x: Tensor[(5, 7), float32], Compiler="foo") {
                %5 = (fn(%y, Composite="a") {
                  %1 = nn.relu(%y);
                  %2 = nn.relu(%1);
                  %3 = nn.relu(%1);
                  %4 = add(%2, %3);
                  (%2, %4)
                })(%x);
                %8 = (fn(%z, Composite="b") {
                  %6 = nn.relu(%z);
                  %7 = nn.relu(%z);
                  add(%6, %7)
                })(%5.1);
                (%5.0, %8)
              })(%0);
              add(%9.0, %9.1)
            }
            """
        )
    # allow_taps=True and max_outputs=2 so the tapped %2 can be returned too.
    run(input(), expected(), 2, True, "foo", {"a": [4, 5, 6, 7], "b": [8, 9, 10]})
if __name__ == "__main__":
    # Discover and run every test in this file via TVM's pytest wrapper.
    tvm.testing.main()
| 12,777 | 31.93299 | 113 | py |
tvm | tvm-main/tests/python/relay/collage/menangerie.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of Relay models for exercising Collage."""
import tvm
import onnx
import numpy as np
import logging
import tvm.contrib.target.onnx
# Directory holding the pre-downloaded ONNX model files referenced below.
MODEL_PREFIX = "/home/mbs/gauntlet/models/"
# Each descriptor records an ONNX filename plus the input names, shapes and
# dtypes needed to feed the model, and the dtype of its "main" function.
MNIST = {
    "name": "mnist",
    "filename": "mnist-8.onnx",
    "input_shapes": {"Input3": [1, 1, 28, 28]},
    "input_dtypes": {"Input3": "float32"},
    "main_dtype": "float32",
}
GPT2 = {
    "name": "gpt2",
    "filename": "gpt2.onnx",
    "input_shapes": {"input1": [1, 50, 32]},
    "input_dtypes": {"input1": "int64"},
    "main_dtype": "float32",
}
RESNET50V2 = {
    "name": "resnet50",
    "filename": "resnet50-v2-7.onnx",
    "input_shapes": {"data": [1, 3, 224, 224]},
    "input_dtypes": {"data": "float32"},
    "main_dtype": "float32",
}
MOBILENETV2 = {
    "name": "mobilenet",
    "filename": "mobilenetv2-1.0.onnx",
    "input_shapes": {"data": [1, 3, 224, 224]},
    "input_dtypes": {"data": "float32"},
    "main_dtype": "float32",
}
# Note that resnext50_32_4d below was extracted directly from the pytorch model and not from any onnx file.
RESNEXT50_32_4d = {
    "name": "resnext50_32_4d",
    "filename": "resnext50_32x4d.onnx",
    "input_shapes": {"x": [1, 64, 56, 56]},
    "input_dtypes": {"x": "float32"},
    "main_dtype": "float32",
}
def make_const(dtype, shape):
    """Return a Relay constant of *shape* filled with random values of *dtype*."""
    values = np.random.rand(*shape).astype(dtype)
    return tvm.relay.const(values)
def make_consts(dtype, shapes):
    """Return one random Relay constant per entry in *shapes*."""
    consts = []
    for shape in shapes:
        consts.append(make_const(dtype, shape))
    return consts
def mnist_consts(dtype):
    """Random constants shaped like the six parameters of the MNIST model
    (indexed as meta[relay.Constant][0..5] in mnist() below)."""
    shapes = [
        (8, 1, 5, 5),  # conv1 kernel
        (8, 1, 1),  # conv1 bias
        (16, 8, 5, 5),  # conv2 kernel
        (16, 1, 1),  # conv2 bias
        (10, 256),  # dense weight
        (1, 10),  # dense bias
    ]
    return make_consts(dtype, shapes)
def mnist():
    """Parse the MNIST Relay program with random parameters and return its
    test descriptor (name, input shapes/dtypes, module, params, main dtype)."""
    constants = {"relay.Constant": mnist_consts("float32")}
    module = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1, 1, 28, 28), float32]) -> Tensor[(1, 10), float32] {
          %0 = nn.pad(%x, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
          %1 = nn.conv2d(%0, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=8, kernel_size=[5, 5]);
          %2 = add(%1, meta[relay.Constant][1]);
          %3 = nn.relu(%2);
          %4 = nn.max_pool2d(%3, pool_size=[2, 2], strides=[2, 2], padding=[0, 0, 0, 0]);
          %5 = nn.pad(%4, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
          %6 = nn.conv2d(%5, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=16, kernel_size=[5, 5]);
          %7 = add(%6, meta[relay.Constant][3]);
          %8 = nn.relu(%7);
          %9 = nn.max_pool2d(%8, pool_size=[3, 3], strides=[3, 3], padding=[0, 0, 0, 0]);
          %10 = reshape(%9, newshape=[1, 256]);
          %11 = nn.dense(%10, meta[relay.Constant][4], units=None, out_dtype="float32");
          add(%11, meta[relay.Constant][5])
        }
        """,
        "from_string",
        None,
        constants,
    )
    descriptor = {
        "name": "mnist",
        "input_shapes": {"x": [1, 1, 28, 28]},
        "input_dtypes": {"x": "float32"},
        "mod": module,
        "params": None,
        "main_dtype": "float32",
    }
    return descriptor
def gpt2_consts(dtype):
    """Random constants matching the parameter shapes of the gpt2() model.

    Layout (172 shapes total, consumed in order as meta[relay.Constant][i]
    in gpt2()): token embedding and position embedding, then a fixed run of
    14 per-block parameter shapes repeated for each of the 12 transformer
    blocks, then the final layer-norm scale and bias.
    """
    # Shapes used once per transformer block, in consumption order:
    # layer-norm scale/bias, QKV dense weight/bias (units=2304, split 3 ways),
    # two (1, 1, 32, 32) tensors applied to the attention scores
    # (multiply/subtract before softmax), attention output dense weight/bias,
    # second layer-norm scale/bias, MLP up-projection weight/bias (3072) and
    # down-projection weight/bias (back to 768).
    per_block = [
        (768,),
        (768,),
        (2304, 768),
        (2304,),
        (1, 1, 32, 32),
        (1, 1, 32, 32),
        (768, 768),
        (768,),
        (768,),
        (768,),
        (3072, 768),
        (3072,),
        (768, 3072),
        (768,),
    ]
    shapes = [(50257, 768), (1, 32, 768)]  # token + position embeddings
    for _ in range(12):
        shapes.extend(per_block)
    shapes.extend([(768,), (768,)])  # final layer-norm scale and bias
    return make_consts(dtype, shapes)
def gpt2():
metatable = {"relay.Constant": gpt2_consts("float32")}
mod = tvm.relay.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float32", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float32", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f);
%64 = multiply(%63, 0.044715f);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f);
%69 = add(%67, 1f);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float32", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float32", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f);
%134 = multiply(%133, 0.044715f);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f);
%139 = add(%137, 1f);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float32", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float32", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f);
%204 = multiply(%203, 0.044715f);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f);
%209 = add(%207, 1f);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float32", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float32", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f);
%274 = multiply(%273, 0.044715f);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f);
%279 = add(%277, 1f);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float32", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float32", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f);
%344 = multiply(%343, 0.044715f);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f);
%349 = add(%347, 1f);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float32", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float32", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f);
%414 = multiply(%413, 0.044715f);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f);
%419 = add(%417, 1f);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float32", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float32", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f);
%484 = multiply(%483, 0.044715f);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f);
%489 = add(%487, 1f);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float32", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float32", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f);
%554 = multiply(%553, 0.044715f);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f);
%559 = add(%557, 1f);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float32", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float32", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f);
%624 = multiply(%623, 0.044715f);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f);
%629 = add(%627, 1f);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float32", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float32", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f);
%694 = multiply(%693, 0.044715f);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f);
%699 = add(%697, 1f);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float32", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float32", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f);
%764 = multiply(%763, 0.044715f);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f);
%769 = add(%767, 1f);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float32", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float32", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f);
%834 = multiply(%833, 0.044715f);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f);
%839 = add(%837, 1f);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
def gpt2_16():
metatable = {"relay.Constant": gpt2_consts("float16")}
mod = tvm.relay.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f16);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f16);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float16", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f16);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float16", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f16);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f16);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f16);
%64 = multiply(%63, 0.044715f16);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f16);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f16);
%69 = add(%67, 1f16);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f16);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f16);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float16", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f16);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float16", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f16);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f16);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f16);
%134 = multiply(%133, 0.044715f16);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f16);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f16);
%139 = add(%137, 1f16);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f16);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f16);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float16", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f16);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float16", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f16);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f16);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f16);
%204 = multiply(%203, 0.044715f16);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f16);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f16);
%209 = add(%207, 1f16);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f16);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f16);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float16", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f16);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float16", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f16);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f16);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f16);
%274 = multiply(%273, 0.044715f16);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f16);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f16);
%279 = add(%277, 1f16);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f16);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f16);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float16", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f16);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float16", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f16);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f16);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f16);
%344 = multiply(%343, 0.044715f16);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f16);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f16);
%349 = add(%347, 1f16);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f16);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f16);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float16", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f16);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float16", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f16);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f16);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f16);
%414 = multiply(%413, 0.044715f16);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f16);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f16);
%419 = add(%417, 1f16);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f16);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f16);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float16", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f16);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float16", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f16);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f16);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f16);
%484 = multiply(%483, 0.044715f16);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f16);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f16);
%489 = add(%487, 1f16);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f16);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f16);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float16", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f16);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float16", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f16);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f16);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f16);
%554 = multiply(%553, 0.044715f16);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f16);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f16);
%559 = add(%557, 1f16);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f16);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f16);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float16", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f16);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float16", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f16);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f16);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f16);
%624 = multiply(%623, 0.044715f16);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f16);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f16);
%629 = add(%627, 1f16);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f16);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f16);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float16", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f16);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float16", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f16);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f16);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f16);
%694 = multiply(%693, 0.044715f16);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f16);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f16);
%699 = add(%697, 1f16);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f16);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f16);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float16", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f16);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float16", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f16);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f16);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f16);
%764 = multiply(%763, 0.044715f16);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f16);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f16);
%769 = add(%767, 1f16);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f16);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f16);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float16", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f16);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float16", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f16);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f16);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f16);
%834 = multiply(%833, 0.044715f16);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f16);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f16);
%839 = add(%837, 1f16);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f16);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f16);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_16",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
def gpt2_extract_consts(dtype):
    """Build the constant tensors referenced by the gpt2_extract workloads.

    The shapes below line up, by position, with the ``meta[relay.Constant][i]``
    references in the Relay text of ``gpt2_extract``/``gpt2_extract_16``:
    dense weight/bias, layer-norm scale/shift, MLP weight/bias, and the
    (1, 32, 768) addend applied after the first reshape.
    """
    shapes = [
        (768, 768),  # 0
        (768,),  # 1
        (768,),  # 2
        (768,),  # 3
        (3072, 768),  # 4
        (3072,),  # 5
        (1, 32, 768),  # 6
    ]
    return make_consts(dtype, shapes)
def gpt2_extract():
    """Return the workload-description dict for a float32 GPT-2 sub-graph.

    The module is an attention-free slice of GPT-2 (dense projection,
    layer norm, and the tanh-approximation GELU MLP), parsed from Relay
    text with its constants supplied via the metatable.

    Returns a dict with the same schema as the other workloads in this
    file: input shapes/dtypes, the parsed IRModule, params, and dtype.
    """
    metatable = {"relay.Constant": gpt2_extract_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1600, 768), float32]) -> Tensor[(50, 32, 3072), float32] {
          %46 = nn.dense(%x, meta[relay.Constant][0], units=768);
          %47 = add(%46, meta[relay.Constant][1]);
          %48 = reshape(%47, newshape=[50, 32, 768]);
          %49 = add(meta[relay.Constant][6], %48);
          %50 = mean(%49, axis=[-1], keepdims=True);
          %51 = subtract(%49, %50);
          %52 = power(%51, 2f);
          %53 = mean(%52, axis=[-1], keepdims=True);
          %54 = add(%53, 1e-05f);
          %55 = sqrt(%54);
          %56 = divide(%51, %55);
          %57 = multiply(%56, meta[relay.Constant][2]);
          %58 = add(%57, meta[relay.Constant][3]);
          %59 = reshape(%58, newshape=[-1, 768]);
          %60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
          %61 = add(%60, meta[relay.Constant][5]);
          %62 = reshape(%61, newshape=[50, 32, 3072]);
          %63 = power(%62, 3f);
          %64 = multiply(%63, 0.044715f);
          %65 = add(%62, %64);
          %66 = multiply(%65, 0.797885f);
          %67 = tanh(%66);
          %68 = multiply(%62, 0.5f);
          %69 = add(%67, 1f);
          %70 = multiply(%68, %69);
          %70
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # "name" added for consistency: every sibling workload dict in this
    # file (gpt2_extract_16, resnet50, ...) carries a "name" entry.
    return {
        "name": "gpt2_extract",
        "input_shapes": {"x": [1600, 768]},
        "input_dtypes": {"x": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def gpt2_extract_16():
    """Float16 variant of the GPT-2 "extract" workload.

    Parses the same attention-free GPT-2 slice as ``gpt2_extract`` but with
    float16 tensors and constants, and returns the standard workload dict.
    """
    const_table = {"relay.Constant": gpt2_extract_consts("float16")}
    parsed_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1600, 768), float16]) -> Tensor[(50, 32, 3072), float16] {
          %46 = nn.dense(%x, meta[relay.Constant][0], units=768);
          %47 = add(%46, meta[relay.Constant][1]);
          %48 = reshape(%47, newshape=[50, 32, 768]);
          %49 = add(meta[relay.Constant][6], %48);
          %50 = mean(%49, axis=[-1], keepdims=True);
          %51 = subtract(%49, %50);
          %52 = power(%51, 2f16);
          %53 = mean(%52, axis=[-1], keepdims=True);
          %54 = add(%53, 1e-05f16);
          %55 = sqrt(%54);
          %56 = divide(%51, %55);
          %57 = multiply(%56, meta[relay.Constant][2]);
          %58 = add(%57, meta[relay.Constant][3]);
          %59 = reshape(%58, newshape=[-1, 768]);
          %60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
          %61 = add(%60, meta[relay.Constant][5]);
          %62 = reshape(%61, newshape=[50, 32, 3072]);
          %63 = power(%62, 3f16);
          %64 = multiply(%63, 0.044715f16);
          %65 = add(%62, %64);
          %66 = multiply(%65, 0.797885f16);
          %67 = tanh(%66);
          %68 = multiply(%62, 0.5f16);
          %69 = add(%67, 1f16);
          %70 = multiply(%68, %69);
          %70
        }
        """,
        "from_string",
        None,
        const_table,
    )
    description = {
        "name": "gpt2_extract_16",
        "input_shapes": {"x": [1600, 768]},
        "input_dtypes": {"x": "float16"},
        "mod": parsed_mod,
        "params": None,
        "main_dtype": "float16",
    }
    return description
def gpt2_16_for_cutlass_extract_consts(dtype):
    """Build the constant tensors for the CUTLASS-targeted GPT-2 extract.

    Shapes correspond, in order, to the ``meta[relay.Constant][i]``
    references in ``gpt2_16_for_cutlass_extract``: dense weight/bias and
    the two batch-matmul / concatenate operands.

    Fix: the ``dtype`` parameter was previously ignored in favor of a
    hard-coded ``"float16"``; it is now passed through, matching every
    other ``*_consts`` helper in this file. The only visible caller
    passes ``"float16"``, so behavior there is unchanged.
    """
    return make_consts(
        dtype,
        [
            (2304, 768),  # 0
            (2304,),  # 1
            (600, 32, 64),  # 2
            (600, 32, 32),  # 3
        ],
    )
def gpt2_16_for_cutlass_extract():
    """Small float16 GPT-2 fragment used to exercise CUTLASS offloading.

    The module pairs a dense-plus-bias projection with a batch-matmul
    feeding a concatenate, returning both results as a tuple.
    """
    const_table = {"relay.Constant": gpt2_16_for_cutlass_extract_consts("float16")}
    parsed_mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x0: Tensor[(1600, 768), float16],
                  %x3: Tensor[(600, 32, 64), float16])
            -> (Tensor[(1600, 2304), float16], Tensor[(1200, 32, 32), float16]) {
          %0 = nn.dense(%x0, meta[relay.Constant][0], units=2304);
          %1 = add(%0, meta[relay.Constant][1]);
          %2 = nn.batch_matmul(%x3, meta[relay.Constant][2], out_dtype="float16", transpose_b=True);
          %3 = (%2, meta[relay.Constant][3]);
          %4 = concatenate(%3);
          (%1, %4)
        }
        """,
        "from_string",
        None,
        const_table,
    )
    description = {
        "name": "gpt2_16_for_cutlass_extract",
        "input_shapes": {"x0": (1600, 768), "x3": (600, 32, 64)},
        "input_dtypes": {"x0": "float16", "x3": "float16"},
        "mod": parsed_mod,
        "params": None,
        "main_dtype": "float16",
    }
    return description
def resnet50_consts(dtype):
    """Build the 259 constant tensors referenced by the resnet50 workloads.

    The shapes line up, by position, with the ``meta[relay.Constant][i]``
    references in the Relay text of ``resnet50``/``resnet50_16``: groups of
    four 1-D tensors are batch-norm gamma/beta/mean/var sets, the 4-D
    tensors are conv2d kernels, and the final (1000, 2048)/(1000,) pair is
    the classifier dense weight and bias. Do not reorder entries — the
    indices are hard-coded in the Relay text.
    """
    return make_consts(
        dtype,
        [
            (3,), # 0
            (3,), # 1
            (3,), # 2
            (3,), # 3
            (64, 3, 7, 7), # 4
            (64,), # 5
            (64,), # 6
            (64,), # 7
            (64,), # 8
            (64,), # 9
            (64,), # 10
            (64,), # 11
            (64,), # 12
            (64, 64, 1, 1), # 13
            (64,), # 14
            (64,), # 15
            (64,), # 16
            (64,), # 17
            (64, 64, 3, 3), # 18
            (64,), # 19
            (64,), # 20
            (64,), # 21
            (64,), # 22
            (256, 64, 1, 1), # 23
            (256, 64, 1, 1), # 24
            (256,), # 25
            (256,), # 26
            (256,), # 27
            (256,), # 28
            (64, 256, 1, 1), # 29
            (64,), # 30
            (64,), # 31
            (64,), # 32
            (64,), # 33
            (64, 64, 3, 3), # 34
            (64,), # 35
            (64,), # 36
            (64,), # 37
            (64,), # 38
            (256, 64, 1, 1), # 39
            (256,), # 40
            (256,), # 41
            (256,), # 42
            (256,), # 43
            (64, 256, 1, 1), # 44
            (64,), # 45
            (64,), # 46
            (64,), # 47
            (64,), # 48
            (64, 64, 3, 3), # 49
            (64,), # 50
            (64,), # 51
            (64,), # 52
            (64,), # 53
            (256, 64, 1, 1), # 54
            (256,), # 55
            (256,), # 56
            (256,), # 57
            (256,), # 58
            (128, 256, 1, 1), # 59
            (128,), # 60
            (128,), # 61
            (128,), # 62
            (128,), # 63
            (128, 128, 3, 3), # 64
            (128,), # 65
            (128,), # 66
            (128,), # 67
            (128,), # 68
            (512, 128, 1, 1), # 69
            (512, 256, 1, 1), # 70
            (512,), # 71
            (512,), # 72
            (512,), # 73
            (512,), # 74
            (128, 512, 1, 1), # 75
            (128,), # 76
            (128,), # 77
            (128,), # 78
            (128,), # 79
            (128, 128, 3, 3), # 80
            (128,), # 81
            (128,), # 82
            (128,), # 83
            (128,), # 84
            (512, 128, 1, 1), # 85
            (512,), # 86
            (512,), # 87
            (512,), # 88
            (512,), # 89
            (128, 512, 1, 1), # 90
            (128,), # 91
            (128,), # 92
            (128,), # 93
            (128,), # 94
            (128, 128, 3, 3), # 95
            (128,), # 96
            (128,), # 97
            (128,), # 98
            (128,), # 99
            (512, 128, 1, 1), # 100
            (512,), # 101
            (512,), # 102
            (512,), # 103
            (512,), # 104
            (128, 512, 1, 1), # 105
            (128,), # 106
            (128,), # 107
            (128,), # 108
            (128,), # 109
            (128, 128, 3, 3), # 110
            (128,), # 111
            (128,), # 112
            (128,), # 113
            (128,), # 114
            (512, 128, 1, 1), # 115
            (512,), # 116
            (512,), # 117
            (512,), # 118
            (512,), # 119
            (256, 512, 1, 1), # 120
            (256,), # 121
            (256,), # 122
            (256,), # 123
            (256,), # 124
            (256, 256, 3, 3), # 125
            (256,), # 126
            (256,), # 127
            (256,), # 128
            (256,), # 129
            (1024, 256, 1, 1), # 130
            (1024, 512, 1, 1), # 131
            (1024,), # 132
            (1024,), # 133
            (1024,), # 134
            (1024,), # 135
            (256, 1024, 1, 1), # 136
            (256,), # 137
            (256,), # 138
            (256,), # 139
            (256,), # 140
            (256, 256, 3, 3), # 141
            (256,), # 142
            (256,), # 143
            (256,), # 144
            (256,), # 145
            (1024, 256, 1, 1), # 146
            (1024,), # 147
            (1024,), # 148
            (1024,), # 149
            (1024,), # 150
            (256, 1024, 1, 1), # 151
            (256,), # 152
            (256,), # 153
            (256,), # 154
            (256,), # 155
            (256, 256, 3, 3), # 156
            (256,), # 157
            (256,), # 158
            (256,), # 159
            (256,), # 160
            (1024, 256, 1, 1), # 161
            (1024,), # 162
            (1024,), # 163
            (1024,), # 164
            (1024,), # 165
            (256, 1024, 1, 1), # 166
            (256,), # 167
            (256,), # 168
            (256,), # 169
            (256,), # 170
            (256, 256, 3, 3), # 171
            (256,), # 172
            (256,), # 173
            (256,), # 174
            (256,), # 175
            (1024, 256, 1, 1), # 176
            (1024,), # 177
            (1024,), # 178
            (1024,), # 179
            (1024,), # 180
            (256, 1024, 1, 1), # 181
            (256,), # 182
            (256,), # 183
            (256,), # 184
            (256,), # 185
            (256, 256, 3, 3), # 186
            (256,), # 187
            (256,), # 188
            (256,), # 189
            (256,), # 190
            (1024, 256, 1, 1), # 191
            (1024,), # 192
            (1024,), # 193
            (1024,), # 194
            (1024,), # 195
            (256, 1024, 1, 1), # 196
            (256,), # 197
            (256,), # 198
            (256,), # 199
            (256,), # 200
            (256, 256, 3, 3), # 201
            (256,), # 202
            (256,), # 203
            (256,), # 204
            (256,), # 205
            (1024, 256, 1, 1), # 206
            (1024,), # 207
            (1024,), # 208
            (1024,), # 209
            (1024,), # 210
            (512, 1024, 1, 1), # 211
            (512,), # 212
            (512,), # 213
            (512,), # 214
            (512,), # 215
            (512, 512, 3, 3), # 216
            (512,), # 217
            (512,), # 218
            (512,), # 219
            (512,), # 220
            (2048, 512, 1, 1), # 221
            (2048, 1024, 1, 1), # 222
            (2048,), # 223
            (2048,), # 224
            (2048,), # 225
            (2048,), # 226
            (512, 2048, 1, 1), # 227
            (512,), # 228
            (512,), # 229
            (512,), # 230
            (512,), # 231
            (512, 512, 3, 3), # 232
            (512,), # 233
            (512,), # 234
            (512,), # 235
            (512,), # 236
            (2048, 512, 1, 1), # 237
            (2048,), # 238
            (2048,), # 239
            (2048,), # 240
            (2048,), # 241
            (512, 2048, 1, 1), # 242
            (512,), # 243
            (512,), # 244
            (512,), # 245
            (512,), # 246
            (512, 512, 3, 3), # 247
            (512,), # 248
            (512,), # 249
            (512,), # 250
            (512,), # 251
            (2048, 512, 1, 1), # 252
            (2048,), # 253
            (2048,), # 254
            (2048,), # 255
            (2048,), # 256
            (1000, 2048), # 257
            (1000,), # 258
        ],
    )
def resnet50():
    """Return the workload-description dict for float32 ResNet-50.

    The whole network is parsed from Relay text; its weights and
    batch-norm parameters come from ``resnet50_consts`` via the metatable,
    so the ``meta[relay.Constant][i]`` indices below must stay in sync
    with that table. Returns the standard dict schema used throughout this
    file (name, input shapes/dtypes, parsed IRModule, params, main dtype).
    """
    metatable = {"relay.Constant": resnet50_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
          %0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
          %1 = %0.0;
          %2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
          %3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
          %4 = %3.0;
          %5 = nn.relu(%4);
          %6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
          %7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
          %8 = %7.0;
          %9 = nn.relu(%8);
          %10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
          %12 = %11.0;
          %13 = nn.relu(%12);
          %14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
          %16 = %15.0;
          %17 = nn.relu(%16);
          %18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %20 = add(%18, %19);
          %21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
          %22 = %21.0;
          %23 = nn.relu(%22);
          %24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
          %26 = %25.0;
          %27 = nn.relu(%26);
          %28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
          %30 = %29.0;
          %31 = nn.relu(%30);
          %32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %33 = add(%32, %20);
          %34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
          %35 = %34.0;
          %36 = nn.relu(%35);
          %37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
          %39 = %38.0;
          %40 = nn.relu(%39);
          %41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
          %43 = %42.0;
          %44 = nn.relu(%43);
          %45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %46 = add(%45, %33);
          %47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
          %48 = %47.0;
          %49 = nn.relu(%48);
          %50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
          %52 = %51.0;
          %53 = nn.relu(%52);
          %54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
          %56 = %55.0;
          %57 = nn.relu(%56);
          %58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %60 = add(%58, %59);
          %61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
          %62 = %61.0;
          %63 = nn.relu(%62);
          %64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
          %66 = %65.0;
          %67 = nn.relu(%66);
          %68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
          %70 = %69.0;
          %71 = nn.relu(%70);
          %72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %73 = add(%72, %60);
          %74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
          %75 = %74.0;
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
          %79 = %78.0;
          %80 = nn.relu(%79);
          %81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
          %83 = %82.0;
          %84 = nn.relu(%83);
          %85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %86 = add(%85, %73);
          %87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
          %88 = %87.0;
          %89 = nn.relu(%88);
          %90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
          %92 = %91.0;
          %93 = nn.relu(%92);
          %94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
          %96 = %95.0;
          %97 = nn.relu(%96);
          %98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %99 = add(%98, %86);
          %100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
          %101 = %100.0;
          %102 = nn.relu(%101);
          %103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
          %105 = %104.0;
          %106 = nn.relu(%105);
          %107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
          %109 = %108.0;
          %110 = nn.relu(%109);
          %111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %113 = add(%111, %112);
          %114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
          %115 = %114.0;
          %116 = nn.relu(%115);
          %117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
          %119 = %118.0;
          %120 = nn.relu(%119);
          %121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
          %123 = %122.0;
          %124 = nn.relu(%123);
          %125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %126 = add(%125, %113);
          %127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
          %128 = %127.0;
          %129 = nn.relu(%128);
          %130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
          %132 = %131.0;
          %133 = nn.relu(%132);
          %134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
          %136 = %135.0;
          %137 = nn.relu(%136);
          %138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %139 = add(%138, %126);
          %140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
          %141 = %140.0;
          %142 = nn.relu(%141);
          %143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
          %145 = %144.0;
          %146 = nn.relu(%145);
          %147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
          %149 = %148.0;
          %150 = nn.relu(%149);
          %151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %152 = add(%151, %139);
          %153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
          %154 = %153.0;
          %155 = nn.relu(%154);
          %156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
          %158 = %157.0;
          %159 = nn.relu(%158);
          %160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
          %162 = %161.0;
          %163 = nn.relu(%162);
          %164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %165 = add(%164, %152);
          %166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
          %167 = %166.0;
          %168 = nn.relu(%167);
          %169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
          %171 = %170.0;
          %172 = nn.relu(%171);
          %173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
          %175 = %174.0;
          %176 = nn.relu(%175);
          %177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %178 = add(%177, %165);
          %179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
          %180 = %179.0;
          %181 = nn.relu(%180);
          %182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
          %184 = %183.0;
          %185 = nn.relu(%184);
          %186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
          %188 = %187.0;
          %189 = nn.relu(%188);
          %190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %192 = add(%190, %191);
          %193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
          %194 = %193.0;
          %195 = nn.relu(%194);
          %196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
          %198 = %197.0;
          %199 = nn.relu(%198);
          %200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
          %202 = %201.0;
          %203 = nn.relu(%202);
          %204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %205 = add(%204, %192);
          %206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
          %207 = %206.0;
          %208 = nn.relu(%207);
          %209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
          %211 = %210.0;
          %212 = nn.relu(%211);
          %213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
          %215 = %214.0;
          %216 = nn.relu(%215);
          %217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %218 = add(%217, %205);
          %219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
          %220 = %219.0;
          %221 = nn.relu(%220);
          %222 = nn.global_avg_pool2d(%221);
          %223 = reshape(%222, newshape=[0, -1]);
          %224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
          add(%224, meta[relay.Constant][258])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "resnet50",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def resnet50_16():
    """Return a test-model descriptor for ResNet-50 with float16 data/weights.

    The network is written directly as Relay IR text and parsed with
    ``tvm.relay.parse``. All weights and batch-norm statistics are supplied
    as ``relay.Constant`` nodes through the metatable (built by
    ``resnet50_consts``) rather than as runtime params, which is why the
    returned descriptor has ``params=None``.
    """
    # Same constant shapes as the float32 variant, materialized as float16.
    metatable = {"relay.Constant": resnet50_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
        %0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
        %1 = %0.0;
        %2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
        %3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
        %4 = %3.0;
        %5 = nn.relu(%4);
        %6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
        %7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
        %8 = %7.0;
        %9 = nn.relu(%8);
        %10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
        %12 = %11.0;
        %13 = nn.relu(%12);
        %14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
        %15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
        %16 = %15.0;
        %17 = nn.relu(%16);
        %18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %20 = add(%18, %19);
        %21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
        %22 = %21.0;
        %23 = nn.relu(%22);
        %24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
        %26 = %25.0;
        %27 = nn.relu(%26);
        %28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
        %29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
        %30 = %29.0;
        %31 = nn.relu(%30);
        %32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %33 = add(%32, %20);
        %34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
        %35 = %34.0;
        %36 = nn.relu(%35);
        %37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
        %39 = %38.0;
        %40 = nn.relu(%39);
        %41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
        %42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
        %43 = %42.0;
        %44 = nn.relu(%43);
        %45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %46 = add(%45, %33);
        %47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
        %48 = %47.0;
        %49 = nn.relu(%48);
        %50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
        %51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
        %52 = %51.0;
        %53 = nn.relu(%52);
        %54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
        %55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
        %56 = %55.0;
        %57 = nn.relu(%56);
        %58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %60 = add(%58, %59);
        %61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
        %62 = %61.0;
        %63 = nn.relu(%62);
        %64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
        %65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
        %66 = %65.0;
        %67 = nn.relu(%66);
        %68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
        %69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
        %70 = %69.0;
        %71 = nn.relu(%70);
        %72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %73 = add(%72, %60);
        %74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
        %75 = %74.0;
        %76 = nn.relu(%75);
        %77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
        %78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
        %79 = %78.0;
        %80 = nn.relu(%79);
        %81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
        %82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
        %83 = %82.0;
        %84 = nn.relu(%83);
        %85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %86 = add(%85, %73);
        %87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
        %88 = %87.0;
        %89 = nn.relu(%88);
        %90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
        %91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
        %92 = %91.0;
        %93 = nn.relu(%92);
        %94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
        %95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
        %96 = %95.0;
        %97 = nn.relu(%96);
        %98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %99 = add(%98, %86);
        %100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
        %101 = %100.0;
        %102 = nn.relu(%101);
        %103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
        %105 = %104.0;
        %106 = nn.relu(%105);
        %107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
        %109 = %108.0;
        %110 = nn.relu(%109);
        %111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %113 = add(%111, %112);
        %114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
        %115 = %114.0;
        %116 = nn.relu(%115);
        %117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
        %119 = %118.0;
        %120 = nn.relu(%119);
        %121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
        %123 = %122.0;
        %124 = nn.relu(%123);
        %125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %126 = add(%125, %113);
        %127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
        %128 = %127.0;
        %129 = nn.relu(%128);
        %130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
        %132 = %131.0;
        %133 = nn.relu(%132);
        %134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
        %136 = %135.0;
        %137 = nn.relu(%136);
        %138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %139 = add(%138, %126);
        %140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
        %141 = %140.0;
        %142 = nn.relu(%141);
        %143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
        %145 = %144.0;
        %146 = nn.relu(%145);
        %147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
        %149 = %148.0;
        %150 = nn.relu(%149);
        %151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %152 = add(%151, %139);
        %153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
        %154 = %153.0;
        %155 = nn.relu(%154);
        %156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
        %158 = %157.0;
        %159 = nn.relu(%158);
        %160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
        %162 = %161.0;
        %163 = nn.relu(%162);
        %164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %165 = add(%164, %152);
        %166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
        %167 = %166.0;
        %168 = nn.relu(%167);
        %169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
        %170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
        %171 = %170.0;
        %172 = nn.relu(%171);
        %173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
        %174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
        %175 = %174.0;
        %176 = nn.relu(%175);
        %177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
        %178 = add(%177, %165);
        %179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
        %180 = %179.0;
        %181 = nn.relu(%180);
        %182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
        %184 = %183.0;
        %185 = nn.relu(%184);
        %186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
        %187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
        %188 = %187.0;
        %189 = nn.relu(%188);
        %190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
        %191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
        %192 = add(%190, %191);
        %193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
        %194 = %193.0;
        %195 = nn.relu(%194);
        %196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
        %198 = %197.0;
        %199 = nn.relu(%198);
        %200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
        %201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
        %202 = %201.0;
        %203 = nn.relu(%202);
        %204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
        %205 = add(%204, %192);
        %206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
        %207 = %206.0;
        %208 = nn.relu(%207);
        %209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
        %210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
        %211 = %210.0;
        %212 = nn.relu(%211);
        %213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
        %214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
        %215 = %214.0;
        %216 = nn.relu(%215);
        %217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
        %218 = add(%217, %205);
        %219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
        %220 = %219.0;
        %221 = nn.relu(%220);
        %222 = nn.global_avg_pool2d(%221);
        %223 = reshape(%222, newshape=[0, -1]);
        %224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
        add(%224, meta[relay.Constant][258])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # Descriptor consumed by the test driver; weights are baked in as
    # constants above, hence params is None.
    return {
        "name": "resnet50_16",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def mobilenet_consts(dtype):
    """Construct the MobileNetV2 weight/parameter constants in the given dtype.

    Produces exactly the same flat list of shapes as writing every entry out
    by hand: each convolution weight shape is immediately followed by four
    1-D shapes of length equal to the conv's output-channel count (the
    gamma/beta/mean/variance parameters of the batch norm that follows it),
    and the final 1x1 classifier convolution stands alone with no batch norm.
    """
    # Conv weight shapes in network order; index i here corresponds to
    # constant index 5*i in the expanded table.
    conv_weight_shapes = [
        (32, 3, 3, 3),
        (32, 32, 1, 1),
        (32, 1, 3, 3),
        (16, 32, 1, 1),
        (96, 16, 1, 1),
        (96, 1, 3, 3),
        (24, 96, 1, 1),
        (144, 24, 1, 1),
        (144, 1, 3, 3),
        (24, 144, 1, 1),
        (144, 24, 1, 1),
        (144, 1, 3, 3),
        (32, 144, 1, 1),
        (192, 32, 1, 1),
        (192, 1, 3, 3),
        (32, 192, 1, 1),
        (192, 32, 1, 1),
        (192, 1, 3, 3),
        (32, 192, 1, 1),
        (192, 32, 1, 1),
        (192, 1, 3, 3),
        (64, 192, 1, 1),
        (384, 64, 1, 1),
        (384, 1, 3, 3),
        (64, 384, 1, 1),
        (384, 64, 1, 1),
        (384, 1, 3, 3),
        (64, 384, 1, 1),
        (384, 64, 1, 1),
        (384, 1, 3, 3),
        (64, 384, 1, 1),
        (384, 64, 1, 1),
        (384, 1, 3, 3),
        (96, 384, 1, 1),
        (576, 96, 1, 1),
        (576, 1, 3, 3),
        (96, 576, 1, 1),
        (576, 96, 1, 1),
        (576, 1, 3, 3),
        (96, 576, 1, 1),
        (576, 96, 1, 1),
        (576, 1, 3, 3),
        (160, 576, 1, 1),
        (960, 160, 1, 1),
        (960, 1, 3, 3),
        (160, 960, 1, 1),
        (960, 160, 1, 1),
        (960, 1, 3, 3),
        (160, 960, 1, 1),
        (960, 160, 1, 1),
        (960, 1, 3, 3),
        (320, 960, 1, 1),
        (1280, 320, 1, 1),
    ]
    shapes = []
    for weight_shape in conv_weight_shapes:
        shapes.append(weight_shape)
        # gamma, beta, moving_mean, moving_var for the following batch_norm.
        shapes.extend([(weight_shape[0],)] * 4)
    # Final 1x1 classifier conv (dense expressed as conv); no batch norm after.
    shapes.append((1000, 1280, 1, 1))
    return make_consts(dtype, shapes)
def mobilenet():
    """Return a test-model descriptor for MobileNetV2 with float32 data/weights.

    The network is written directly as Relay IR text and parsed with
    ``tvm.relay.parse``. All weights and batch-norm statistics are supplied
    as ``relay.Constant`` nodes through the metatable (built by
    ``mobilenet_consts``) rather than as runtime params, which is why the
    returned descriptor has ``params=None``.
    """
    metatable = {"relay.Constant": mobilenet_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
        %0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
        %1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
        %2 = %1.0;
        %3 = nn.relu(%2);
        %4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
        %5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
        %6 = %5.0;
        %7 = nn.relu(%6);
        %8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
        %9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
        %10 = %9.0;
        %11 = nn.relu(%10);
        %12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
        %13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
        %14 = %13.0;
        %15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
        %16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
        %17 = %16.0;
        %18 = nn.relu(%17);
        %19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
        %20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
        %21 = %20.0;
        %22 = nn.relu(%21);
        %23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
        %24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
        %25 = %24.0;
        %26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
        %27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
        %28 = %27.0;
        %29 = nn.relu(%28);
        %30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
        %31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
        %32 = %31.0;
        %33 = nn.relu(%32);
        %34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
        %35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
        %36 = %35.0;
        %37 = add(%36, %25);
        %38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
        %39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
        %40 = %39.0;
        %41 = nn.relu(%40);
        %42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
        %43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
        %44 = %43.0;
        %45 = nn.relu(%44);
        %46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
        %47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
        %48 = %47.0;
        %49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
        %50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
        %51 = %50.0;
        %52 = nn.relu(%51);
        %53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
        %54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
        %55 = %54.0;
        %56 = nn.relu(%55);
        %57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
        %58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
        %59 = %58.0;
        %60 = add(%59, %48);
        %61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
        %62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
        %63 = %62.0;
        %64 = nn.relu(%63);
        %65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
        %66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
        %67 = %66.0;
        %68 = nn.relu(%67);
        %69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
        %70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
        %71 = %70.0;
        %72 = add(%71, %60);
        %73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
        %74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
        %75 = %74.0;
        %76 = nn.relu(%75);
        %77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
        %78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
        %79 = %78.0;
        %80 = nn.relu(%79);
        %81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
        %83 = %82.0;
        %84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
        %85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
        %86 = %85.0;
        %87 = nn.relu(%86);
        %88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
        %89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
        %90 = %89.0;
        %91 = nn.relu(%90);
        %92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
        %94 = %93.0;
        %95 = add(%94, %83);
        %96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
        %97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
        %98 = %97.0;
        %99 = nn.relu(%98);
        %100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
        %101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
        %102 = %101.0;
        %103 = nn.relu(%102);
        %104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
        %106 = %105.0;
        %107 = add(%106, %95);
        %108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
        %109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
        %110 = %109.0;
        %111 = nn.relu(%110);
        %112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
        %113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
        %114 = %113.0;
        %115 = nn.relu(%114);
        %116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
        %117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
        %118 = %117.0;
        %119 = add(%118, %107);
        %120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
        %121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
        %122 = %121.0;
        %123 = nn.relu(%122);
        %124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
        %125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
        %126 = %125.0;
        %127 = nn.relu(%126);
        %128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
        %129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
        %130 = %129.0;
        %131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
        %132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
        %133 = %132.0;
        %134 = nn.relu(%133);
        %135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
        %136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
        %137 = %136.0;
        %138 = nn.relu(%137);
        %139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
        %140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
        %141 = %140.0;
        %142 = add(%141, %130);
        %143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
        %144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
        %145 = %144.0;
        %146 = nn.relu(%145);
        %147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
        %148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
        %149 = %148.0;
        %150 = nn.relu(%149);
        %151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
        %152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
        %153 = %152.0;
        %154 = add(%153, %142);
        %155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
        %156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
        %157 = %156.0;
        %158 = nn.relu(%157);
        %159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
        %160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
        %161 = %160.0;
        %162 = nn.relu(%161);
        %163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
        %164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
        %165 = %164.0;
        %166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
        %167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
        %168 = %167.0;
        %169 = nn.relu(%168);
        %170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
        %171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
        %172 = %171.0;
        %173 = nn.relu(%172);
        %174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
        %175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
        %176 = %175.0;
        %177 = add(%176, %165);
        %178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
        %179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
        %180 = %179.0;
        %181 = nn.relu(%180);
        %182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
        %183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
        %184 = %183.0;
        %185 = nn.relu(%184);
        %186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
        %187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
        %188 = %187.0;
        %189 = add(%188, %177);
        %190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
        %191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
        %192 = %191.0;
        %193 = nn.relu(%192);
        %194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
        %195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
        %196 = %195.0;
        %197 = nn.relu(%196);
        %198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
        %199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
        %200 = %199.0;
        %201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
        %202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
        %203 = %202.0;
        %204 = nn.relu(%203);
        %205 = nn.global_avg_pool2d(%204);
        %206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
        reshape(%206, newshape=[0, -1])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # Descriptor consumed by the test driver; weights are baked in as
    # constants above, hence params is None.
    return {
        "name": "mobilenet",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def mobilenet_16():
    """Returns a MobileNet(-v2 style) model in float16 as a test-case dict.

    The Relay module is parsed from an inline IR text; all weights are bound
    via the ``meta[relay.Constant][i]`` references resolved through
    ``mobilenet_consts("float16")``, so the returned ``params`` is None.
    The returned dict carries the module plus input shape/dtype metadata.
    """
    # Bind float16 weight tensors so the meta[relay.Constant][i] references below resolve.
    metatable = {"relay.Constant": mobilenet_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
          %0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
          %1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
          %2 = %1.0;
          %3 = nn.relu(%2);
          %4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
          %6 = %5.0;
          %7 = nn.relu(%6);
          %8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
          %9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
          %10 = %9.0;
          %11 = nn.relu(%10);
          %12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
          %13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
          %14 = %13.0;
          %15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
          %17 = %16.0;
          %18 = nn.relu(%17);
          %19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
          %20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
          %21 = %20.0;
          %22 = nn.relu(%21);
          %23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
          %24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
          %25 = %24.0;
          %26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
          %27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
          %28 = %27.0;
          %29 = nn.relu(%28);
          %30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
          %31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
          %32 = %31.0;
          %33 = nn.relu(%32);
          %34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
          %35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
          %36 = %35.0;
          %37 = add(%36, %25);
          %38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
          %39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
          %40 = %39.0;
          %41 = nn.relu(%40);
          %42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
          %43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
          %44 = %43.0;
          %45 = nn.relu(%44);
          %46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
          %48 = %47.0;
          %49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
          %51 = %50.0;
          %52 = nn.relu(%51);
          %53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
          %55 = %54.0;
          %56 = nn.relu(%55);
          %57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
          %59 = %58.0;
          %60 = add(%59, %48);
          %61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
          %63 = %62.0;
          %64 = nn.relu(%63);
          %65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
          %67 = %66.0;
          %68 = nn.relu(%67);
          %69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
          %71 = %70.0;
          %72 = add(%71, %60);
          %73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
          %75 = %74.0;
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
          %79 = %78.0;
          %80 = nn.relu(%79);
          %81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
          %83 = %82.0;
          %84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
          %86 = %85.0;
          %87 = nn.relu(%86);
          %88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
          %90 = %89.0;
          %91 = nn.relu(%90);
          %92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
          %94 = %93.0;
          %95 = add(%94, %83);
          %96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
          %98 = %97.0;
          %99 = nn.relu(%98);
          %100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
          %102 = %101.0;
          %103 = nn.relu(%102);
          %104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
          %106 = %105.0;
          %107 = add(%106, %95);
          %108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
          %110 = %109.0;
          %111 = nn.relu(%110);
          %112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
          %114 = %113.0;
          %115 = nn.relu(%114);
          %116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
          %118 = %117.0;
          %119 = add(%118, %107);
          %120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
          %122 = %121.0;
          %123 = nn.relu(%122);
          %124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
          %126 = %125.0;
          %127 = nn.relu(%126);
          %128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
          %130 = %129.0;
          %131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
          %133 = %132.0;
          %134 = nn.relu(%133);
          %135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
          %137 = %136.0;
          %138 = nn.relu(%137);
          %139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
          %141 = %140.0;
          %142 = add(%141, %130);
          %143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
          %145 = %144.0;
          %146 = nn.relu(%145);
          %147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
          %149 = %148.0;
          %150 = nn.relu(%149);
          %151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
          %153 = %152.0;
          %154 = add(%153, %142);
          %155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
          %157 = %156.0;
          %158 = nn.relu(%157);
          %159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
          %161 = %160.0;
          %162 = nn.relu(%161);
          %163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
          %165 = %164.0;
          %166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
          %168 = %167.0;
          %169 = nn.relu(%168);
          %170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
          %172 = %171.0;
          %173 = nn.relu(%172);
          %174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
          %176 = %175.0;
          %177 = add(%176, %165);
          %178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
          %180 = %179.0;
          %181 = nn.relu(%180);
          %182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
          %184 = %183.0;
          %185 = nn.relu(%184);
          %186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
          %188 = %187.0;
          %189 = add(%188, %177);
          %190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
          %192 = %191.0;
          %193 = nn.relu(%192);
          %194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
          %196 = %195.0;
          %197 = nn.relu(%196);
          %198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
          %199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
          %200 = %199.0;
          %201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
          %202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
          %203 = %202.0;
          %204 = nn.relu(%203);
          %205 = nn.global_avg_pool2d(%204);
          %206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
          reshape(%206, newshape=[0, -1])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # "params" is None because every weight is already embedded in the module
    # as a relay.Constant; "input_shapes"/"input_dtypes" mirror the @main signature.
    return {
        "name": "mobilenet_16",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def batch_norm_extract():
    """Returns a minimal one-operator module (a single batch_norm followed by
    projection of its first output) as a test-case dict.

    Useful for exercising compilation of batch_norm in isolation.
    """
    # One (32,) tensor each for the batch_norm gamma/beta/mean/variance inputs.
    bn_param_shapes = [(32,)] * 4
    bn_params = make_consts("float32", bn_param_shapes)
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%FunctionVar_0: Tensor[(1, 32, 112, 112), float32]) -> Tensor[(1, 32, 112, 112), float32] {
          %3 = nn.batch_norm(%FunctionVar_0, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
          %3.0
        }
        """,
        "from_string",
        None,
        {"relay.Constant": bn_params},
    )
    input_shape = [1, 32, 112, 112]
    return {
        "name": "batch_norm_extract",
        "input_shapes": {"FunctionVar_0": input_shape},
        "input_dtypes": {"FunctionVar_0": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def resnext50_32x4d_consts(dtype):
    """Returns the 52 convolution-kernel constants for the resnext50_32x4d
    model in the given dtype, in the order the inline Relay IR references
    them as ``meta[relay.Constant][i]``.

    Each tuple is a 4-d kernel shape (out_channels, in_channels, kH, kW);
    the indices in the trailing comments match the constant indices used by
    resnext50_32x4d / resnext50_32x4d_16.
    """
    return make_consts(
        dtype,
        [
            (128, 64, 1, 1),  # 0
            (128, 4, 3, 3),  # 1
            (256, 128, 1, 1),  # 2
            (256, 64, 1, 1),  # 3
            (128, 256, 1, 1),  # 4
            (128, 4, 3, 3),  # 5
            (256, 128, 1, 1),  # 6
            (128, 256, 1, 1),  # 7
            (128, 4, 3, 3),  # 8
            (256, 128, 1, 1),  # 9
            (256, 256, 1, 1),  # 10
            (256, 8, 3, 3),  # 11
            (512, 256, 1, 1),  # 12
            (512, 256, 1, 1),  # 13
            (256, 512, 1, 1),  # 14
            (256, 8, 3, 3),  # 15
            (512, 256, 1, 1),  # 16
            (256, 512, 1, 1),  # 17
            (256, 8, 3, 3),  # 18
            (512, 256, 1, 1),  # 19
            (256, 512, 1, 1),  # 20
            (256, 8, 3, 3),  # 21
            (512, 256, 1, 1),  # 22
            (512, 512, 1, 1),  # 23
            (512, 16, 3, 3),  # 24
            (1024, 512, 1, 1),  # 25
            (1024, 512, 1, 1),  # 26
            (512, 1024, 1, 1),  # 27
            (512, 16, 3, 3),  # 28
            (1024, 512, 1, 1),  # 29
            (512, 1024, 1, 1),  # 30
            (512, 16, 3, 3),  # 31
            (1024, 512, 1, 1),  # 32
            (512, 1024, 1, 1),  # 33
            (512, 16, 3, 3),  # 34
            (1024, 512, 1, 1),  # 35
            (512, 1024, 1, 1),  # 36
            (512, 16, 3, 3),  # 37
            (1024, 512, 1, 1),  # 38
            (512, 1024, 1, 1),  # 39
            (512, 16, 3, 3),  # 40
            (1024, 512, 1, 1),  # 41
            (1024, 1024, 1, 1),  # 42
            (1024, 32, 3, 3),  # 43
            (2048, 1024, 1, 1),  # 44
            (2048, 1024, 1, 1),  # 45
            (1024, 2048, 1, 1),  # 46
            (1024, 32, 3, 3),  # 47
            (2048, 1024, 1, 1),  # 48
            (1024, 2048, 1, 1),  # 49
            (1024, 32, 3, 3),  # 50
            (2048, 1024, 1, 1),  # 51
        ],
    )
def resnext50_32x4d():
    """Returns a ResNeXt50 (32x4d) model in float32 as a test-case dict.

    The Relay module is parsed from inline IR; kernel weights are bound
    through ``resnext50_32x4d_consts("float32")``, so ``params`` is None.
    """
    # Bind the float32 kernels so the meta[relay.Constant][i] references below resolve.
    metatable = {"relay.Constant": resnext50_32x4d_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1, 64, 56, 56), float32]) {
          %0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %1 = nn.relu(%0);
          %2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %3 = nn.relu(%2);
          %4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %6 = add(%4, %5);
          %7 = nn.relu(%6);
          %8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %9 = nn.relu(%8);
          %10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %11 = nn.relu(%10);
          %12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %13 = add(%12, %7);
          %14 = nn.relu(%13);
          %15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %16 = nn.relu(%15);
          %17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %18 = nn.relu(%17);
          %19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %20 = add(%19, %14);
          %21 = nn.relu(%20);
          %22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %23 = nn.relu(%22);
          %24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %25 = nn.relu(%24);
          %26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %28 = add(%26, %27);
          %29 = nn.relu(%28);
          %30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %31 = nn.relu(%30);
          %32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %33 = nn.relu(%32);
          %34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %35 = add(%34, %29);
          %36 = nn.relu(%35);
          %37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %38 = nn.relu(%37);
          %39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %40 = nn.relu(%39);
          %41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %42 = add(%41, %36);
          %43 = nn.relu(%42);
          %44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %45 = nn.relu(%44);
          %46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %47 = nn.relu(%46);
          %48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %49 = add(%48, %43);
          %50 = nn.relu(%49);
          %51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %52 = nn.relu(%51);
          %53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %54 = nn.relu(%53);
          %55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %57 = add(%55, %56);
          %58 = nn.relu(%57);
          %59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %60 = nn.relu(%59);
          %61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %62 = nn.relu(%61);
          %63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %64 = add(%63, %58);
          %65 = nn.relu(%64);
          %66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %67 = nn.relu(%66);
          %68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %69 = nn.relu(%68);
          %70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %71 = add(%70, %65);
          %72 = nn.relu(%71);
          %73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %74 = nn.relu(%73);
          %75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %78 = add(%77, %72);
          %79 = nn.relu(%78);
          %80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %81 = nn.relu(%80);
          %82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %83 = nn.relu(%82);
          %84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %85 = add(%84, %79);
          %86 = nn.relu(%85);
          %87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %88 = nn.relu(%87);
          %89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %90 = nn.relu(%89);
          %91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %92 = add(%91, %86);
          %93 = nn.relu(%92);
          %94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %95 = nn.relu(%94);
          %96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %97 = nn.relu(%96);
          %98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %100 = add(%98, %99);
          %101 = nn.relu(%100);
          %102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %103 = nn.relu(%102);
          %104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %105 = nn.relu(%104);
          %106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %107 = add(%106, %101);
          %108 = nn.relu(%107);
          %109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %110 = nn.relu(%109);
          %111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %112 = nn.relu(%111);
          %113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %114 = add(%113, %108);
          nn.relu(%114)
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # "params" is None because every weight is already embedded in the module
    # as a relay.Constant; "input_shapes"/"input_dtypes" mirror the @main signature.
    return {
        "name": "resnext50_32x4d",
        "input_shapes": {"x": [1, 64, 56, 56]},
        "input_dtypes": {"x": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def resnext50_32x4d_16():
    """Returns a ResNeXt50 (32x4d) model in float16 as a test-case dict.

    Identical topology to resnext50_32x4d() but with float16 input and
    float16 kernel constants; ``params`` is None since all weights are
    embedded as relay.Constant nodes.
    """
    # Bind the float16 kernels so the meta[relay.Constant][i] references below resolve.
    metatable = {"relay.Constant": resnext50_32x4d_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1, 64, 56, 56), float16]) {
          %0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %1 = nn.relu(%0);
          %2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %3 = nn.relu(%2);
          %4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %6 = add(%4, %5);
          %7 = nn.relu(%6);
          %8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %9 = nn.relu(%8);
          %10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %11 = nn.relu(%10);
          %12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %13 = add(%12, %7);
          %14 = nn.relu(%13);
          %15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %16 = nn.relu(%15);
          %17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
          %18 = nn.relu(%17);
          %19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %20 = add(%19, %14);
          %21 = nn.relu(%20);
          %22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %23 = nn.relu(%22);
          %24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %25 = nn.relu(%24);
          %26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %28 = add(%26, %27);
          %29 = nn.relu(%28);
          %30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %31 = nn.relu(%30);
          %32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %33 = nn.relu(%32);
          %34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %35 = add(%34, %29);
          %36 = nn.relu(%35);
          %37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %38 = nn.relu(%37);
          %39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %40 = nn.relu(%39);
          %41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %42 = add(%41, %36);
          %43 = nn.relu(%42);
          %44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %45 = nn.relu(%44);
          %46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
          %47 = nn.relu(%46);
          %48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %49 = add(%48, %43);
          %50 = nn.relu(%49);
          %51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %52 = nn.relu(%51);
          %53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %54 = nn.relu(%53);
          %55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %57 = add(%55, %56);
          %58 = nn.relu(%57);
          %59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %60 = nn.relu(%59);
          %61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %62 = nn.relu(%61);
          %63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %64 = add(%63, %58);
          %65 = nn.relu(%64);
          %66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %67 = nn.relu(%66);
          %68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %69 = nn.relu(%68);
          %70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %71 = add(%70, %65);
          %72 = nn.relu(%71);
          %73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %74 = nn.relu(%73);
          %75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %78 = add(%77, %72);
          %79 = nn.relu(%78);
          %80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %81 = nn.relu(%80);
          %82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %83 = nn.relu(%82);
          %84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %85 = add(%84, %79);
          %86 = nn.relu(%85);
          %87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %88 = nn.relu(%87);
          %89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
          %90 = nn.relu(%89);
          %91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %92 = add(%91, %86);
          %93 = nn.relu(%92);
          %94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %95 = nn.relu(%94);
          %96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %97 = nn.relu(%96);
          %98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %100 = add(%98, %99);
          %101 = nn.relu(%100);
          %102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %103 = nn.relu(%102);
          %104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %105 = nn.relu(%104);
          %106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %107 = add(%106, %101);
          %108 = nn.relu(%107);
          %109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %110 = nn.relu(%109);
          %111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
          %112 = nn.relu(%111);
          %113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %114 = add(%113, %108);
          nn.relu(%114)
        }
        """,
        "from_string",
        None,
        metatable,
    )
    # "params" is None because every weight is already embedded in the module
    # as a relay.Constant; "input_shapes"/"input_dtypes" mirror the @main signature.
    return {
        "name": "resnext50_32x4d_16",
        "input_shapes": {"x": [1, 64, 56, 56]},
        "input_dtypes": {"x": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def describe_onnx(name, filename):
    """Print a description of the ONNX model at `filename` which can be passed to `from_onnx`
    to actually load the model.

    Note that '?' (ie unknown) shape dimensions must be manually changed to concrete dimensions
    which are consistent with the overall model.
    """
    onnx_model = onnx.load(MODEL_PREFIX + filename)
    input_shapes = {}
    input_dtypes = {}
    # Initializers (weights/constants) also appear in graph.input; skip them so only
    # the true model inputs are described.
    initializer_names = [n.name for n in onnx_model.graph.initializer]
    for input_info in onnx_model.graph.input:
        if input_info.name not in initializer_names:
            _, shape, dtype, _ = tvm.relay.frontend.onnx.get_info(input_info)
            if dtype is None:
                raise ValueError(f"Unknown dtype on input '{input_info.name}' is not supported.")
            input_shapes.update({input_info.name: shape})
            input_dtypes.update({input_info.name: dtype})
    # BUG FIX: the {filename} placeholder had been replaced by the literal text "(unknown)",
    # making the printed description useless; emit the actual filename.
    print(
        f"{{'name': '{name}', 'filename': '{filename}', 'input_shapes': {input_shapes}, 'input_dtypes': {input_dtypes}, 'main_dtype': 'float32'}}"
    )
def from_onnx(model):
    """Load the ONNX model described by the `model` dict (see `describe_onnx`) and return a
    new model dict with the imported, type-inferred Relay module and its params filled in."""
    logging.info("-------------------- BEGIN ONNX IMPORT --------------------")
    filename = MODEL_PREFIX + model["filename"]
    # BUG FIX: these two log messages contained the literal text "(unknown)" where the
    # {filename} interpolation belongs.
    logging.info(f"Loading ONNX model from {filename}")
    onnx_model = onnx.load(filename)
    logging.info(f"Loaded model from {filename}")
    # freeze_params=True bakes the ONNX initializers into the Relay module as constants.
    mod, params = tvm.relay.frontend.from_onnx(
        onnx_model, model["input_shapes"], freeze_params=True
    )
    mod = tvm.relay.transform.InferType()(mod)
    logging.info("-------------------- END ONNX IMPORT --------------------")
    logging.info(f"Imported model:\n{mod}")
    logging.info(f"Params:\n{params}")
    return {
        "name": model["name"],
        "input_shapes": model["input_shapes"],
        "input_dtypes": model["input_dtypes"],
        "mod": mod,
        "params": params,
        "main_dtype": model["main_dtype"],
    }
def to_onnx(model):
    """Export the Relay module in `model` to an ONNX file under MODEL_PREFIX and return a
    model-description dict (as accepted by `from_onnx`) pointing at the saved file."""
    logging.info("-------------------- BEGIN ONNX EXPORT --------------------")
    short_filename = model["name"] + ".onnx"
    filename = MODEL_PREFIX + short_filename
    # BUG FIX: the log message contained the literal text "(unknown)" where the
    # {filename} interpolation belongs.
    logging.info(f"Saving ONNX model to {filename}")
    params = model["params"]
    if params is None:
        params = {}
    tvm.contrib.target.onnx.to_onnx(model["mod"], params, model["name"], path=filename)
    logging.info("-------------------- END ONNX EXPORT --------------------")
    return {
        "name": model["name"],
        "filename": short_filename,
        "input_shapes": model["input_shapes"],
        "input_dtypes": model["input_dtypes"],
        "main_dtype": model["main_dtype"],
    }
| 215,685 | 49.288179 | 146 | py |
tvm | tvm-main/tests/python/relay/collage/demo_collage_partitioner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Compares Collage with various other baselines."""
# CAUTION: Requires some changes in python/tvm/autotvm/task/dispatcher.py
# so that AutoTVM tuning records can be cached between runs and between
# models. See https://github.com/mbs-octoml/mbs-tvm/tree/mbs-collage-hacks.
import tvm
import logging
import tempfile
import os
import shutil
import menangerie
# The following are necessary to force global functions or pattern tables to be registered
from tvm.relay.op.contrib.cutlass import partition_for_cutlass
from tvm.contrib.cutlass import num_cutlass_partitions
from tvm.relay.op.contrib.cublas import partition_for_cublas
from tvm.relay.op.contrib.cudnn import partition_for_cudnn
logging.basicConfig(level=logging.INFO)
########### Configuration ###########
###
### Rename to match your hardware, eg ..._vt100...
###
TUNING_LOG = "/home/mbs/collage_autotvm_rtx3070.tuninglog"
###
### If true, runs final model under nvprof
###
PROFILE = True
###
### If true, run all models
###
ALL_MODELS = False
###
### If true, run all configurations
###
ALL_CONFIGS = False
###
### How aggressively to look for candidates?
###
TVM_MAX_DEPTH = 8
BYOC_MAX_DEPTH = 8
###
### AutoTVM tuning parameters.
###
AUTOTVM_NUM_TRIALS = 2000
AUTOTVM_EARLY_STOPPING = 600
TIMEOUT = 10
MEASURE_NUMBER = tvm.relay.collage.MEASURE_NUMBER
MEASURE_REPEAT = tvm.relay.collage.MEASURE_REPEAT
WARMUP_MIN_REPEAT_MS = tvm.relay.collage.WARMUP_MIN_REPEAT_MS
HOST = tvm.target.Target("llvm")
CUDA = tvm.target.Target("cuda", HOST)
########### Runtime ###########
# Code to run a model. The actual call to 'run' is appended at compile time.
# We invoke the model as a sub-process so that we can wrap profiling tools around it.
# BUG FIX: the Python code embedded in this template had lost its indentation, so the
# generated runner script was not valid Python; conventional indentation restored.
# (Doubled braces {{...}} are literal braces in the generated script; single braces are
# interpolated now, at template-construction time.)
runner_template = f"""
import tvm
import tvm.runtime.vm
import numpy as np
import logging

logging.basicConfig(level=logging.INFO)

MEASURE_NUMBER = {MEASURE_NUMBER}
MEASURE_REPEAT = {MEASURE_REPEAT}
WARMUP_MIN_REPEAT_MS = {WARMUP_MIN_REPEAT_MS}


def arg_for(shape, dtype, device):
    return tvm.nd.array(
        np.random.rand(*shape).astype(dtype), device=device)


def vm_estimate_seconds(device, vm, args):
    # Warm up, then measure.
    vm.benchmark(device, repeat=1, number=1, min_repeat_ms=WARMUP_MIN_REPEAT_MS, **args)
    return vm.benchmark(device, repeat=MEASURE_REPEAT, number=MEASURE_NUMBER, min_repeat_ms=0,
                        **args)


def run(label, name, device, lib_path, code_path, input_shapes, input_dtypes):
    logging.info(f"Loading compiled code for {{name}} generated by {{label}} from {{lib_path}} and {{code_path}}...")
    loaded_lib = tvm.runtime.load_module(lib_path)
    loaded_code = bytearray(open(code_path, "rb").read())
    loaded_exe = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)
    vm = tvm.runtime.vm.VirtualMachine(loaded_exe, device)
    args = {{
        input_name: arg_for(input_shapes[input_name], input_dtypes[input_name], device)
        for input_name in input_shapes.keys()
    }}
    logging.info(f"Benchmarking for {{name}} generated by {{label}}...")
    profile = vm_estimate_seconds(device, vm, args)
    logging.info(f"Benchmarked for {{name}} generated by {{label}}: {{profile}}")
    logging.info(f"RESULT: {{label}} | {{name}} | {{profile.median * 1e3}}ms")


if __name__ == "__main__":
"""
########### AutoTVM tuning helpers ###########
def extract_autotvm_tasks(mod, target):
    """Collect the AutoTVM tuning tasks present in `mod` when compiled for `target`."""
    tasks = tvm.autotvm.task.extract_from_program(mod, target=target, params=None)
    return tasks
def optional_tuning_records(log_filename):
    """Return a dispatch context over any existing tuning records, else a fallback context."""
    # No log configured, or it has not been created yet: fall back to default schedules.
    if log_filename == "" or not os.path.exists(log_filename):
        return tvm.autotvm.task.FallbackContext()
    return tvm.autotvm.task.ApplyHistoryBest(log_filename)
def is_already_tuned(task, log_filename):
    """Return True if the tuning log at `log_filename` already holds a record for `task`."""
    if not os.path.exists(log_filename):
        return False
    history = tvm.autotvm.task.ApplyHistoryBest(log_filename)
    return history.contains(task.target, task.workload)
def tune_autotvm_tasks(tasks, log_filename):
    """Tune each task with AutoTVM and append the best strategies to `log_filename`."""
    if not tasks:
        return
    measure_option = tvm.autotvm.measure_option(
        builder=tvm.autotvm.LocalBuilder(timeout=TIMEOUT),
        runner=tvm.autotvm.LocalRunner(
            number=MEASURE_NUMBER, repeat=MEASURE_REPEAT, timeout=TIMEOUT, min_repeat_ms=0
        ),
    )
    logging.info(
        f"Using autotvm tuning for {len(tasks)} tasks with {AUTOTVM_NUM_TRIALS} trials, logging to {log_filename}"
    )
    # Tune into a scratch log seeded with any existing records; the best entries are
    # distilled back into the main log once all tasks are done.
    tmp_log_filename = log_filename + ".tmp"
    if os.path.exists(tmp_log_filename):
        os.remove(tmp_log_filename)
    if os.path.exists(log_filename):
        logging.info(f"Copying existing log {log_filename} to {tmp_log_filename}")
        shutil.copy(log_filename, tmp_log_filename)
    for i, task in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        logging.info(f"Considering task {task.name} {prefix}")
        if is_already_tuned(task, tmp_log_filename):
            # A record already exists (either copied from the main log or produced
            # earlier in this run) -- skip re-tuning.
            logging.info(f"Re-using existing record for {task.name}")
            continue
        logging.info(f"Using autotvm to tune {task.name}")
        tuner_obj = tvm.autotvm.tuner.XGBTuner(task, loss_type="reg")
        if os.path.exists(tmp_log_filename):
            tuner_obj.load_history(tvm.autotvm.record.load_from_file(tmp_log_filename))
        # Never attempt more trials than the task's config space can offer.
        n_trial = min(AUTOTVM_NUM_TRIALS, len(task.config_space))
        tuner_obj.tune(
            n_trial=n_trial,
            early_stopping=AUTOTVM_EARLY_STOPPING,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.progress_bar(n_trial, prefix=prefix),
                tvm.autotvm.callback.log_to_file(tmp_log_filename),
            ],
        )
    # Pick best records and copy back to main log file.
    tvm.autotvm.record.pick_best(tmp_log_filename, log_filename)
    os.remove(tmp_log_filename)
    logging.info("Done with autotvm tuning")
def autotvm_tune_module(mod, target, log_filename):
    """Extract and tune all AutoTVM tasks in `mod`, recording results in `log_filename`.

    A no-op when tuning is disabled (empty log filename). BYOC partitions contribute no
    tasks, so only TVM-compiled kernels are tuned.
    """
    if log_filename == "":
        logging.info("Not tuning with autotvm since disabled")
        return
    logging.info("Extracting tasks from overall module")
    tasks = extract_autotvm_tasks(mod, target)
    logging.info(f"Auto-tuning {len(tasks)} tasks from overall module")
    tune_autotvm_tasks(tasks, log_filename)
########### Drivers ###########
def compile_and_benchmark(label, model, targets, dev, tmp_dir):
    """Compile `model` for `targets`, then launch it in a sub-process and benchmark it.

    The compiled VM executable is serialized into `tmp_dir`, a small runner script is
    generated from `runner_template`, and that script is executed (optionally under
    nvprof when PROFILE is set).
    """
    logging.info(f"Compiling {model['name']} using {label} with {targets}...")
    exe = tvm.relay.vm.compile(model["mod"], target=targets, params=model["params"])
    # Serialize the executable as a shared library plus VM bytecode.
    lib_path = os.path.join(tmp_dir, "lib.so")
    code_path = os.path.join(tmp_dir, "code.ro")
    code, lib = exe.save()
    logging.info(f"Saving VM code to {code_path}...")
    with open(code_path, "wb") as f:
        f.write(code)
    logging.info(f"Exporting library to {lib_path}...")
    lib.export_library(lib_path, workspace_dir=tmp_dir, cc="nvcc")
    # Generate the runner script by appending the concrete 'run' invocation to the template.
    runner = f"{runner_template} run('{label}', '{model['name']}', tvm.device({dev.device_type}), '{lib_path}', '{code_path}', {model['input_shapes']}, {model['input_dtypes']})\n"
    runner_path = os.path.join(tmp_dir, "runner.py")
    logging.info(f"Saving runner to {runner_path}...")
    with open(runner_path, "w") as f:
        f.write(runner)
    logging.info(f"Invoking runner...")
    if PROFILE:
        profile_path = os.path.join(tmp_dir, "profile.txt")
        os.system(f"nsys nvprof -o {profile_path} python3 {runner_path}")
    else:
        os.system(f"python3 {runner_path}")
def collage(model):
    """Run the Collage partitioner over CUDA-related targets and profile the result."""
    logging.info(f"collage | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        # Candidate targets Collage may choose between, in addition to plain CUDA.
        targets = []
        targets.append(CUDA)
        use_fp16 = model["main_dtype"] == "float16"
        targets.append(
            tvm.target.Target(f"tensorrt -use_implicit_batch=False -use_fp16={use_fp16}", HOST)
        )
        tmp_dir = tempfile.mkdtemp()
        targets.append(tvm.target.Target(f"cutlass -tmp_dir={tmp_dir}", HOST))
        targets.append(tvm.target.Target("cublas", HOST))
        targets.append(tvm.target.Target("cudnn", HOST))
        config = {
            "relay.collage.tvm_max_depth": TVM_MAX_DEPTH,
            "relay.collage.byoc_max_depth": BYOC_MAX_DEPTH,
            "relay.collage.byoc_fusion_style": [
                "cutlass.NoFusion",
                "cublas.NoFusion",
                "cudnn.NoFusion",
                "tensorrt.TVMFusion",
            ],
        }
        logging.info(f"Using PassContext(config={config}")
        ctxt = tvm.transform.PassContext(config=config)
        config = tvm.target.make_compilation_config(ctxt, targets)
        with ctxt:
            mod = model["mod"]
            # Capture the post-dfs index in spans so Collage's choices can be traced.
            mod = tvm.relay.transform.CapturePostDfsIndexInSpans()(mod)
            logging.info("-------------- BEGIN INDEXED --------------")
            logging.info(mod)
            logging.info("-------------- END INDEXED ----------------")
            mod = tvm.relay.transform.CollagePartition(config)(mod)
            partitioned_model = model.copy()
            partitioned_model["mod"] = mod
            logging.info("-------------- BEGIN PARTITIONED --------------")
            logging.info(partitioned_model["mod"])
            logging.info("-------------- END PARTITIONED ----------------")
            dev = tvm.device(CUDA.get_target_device_type())
            compile_and_benchmark("collage", partitioned_model, targets, dev, tmp_dir)
def just_tensorrt(model):
    """Baseline: partition everything possible for TensorRT, finish compilation with TVM,
    and profile the result."""
    logging.info(f"just_tensorrt | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        logging.info("Partitioning for TensorRT...")
        use_fp16 = model["main_dtype"] == "float16"
        trt_target = tvm.target.Target(
            f"tensorrt -use_implicit_batch=False -use_fp16={use_fp16}", HOST
        )
        mod = tvm.relay.op.contrib.partition_for_tensorrt(
            mod=model["mod"], params=model["params"], target=trt_target
        )
        partitioned_model = model.copy()
        partitioned_model["mod"] = mod
        logging.info("-------------- BEGIN PARTITIONED --------------")
        logging.info(partitioned_model["mod"])
        logging.info("-------------- END PARTITIONED ----------------")
        targets = [CUDA, trt_target]
        dev = tvm.device(CUDA.get_target_device_type())
        compile_and_benchmark("just_tensorrt", partitioned_model, targets, dev, tmp_dir)
def just_cutlass(model):
    """Baseline: partition everything possible for CUTLASS, finish compilation with TVM,
    and profile the result."""
    logging.info(f"just_cutlass | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        # AlterOpLayout is disabled since it can interfere with the CUTLASS partitioning.
        with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
            logging.info("Partitioning for CUTLASS...")
            mod = tvm.relay.op.contrib.partition_for_cutlass(model["mod"], model["params"])
            partitioned_model = model.copy()
            partitioned_model["mod"] = mod
            logging.info("-------------- BEGIN PARTITIONED --------------")
            logging.info(partitioned_model["mod"])
            logging.info("-------------- END PARTITIONED ----------------")
            targets = [CUDA, tvm.target.Target(f"cutlass -tmp_dir={tmp_dir}", HOST)]
            dev = tvm.device(CUDA.get_target_device_type())
            compile_and_benchmark("just_cutlass", partitioned_model, targets, dev, tmp_dir)
def just_tvm(model):
    """Baseline: compile and profile with vanilla TVM only (no BYOC)."""
    logging.info(f"just_tvm | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    autotvm_tune_module(model["mod"], CUDA, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        dev = tvm.device(CUDA.get_target_device_type())
        compile_and_benchmark("just_tvm", model, CUDA, dev, tmp_dir)
def tvm_with_libs(model):
    """Baseline: like just_tvm, but enable the standard CUDA libraries via -libs."""
    logging.info(f"tvm_with_libs | {model['name']}")
    logging.info("-------------- BEGIN ORIGINAL --------------")
    logging.info(model["mod"])
    logging.info("-------------- END ORIGINAL ----------------")
    tmp_dir = tempfile.mkdtemp()
    cuda_target = tvm.target.Target("cuda -libs=cudnn,cublas", HOST)
    autotvm_tune_module(model["mod"], cuda_target, TUNING_LOG)
    with optional_tuning_records(TUNING_LOG):
        dev = tvm.device(cuda_target.get_target_device_type())
        compile_and_benchmark("tvm_with_libs", model, cuda_target, dev, tmp_dir)
########### Runners ###########
def run_all():
    """Run the whole test suite."""
    make_models = [menangerie.resnext50_32x4d]
    if ALL_MODELS:
        make_models.append(menangerie.resnext50_32x4d_16)
        make_models.append(menangerie.gpt2_16)
        make_models.append(menangerie.gpt2)
        make_models.append(menangerie.mobilenet_16)
        make_models.append(menangerie.mobilenet)
        make_models.append(menangerie.resnet50_16)
        make_models.append(menangerie.resnet50)
    run_models = []
    if ALL_CONFIGS:
        run_models.append(just_tensorrt)
        run_models.append(just_tvm)
        run_models.append(tvm_with_libs)
    # NOTE(review): collage is taken to run unconditionally (only the baseline configs are
    # gated by ALL_CONFIGS) -- confirm against the intended behavior.
    run_models.append(collage)
    for make_model in make_models:
        model = make_model()
        for run_model in run_models:
            run_model(model)
def run_mini():
    """Run Collage on a tiny GPT2 extract."""
    collage(menangerie.gpt2_16_for_cutlass_extract())


if __name__ == "__main__":
    # run_all()
    run_mini()
| 15,885 | 37.936275 | 182 | py |
tvm | tvm-main/tests/python/relay/strategy/test_select_implementation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests strategy selection for Relay ops """
import pytest
import tvm
from tvm import relay
from tvm import te
from tvm.relay.testing import run_infer_type
import tvm.testing
@pytest.mark.parametrize(
    "target, expected_implementation",
    [("llvm", "concatenate.cpu"), ("llvm -device=arm_cpu", "concatenate.arm_cpu")],
)
def test_concatenate(target, expected_implementation):
    """Check that the expected concatenate implementation is selected for each target."""
    target = tvm.target.Target(target)
    shape = (1, 1, 1, 3)
    dtype = "float32"
    axis = 1
    variables = [
        relay.var("var0", shape=shape, dtype=dtype),
        relay.var("var1", shape=shape, dtype=dtype),
    ]
    out = relay.op.concatenate(relay.Tuple(variables), axis)
    out = run_infer_type(out)
    impl, _ = relay.backend.te_compiler.select_implementation(
        relay.op.get("concatenate"),
        out.attrs,
        [te.placeholder(shape)],
        out.checked_type,
        target,
        use_autotvm=False,
    )
    assert impl.name == expected_implementation


if __name__ == "__main__":
    tvm.testing.main()
| 1,860 | 31.649123 | 83 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for regular conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class Conv2dTests(GeneralizedConv2dTests):
    """Helper base for regular (non-grouped) Conv2d tests.

    Groups is fixed at 1 and the reference kernel layout is pinned to HWIO -- some layout
    must be chosen, and the x86 reference implementation accepts several.
    """

    @fixture
    def groups(self):
        """Provided as a fixture rather than a parameter so Pytest does not append the
        (always identical) group count to every test name."""
        return 1

    def setup_method(self):
        self.ref_kernel_layout = "HWIO"
class TestConv2d_NHWC_DSP(Conv2dTests):
    """Exercises the conv2d_nhwc_dsp.arm_cpu schedule."""

    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        # TODO(mehrdadh): Fails due to https://github.com/apache/tvm/issues/11216
        # ((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
        # ((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
        # ((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
        # from Keyword Spotting model from MLPerfTiny models
        # TODO(mehrdad): Fails due to https://github.com/apache/tvm/issues/11216
        # ((1, 49, 10, 1), (10, 4), 64, (2, 2), (4, 1, 5, 1), 1),
        # from Visual Wake Word model from MLPerfTiny models
        # TODO(mehrdadh): fails due to https://github.com/apache/tvm/issues/11216
        # ((1, 96, 96, 3), (3, 3), 8, (2, 2), (0, 0, 1, 1), 1),
        # from Image Classification model from MLPerfTiny models
        ((1, 16, 16, 32), (1, 1), 64, (2, 2), 0, 1),
        ((4, 16, 16, 8), (5, 5), 8, 2, (0, 4, 4, 0), 1),
        ((4, 16, 16, 8), (5, 5), 16, 2, (0, 4, 4, 0), 1),
        ((4, 16, 16, 8), (5, 5), 8, 2, 0, 1),
        ((4, 16, 16, 8), (5, 5), 16, 2, 0, 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (0, 0, 1, 1), 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (1, 1, 2, 2), 1),
        ((1, 16, 16, 8), (5, 5), 16, 2, (3, 3, 2, 2), 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (0, 1, 2, 3), 1),
    )
    in_dtype = parameter("int8", "int16")

    data_layout = parameter("NHWC")
    kernel_layout = parameter("HWOI")
    out_layout = parameter("NHWC")
    schedule_name = parameter("conv2d_nhwc_dsp.arm_cpu")
class TestConv2d_NHWC_Spatial_Pack(Conv2dTests):
    """Exercises the conv2d_nhwc_spatial_pack.arm_cpu schedule."""

    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        ((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
        ((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
        ((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
    )
    in_dtype = parameter("int8", "int16")

    data_layout = parameter("NHWC")
    kernel_layout = parameter("HWIO")
    out_layout = parameter("NHWC")
    schedule_name = parameter("conv2d_nhwc_spatial_pack.arm_cpu")
class TestConv2d_NCHW_Spatial_Pack(Conv2dTests):
    """Exercises the conv2d_nchw_spatial_pack.arm_cpu schedule."""

    data_shape, kernel_size, num_filter, strides, padding, dilation, in_dtype = parameters(
        ((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int8"),
        ((1, 32, 32, 16), (3, 3), 12, 1, 0, 1, "int16"),
        ((1, 16, 16, 32), (3, 3), 12, 1, 0, 1, "int16"),
    )

    data_layout = parameter("NCHW")
    kernel_layout = parameter("OIHW")
    out_layout = parameter("NCHW")
    schedule_name = parameter("conv2d_nchw_spatial_pack.arm_cpu")


if __name__ == "__main__":
    main()
| 4,821 | 42.053571 | 100 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_avg_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicPoolTests:
    """Shared driver that builds a one-operator pooling module and checks it on Corstone-300."""

    @tvm.testing.requires_corstone300
    def test_pool(
        self,
        pool_type,
        shape,
        dtype,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
        schedule_name,
    ):
        """Test a subgraph with a single pool operator."""

        def build_module(var):
            # Both the reference and test modules apply the identical pooling op;
            # only the schedule used to compile them differs.
            pooled = getattr(relay.op.nn, pool_type)(
                var,
                pool_size=pool_size,
                strides=strides,
                dilation=dilation,
                padding=padding,
                layout=layout,
                out_layout="",
                ceil_mode=ceil_mode,
                count_include_pad=count_include_pad,
            )
            return tvm.IRModule.from_expr(relay.Function([var], pooled))

        ref_mod = build_module(relay.var("input", relay.TensorType(shape, dtype)))
        mod = build_module(relay.var("input", relay.TensorType(shape, dtype)))

        inputs = {"input": np.random.randint(low=-128, high=127, size=shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestAvgPool1d(BasicPoolTests):
    """Exercises the pool.arm_cpu schedule for 1D average pooling."""

    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", False, False),
        ((3, 32, 27), (3,), (2,), 0, 1, "NWC", False, False),
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", True, False),
        ((3, 32, 27), (3,), (2,), 1, 1, "NCW", False, True),
        ((1, 1, 32), 3, 1, 0, 1, "NCW", False, False),
        ((1, 4, 20), 3, 2, 2, 1, "NCW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool1d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool2d(BasicPoolTests):
    """Exercises the pool.arm_cpu schedule for 2D average pooling."""

    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
        ((3, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NHWC", False, False),
        ((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, False),
        ((2, 27, 27, 16), (3, 3), (2, 2), 0, 1, "NHWC", True, False),
        ((2, 16, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True, True),
        ((1, 25, 5, 64), (25, 5), (25, 5), 0, 1, "NHWC", False, False),
        ((1, 3, 3, 256), (3, 3), (3, 3), 0, 1, "NHWC", False, False),
        ((1, 8, 8, 64), (8, 8), (8, 8), 0, 1, "NHWC", False, False),
        ((1, 1, 32, 32), (3, 3), 1, 0, 1, "NCHW", False, False),
        ((1, 4, 32, 20), (3, 3), (2, 2), 0, 1, "NCHW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool2d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestAvgPool3d(BasicPoolTests):
    """Exercises the pool.arm_cpu schedule for 3D average pooling."""

    (
        shape,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        count_include_pad,
    ) = tvm.testing.parameters(
        ((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False, False),
    )
    pool_type = tvm.testing.parameter("avg_pool3d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")


if __name__ == "__main__":
    tvm.testing.main()
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_generalized_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper class for testing variations of 2D convolution. Should be used by subclassing
`GeneralizedConv2dTests`, and then setting the arguments using tvm.testing.parameter(s)."""
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
def change_ndarray_layout(arr, src_layout, dst_layout):
    """Return a copy of `arr` permuted from `src_layout` to `dst_layout`.

    Parameters
    ----------
    arr : numpy.ndarray
        The ndarray to be reformatted.
    src_layout : str
        The current layout, one letter per axis (e.g. NHWC or OIHW, but not NCHW2c).
    dst_layout : str
        The desired layout, using the same letters as src_layout.

    Returns
    -------
    numpy.ndarray
        The data rearranged into dst_layout.
    """
    assert src_layout.isalpha() and dst_layout.isalpha()
    # For each destination axis, find where that axis currently lives in the source.
    permutation = [src_layout.index(axis_name) for axis_name in dst_layout]
    return np.transpose(arr, permutation)
class GeneralizedConv2dTests:
    """Superclass usable for testing regular, depthwise, or grouped conv2d.

    As written it does not handle 5D data formats (NCHWc and friends) but could be extended.
    `data_shape` is always given in NHWC order (it is re-laid-out per `data_layout`), and
    `kernel_size` is a (height, width) pair. Subclasses supply the concrete parameters;
    classes here whose names do not start with `Test` are not collected by Pytest.
    """

    @tvm.testing.requires_corstone300
    def test_conv2d(
        self,
        data_shape,
        kernel_size,
        num_filter,
        in_dtype,
        strides,
        padding,
        groups,
        dilation,
        data_layout,
        kernel_layout,
        out_layout,
        schedule_name,
    ):
        """Test a subgraph with a single conv2d operator."""
        # Random data/kernel in canonical layouts: data NHWC, kernel HWIO.
        ref_input_data = np.random.randint(low=-128, high=127, size=data_shape, dtype=in_dtype)
        ref_input_var = relay.var("input", relay.TensorType(data_shape, in_dtype))
        kernel_shape = (*kernel_size, data_shape[-1] // groups, num_filter)
        ref_kernel_data = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=in_dtype)

        # The x86 depthwise reference implementation only supports HWOI with NHWC, so the
        # reference kernel layout is configurable (self.ref_kernel_layout) to work around
        # this. Eventually the reference output should come from TensorFlow instead.
        # See https://github.com/apache/tvm/issues/13137 for details.
        ref_relay_op = relay.op.nn.conv2d(
            ref_input_var,
            relay.const(change_ndarray_layout(ref_kernel_data, "HWIO", self.ref_kernel_layout)),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=groups,
            dilation=(dilation, dilation),
            data_layout="NHWC",
            kernel_layout=self.ref_kernel_layout,
            out_dtype="int32",
            out_layout="NHWC",
        )
        ref_module = tvm.IRModule.from_expr(relay.Function([ref_input_var], ref_relay_op))
        ref_outputs = generate_ref_data(ref_module, {"input": ref_input_data})

        # Re-lay-out the single reference output to match the requested out_layout.
        assert len(ref_outputs) == 1
        output_tensor_name, output_tensor = next(iter(ref_outputs.items()))
        ref_outputs[output_tensor_name] = change_ndarray_layout(output_tensor, "NHWC", out_layout)

        # Build the module under test with the requested layouts.
        test_input_data = change_ndarray_layout(ref_input_data, "NHWC", data_layout)
        test_input_var = relay.var("input", relay.TensorType(test_input_data.shape, in_dtype))
        test_kernel_data = change_ndarray_layout(ref_kernel_data, "HWIO", kernel_layout)

        test_relay_op = relay.op.nn.conv2d(
            test_input_var,
            relay.const(test_kernel_data),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            groups=groups,
            dilation=(dilation, dilation),
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            out_dtype="int32",
            out_layout=out_layout,
        )
        test_function = relay.Function([test_input_var], test_relay_op)
        test_model = AOTTestModel(
            module=tvm.IRModule.from_expr(test_function),
            inputs={"input": test_input_data},
            outputs=ref_outputs,
        )

        compile_and_run(
            test_model,
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
| 6,104 | 38.901961 | 100 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_conv1d_nwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv1dTests:
    """Harness that cross-checks an arm_cpu conv1d schedule against a reference build.

    Runs on the Corstone-300 FVP (Cortex-M7). The parameters are supplied by the
    concrete subclasses below via ``tvm.testing.parameter(s)``.
    """

    @tvm.testing.requires_corstone300
    def test_conv1d(
        self,
        data_shape,
        kernel_size,
        kernel_layout,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv1d_nwc operator."""
        ishape = data_shape
        # Weights are generated in WIO order; data_shape[-1] is the channel count (data is NWC).
        wshape = (kernel_size, data_shape[-1], num_filter)
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)

        # Reference graph: fixed NWC/WIO layouts, used only to produce expected outputs.
        input0 = relay.var("input", relay.TensorType(ishape, dtype))
        weight0 = relay.const(weight_data)
        out0 = relay.op.nn.conv1d(
            input0,
            weight0,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout="NWC",
            kernel_layout="WIO",
            out_dtype="int32",
            out_layout="NWC",
        )
        ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

        # Graph under test: same math, but with the kernel transposed to the requested
        # layout (WIO -> WOI moves the input-channel axis to the end).
        input1 = relay.var("input", relay.TensorType(ishape, dtype))
        if kernel_layout == "WOI":
            weight1 = relay.const(np.moveaxis(weight_data, 1, -1))
        else:
            weight1 = relay.const(weight_data)
        out1 = relay.op.nn.conv1d(
            input1,
            weight1,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout="NWC",
            kernel_layout=kernel_layout,
            out_dtype="int32",
            out_layout="NWC",
        )
        mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)

        # Build with the schedule under test and execute on the Corstone-300 runner,
        # comparing against the reference outputs.
        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestConv1d_dsp(BasicConv1dTests):
    """This test is for conv1d_dsp schedule."""

    # Each tuple is (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((1, 32, 12), 3, 16, 1, 0, 1),
        # TODO: The following 4 tests fail due to https://github.com/apache/tvm/issues/11466
        # ((3, 12, 10), 4, 24, 1, 0, 1),
        # ((1, 7, 7), 3, 5, 1, 0, 1),
        # ((1, 10, 2), 4, 4, 2, (1, 1), 1),
        # ((1, 20, 2), 4, 4, 2, (0, 1), 1),
        ((1, 16, 4), 1, 12, 1, (1, 0), 1),
        ((1, 24, 16), 1, 32, 3, (2, 2), 1),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    data_layout = tvm.testing.parameter("NWC")
    # WOI kernels: the harness transposes the generated WIO weights (see BasicConv1dTests).
    kernel_layout = tvm.testing.parameter("WOI")
    schedule_name = tvm.testing.parameter("conv1d_dsp")
class TestConv1d_nwc(BasicConv1dTests):
    """This test is for conv1d_nwc.generic schedule."""

    # Each tuple is (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((1, 32, 12), 3, 16, 1, 0, 1),
        ((3, 12, 10), 4, 24, 1, 0, 1),
        ((1, 7, 7), 3, 5, 1, 0, 1),
        ((1, 10, 2), 4, 4, 2, (1, 1), 1),
        ((1, 20, 2), 4, 4, 2, (0, 1), 1),
        ((1, 16, 4), 1, 12, 1, (1, 0), 1),
        ((1, 24, 16), 1, 32, 3, (2, 2), 1),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    data_layout = tvm.testing.parameter("NWC")
    # WIO matches the reference graph's kernel layout, so no transpose happens.
    kernel_layout = tvm.testing.parameter("WIO")
    schedule_name = tvm.testing.parameter("conv1d_nwc.generic")


if __name__ == "__main__":
    tvm.testing.main()
| 4,984 | 33.618056 | 93 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_max_pool.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicPoolTests:
    """Harness that checks an arm_cpu pooling schedule on the Corstone-300 FVP.

    `pool_type` is the name of a relay.op.nn pooling operator (e.g. "max_pool2d"),
    resolved via getattr, so the same harness covers 1-D/2-D/3-D pools.
    """

    @tvm.testing.requires_corstone300
    def test_pool(
        self,
        pool_type,
        shape,
        dtype,
        pool_size,
        strides,
        padding,
        dilation,
        layout,
        ceil_mode,
        schedule_name,
    ):
        """Test a subgraph with a single max_pool operator."""
        ishape = shape
        # Reference graph, used only to generate expected outputs.
        input0 = relay.var("input", relay.TensorType(ishape, dtype))
        out0 = getattr(relay.op.nn, pool_type)(
            input0,
            pool_size=pool_size,
            strides=strides,
            dilation=dilation,
            padding=padding,
            layout=layout,
            out_layout="",
            ceil_mode=ceil_mode,
        )
        ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

        # Graph under test: structurally identical; the difference is the schedule
        # selected via `schedule_name` in compile_and_run below.
        input1 = relay.var("input", relay.TensorType(ishape, dtype))
        out1 = getattr(relay.op.nn, pool_type)(
            input1,
            pool_size=pool_size,
            strides=strides,
            dilation=dilation,
            padding=padding,
            layout=layout,
            out_layout="",
            ceil_mode=ceil_mode,
        )
        mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)
        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestMaxPool1d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Each tuple is (shape, pool_size, strides, padding, dilation, layout, ceil_mode).
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((3, 32, 27), (3,), (2,), 0, 1, "NCW", True),
        ((1, 32, 1), 3, 1, 0, 1, "NWC", False),
        ((1, 20, 4), 3, 2, 0, 1, "NWC", False),
    )
    pool_type = tvm.testing.parameter("max_pool1d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool2d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Each tuple is (shape, pool_size, strides, padding, dilation, layout, ceil_mode);
    # both NCHW and NHWC layouts and both ceil modes are covered.
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", False),
        ((2, 32, 27, 27), (3, 3), (2, 2), 0, 1, "NCHW", True),
        ((1, 26, 26, 12), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 11, 11, 32), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 3, 3, 64), (2, 2), (2, 2), 0, 1, "NHWC", False),
        ((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", False),
        ((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", False),
        ((1, 32, 32, 1), (3, 3), 1, 0, 1, "NHWC", True),
        ((1, 32, 20, 4), (3, 3), (2, 2), 0, 1, "NHWC", True),
    )
    pool_type = tvm.testing.parameter("max_pool2d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")
class TestMaxPool3d(BasicPoolTests):
    """This test is for pool.arm_cpu schedule."""

    # Each tuple is (shape, pool_size, strides, padding, dilation, layout, ceil_mode).
    shape, pool_size, strides, padding, dilation, layout, ceil_mode = tvm.testing.parameters(
        ((3, 4, 8, 27, 27), (3, 3, 3), 2, 0, 1, "NCDHW", False),
    )
    pool_type = tvm.testing.parameter("max_pool3d")
    dtype = tvm.testing.parameter("int32")
    schedule_name = tvm.testing.parameter("pool.arm_cpu")


if __name__ == "__main__":
    tvm.testing.main()
| 4,611 | 33.676692 | 93 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for depthwise_conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import fixture, main, parameter, parameters
class DepthwiseConv2dTests(GeneralizedConv2dTests):
    """Helper for constructing depthwise Conv2ds. Sets the reference kernel layout to what x86 code
    supports."""

    @fixture
    def groups(self, data_shape):
        """By definition, a depthwise_conv2d has a number of groups equal to the number of input
        channels, so we don't need to specify the number of groups each time."""
        # NOTE(review): channels are read at index 3, i.e. this assumes data_shape is given
        # in NHWC order even for the NCHW-layout tests — confirm against GeneralizedConv2dTests,
        # which builds its reference data in NHWC.
        return data_shape[3]

    def setup_method(self):
        # The x86 reference computation uses HWOI depthwise kernels (see class docstring).
        self.ref_kernel_layout = "HWOI"
class TestDepthwiseConv2d_NCHW_OIHW(DepthwiseConv2dTests):
    """This test is for depthwise_conv2d_nchw.arm_cpu schedule."""

    # Each tuple is (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 10, 3, 32), (3, 3), 32, 1, 0, 1),
        ((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 1),
        ((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
        ((1, 32, 16, 32), (3, 3), 32, 1, 0, 1),
        ((1, 32, 16, 32), (3, 3), 32, 1, (0, 2, 2, 0), 2),
        ((1, 32, 16, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
    )
    in_dtype = parameter("int8", "int16")
    data_layout = parameter("NCHW")
    kernel_layout = parameter("OIHW")
    out_layout = parameter("NCHW")
    schedule_name = parameter("depthwise_conv2d_nchw.arm_cpu")
class TestDepthwiseConv2d_NHWC_HWOI(DepthwiseConv2dTests):
    """This test is for depthwise_conv2d_nhwc.generic schedule."""

    # Each tuple is (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
        ((1, 49, 10, 64), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
    )
    in_dtype = parameter("int8", "int16")
    data_layout = parameter("NHWC")
    kernel_layout = parameter("HWOI")
    out_layout = parameter("NHWC")
    schedule_name = parameter("depthwise_conv2d_nhwc.generic")
class TestDepthwiseConv2d_NHWC_HWOI_DSP(DepthwiseConv2dTests):
    """This test is for depthwise_conv2d_nhwc_dsp.arm_cpu schedule. The tests that are parameterized
    by dtype work for both int8 and int16, while the others only work on the specified dtype."""

    # Each tuple is (data_shape, kernel_size, num_filter, strides, padding); the dtype is
    # appended below so every case runs for both int8 and int16.
    in_dtype_parameterized_tests = [
        # Depthwise_conv2d parameters from MobileNetV1 0.25x
        ((1, 48, 48, 8), (3, 3), 8, (1, 1), 1),
        ((1, 48, 48, 16), (3, 3), 16, (2, 2), (1, 1, 0, 0)),
        ((1, 24, 24, 32), (3, 3), 32, (1, 1), 1),
        ((1, 24, 24, 32), (3, 3), 32, (2, 2), (1, 1, 0, 0)),
        ((1, 12, 12, 64), (3, 3), 64, (1, 1), 1),
        ((1, 12, 12, 64), (3, 3), 64, (2, 2), (1, 1, 0, 0)),
        ((1, 6, 6, 128), (3, 3), 128, (1, 1), 1),
        ((1, 6, 6, 128), (3, 3), 128, (2, 2), (1, 1, 0, 0)),
        ((1, 3, 3, 256), (3, 3), 256, (1, 1), 1),
        # Asymmetric and larger kernels
        ((1, 25, 5, 64), (3, 3), 64, (1, 1), 1),
        ((1, 24, 24, 8), (5, 5), 8, (1, 1), 1),
        ((1, 24, 24, 8), (3, 5), 8, (1, 1), 1),
    ]
    data_shape, kernel_size, num_filter, strides, padding, in_dtype = parameters(
        # Make a copy of each parameterized test for int8 and one for int16
        *map(lambda t: t + ("int8",), in_dtype_parameterized_tests),
        *map(lambda t: t + ("int16",), in_dtype_parameterized_tests),
        # Test the int16 implementation with channel numbers not divisible by four
        ((1, 48, 48, 6), (3, 3), 6, (1, 1), 1, "int16"),
    )
    dilation = parameter(1)
    data_layout = parameter("NHWC")
    kernel_layout = parameter("HWOI")
    out_layout = parameter("NHWC")
    schedule_name = parameter("depthwise_conv2d_nhwc_dsp.arm_cpu")


if __name__ == "__main__":
    main()
| 4,856 | 41.234783 | 100 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_quantized_convolution.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""microTVM cares a lot about the convolution + bias + requantize + fused ReLU use case. There have
been some accuracy issues in the past, so this test steps through a model (MobileNetV1) layer by
layer and ensures there is 1-1 correspondance at each step. This test would run way faster if we ran
the model all at once, but then we wouldn't know which layers had issues.
Furthermore, this test uses some in-development optimizations for microTVM that aren't part of the
main pipeline.
"""
import numpy as np
from PIL import Image
import pytest
import tvm
import tvm.testing
from tvm import meta_schedule, relay
from tvm.testing.aot import AOTTestModel, run_and_check, AOTCompiledTestModel
from tvm.relay.backend import Executor, Runtime
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER
from tvm.contrib.download import download_testdata
from test_generalized_conv2d import change_ndarray_layout
# The model is the v0.7 version of the TinyML person detection (aka visual wake words) model. This
# is an RGB 96x96 MobileNet V1 model.
MODEL_URL = "https://github.com/mlcommons/tiny/raw/v0.7/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite"
# Sample image fed through the model; see the `interpreter` fixture for why a real image is used.
SAMPLE_URL = (
    "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg"
)
# Number of conv2d/depthwise_conv2d layers at the start of MobileNetV1 covered by these tests.
MODEL_NUM_CONVS = 27
@pytest.fixture(scope="module")
def interpreter():
    """Returns a TFLite interpreter with the MLPerf Tiny visual wakewords model loaded, with an
    elephant image run through it, and with all intermediate layer outputs saved."""
    # Make sure the Tensorflow import is skipped if the test is being skipped. This is needed to
    # prevent the "python: i386" tests from failing, as they don't have Tensorflow installed.
    import tensorflow as tf  # pylint: disable=import-outside-toplevel

    # Download the reference model
    rel_model_path = "model_microtvm_mobilenetv1.tflite"
    file = download_testdata(MODEL_URL, rel_model_path, overwrite=False)
    # Load it into TensorFlow and allocate memory. Preserving all tensors is what lets the
    # tests read every intermediate layer's values afterwards.
    interpreter = tf.lite.Interpreter(file, experimental_preserve_all_tensors=True)
    interpreter.allocate_tensors()
    # Download an image. The neuron activations are strange if we use random data or ones,
    # so downloading an image is useful.
    rel_image_path = "image_microtvm_mobilenetv1.jpg"
    img_path = download_testdata(SAMPLE_URL, rel_image_path, overwrite=False)
    image = Image.open(img_path).resize((96, 96))
    image_data_hwc_uint8 = np.asarray(image)
    assert image_data_hwc_uint8.shape == (96, 96, 3)
    assert image_data_hwc_uint8.dtype == "uint8"
    # Shift uint8 [0, 255] to int8 [-128, 127]: uint8 addition of 128 wraps modulo 256, and
    # reinterpreting the bytes via .view("int8") completes the zero-point shift.
    image_data_nhwc_int8 = (image_data_hwc_uint8 + 128).view("int8").reshape((1, 96, 96, 3))
    # Load the image into the TFLite interpreter and compute all intermediate tensor values
    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[0]["index"], image_data_nhwc_int8)
    interpreter.invoke()
    return interpreter
def _get_mobilenet_v1_layer_attributes(layer_num):
"""Returns the relevant padding and stride for a given layer in a MobileNetV1 model. It's a huge
headache to read this data from TensorFlow, as it is not user accessible via the interpreter. If
we really wanted to, we would have to parse the .tflite file ourselves. This function is a bit
of a hack, but lets us skip that."""
if layer_num == 0: # Regular conv2d
return ((0, 0, 1, 1), (2, 2), False)
if layer_num % 2 == 0: # 1x1 conv2d
return ((0, 0, 0, 0), (1, 1), False)
if layer_num in [3, 7, 11, 23]: # Downsizing depthwise_conv2d layers
return ((0, 0, 1, 1), (2, 2), True)
# Depthwise conv2d
return ((1, 1, 1, 1), (1, 1), True)
@pytest.mark.parametrize("layer", range(2, 27, 2))
@tvm.testing.requires_package("tensorflow")
def test_empty_channel_detection(interpreter, layer):
    """Some models (mainly MobileNetV1) have kernels with many output channels full entirely of
    zeroes. The VWW model is one of these. This test confirms that the outputs of these channels,
    as computed by TensorFlow, are indeed not dependent upon the input values.

    Only the even-numbered (regular, 1x1) convolution layers are checked. The kernel is in
    TFLite's OHWI order, so an all-zero kernel[i, 0, 0, :] slice means output channel i is a
    constant determined only by the bias, scales, and zero point.
    """
    _, kernel, bias, output = _load_tflite_layer(interpreter, layer)
    kernel_data, _ = kernel
    bias_data, bias_quant = bias
    output_data, output_quant = output
    is_depthwise = _get_mobilenet_v1_layer_attributes(layer)[2]
    assert not is_depthwise
    assert kernel_data.shape[1] == kernel_data.shape[2] == 1
    # The kernel is OHWI, so output channels are on axis 0. Reading shape[3] here (as this
    # test previously did) counted *input* channels instead, silently skipping every output
    # channel with index >= the input-channel count.
    out_channels = kernel_data.shape[0]
    fixed_channels = {}
    out_zero_point = output_quant["zero_points"][0]
    assert out_zero_point == -128
    for i in range(out_channels):
        # Skip over output channels with data
        if np.any(kernel_data[i, 0, 0, :]):
            continue
        # With an all-zero kernel row, the requantized output is just the rescaled bias plus
        # the output zero point, clipped to the int8 range.
        scale = bias_quant["scales"][i] / output_quant["scales"][0]
        channel_constant = round(bias_data[i] * scale + out_zero_point)
        clipped = min(127, max(-128, channel_constant))
        out_channel_values = output_data[0, :, :, i].flatten()
        assert all(x == clipped for x in out_channel_values)
        fixed_channels[i] = clipped
    # Check if we are on the final convolution and skip the next test if so
    if layer + 1 >= MODEL_NUM_CONVS:
        return
    # We now need to compute values for the following depthwise layer: a constant input
    # channel must produce a constant output channel through a depthwise convolution.
    depthwise_output = _load_tflite_layer(interpreter, layer + 1)[3][0]
    is_depthwise = _get_mobilenet_v1_layer_attributes(layer + 1)[2]
    assert is_depthwise
    for i in fixed_channels:
        assert np.all(depthwise_output[:, :, :, i] == depthwise_output[0, 0, 0, i])
def _get_relu_activation_prefix(layer_num):
if layer_num == 0:
return "model/activation/Relu;"
return f"model/activation_{layer_num}/Relu;"
def _get_main_path_tensor_details(details, tensor_num):
"""A "main path" tensor is a fused layer input/output. Gets the tensor details from the tensor
index, where 0 gives the original input tensor, 1 gives the output of the first fused
convolution layer, and so on. TFLite names are a little wack, so we get this information by
finding the SECOND tensor (which has the suffix "1") for each ReLU activation (the first tensor
is the bias)."""
if tensor_num == 0:
return details[0]
prefix = _get_relu_activation_prefix(tensor_num - 1)
detail = next(d for d in details if d["name"].startswith(prefix) and d["name"].endswith("1"))
assert len(detail["shape"]) == 4
assert detail["dtype"] == np.int8
return detail
def _get_bias_details(details, layer_num):
"""Gets the tensor details for the bias tensor for the corresponding convolution layer. The
bias tensors always appear before the main path tensors, so we don't have to check the ending to
make sure we have the right one."""
prefix = _get_relu_activation_prefix(layer_num)
detail = next(d for d in details if d["name"].startswith(prefix))
assert len(detail["shape"]) == 1
assert detail["dtype"] == np.int32
return detail
def _get_kernel_details(details, layer_num):
"""Gets the tensor details for the kernel tensor for the corresponding convolution layer. These
have a different naming scheme from the main path and bias tensors, as they are converted before
activation function fusion. Note that regular vs depthwise conv2ds have different prefixes."""
if layer_num == 0:
prefix = "model/conv2d/Conv2D"
elif layer_num % 2 == 0:
prefix = f"model/conv2d_{layer_num // 2}/"
else:
prefix = f"model/batch_normalization_{layer_num}/"
detail = next(d for d in details if d["name"].startswith(prefix))
assert len(detail["shape"]) == 4
assert detail["dtype"] == np.int8
return detail
def _get_quant_scale_const(quantization_dict, as_scalar=False):
    """Wrap a TFLite quantization scale array in a float32 Relay constant.

    With as_scalar=True the array must hold exactly one entry, and that entry is
    wrapped instead of the whole array.
    """
    scales = quantization_dict["scales"]
    if not as_scalar:
        return relay.const(scales, "float32")
    assert len(scales) == 1
    return relay.const(scales[0], "float32")
def _get_quant_zp_const(quantization_dict, as_scalar=False):
    """Wrap a TFLite quantization zero-point array in an int32 Relay constant.

    With as_scalar=True the array must hold exactly one entry, and that entry is
    wrapped instead of the whole array.
    """
    zero_points = quantization_dict["zero_points"]
    if not as_scalar:
        return relay.const(zero_points, "int32")
    assert len(zero_points) == 1
    return relay.const(zero_points[0], "int32")
def _change_layout(data, old_layout, new_layout, dtype):
    # Relayout the ndarray and cast it to the requested compute dtype (the TFLite tensors
    # are int8, but these tests may compute in int16).
    return change_ndarray_layout(data, old_layout, new_layout).astype(dtype)
def _load_tflite_layer(interpreter, layer):
    """Fetch (ndarray, quantization_parameters) pairs for one conv layer.

    Returns four pairs — input, kernel, bias, and output — read from a TFLite
    interpreter that was run with all intermediate tensors preserved.
    """
    tensor_details = interpreter.get_tensor_details()

    def lookup(detail):
        # Pair a tensor's current value with its quantization parameters.
        return interpreter.get_tensor(detail["index"]), detail["quantization_parameters"]

    input_data = lookup(_get_main_path_tensor_details(tensor_details, layer))
    kernel_data = lookup(_get_kernel_details(tensor_details, layer))
    bias_data = lookup(_get_bias_details(tensor_details, layer))
    # Main-path tensor `layer + 1` is this layer's fused output.
    output_data = lookup(_get_main_path_tensor_details(tensor_details, layer + 1))
    return input_data, kernel_data, bias_data, output_data
def _make_relay_partial_func(relay_op, *args, **kwargs):
return lambda op: relay_op(op, *args, **kwargs)
def _make_conv2d_op(kernel, data_quant, kernel_quant, hyperparams, is_depthwise=False):
    """Build a partially-applied qnn.conv2d that takes only the input expression.

    `kernel` is the TFLite kernel ndarray in OHWI axis order; `hyperparams` is the
    tuple (dtype, padding, strides, data_layout, kernel_layout, output_layout).
    """
    dtype, padding, strides, data_layout, kernel_layout, output_layout = hyperparams
    # Axes 1:3 of the OHWI ndarray are the spatial (H, W) dimensions.
    kernel_size = kernel.shape[1:3]
    if is_depthwise:
        # TFLite depthwise kernels carry the channel count on the last axis.
        channels = groups = kernel.shape[3]
    else:
        channels = kernel.shape[0]
        groups = 1
    kernel_ndarr = _change_layout(kernel, "OHWI", kernel_layout, dtype)
    return _make_relay_partial_func(
        relay.qnn.op.conv2d,
        relay.const(kernel_ndarr, dtype),
        # Input quantization params must be scalars; kernel params are passed as full arrays.
        input_zero_point=_get_quant_zp_const(data_quant, as_scalar=True),
        kernel_zero_point=_get_quant_zp_const(kernel_quant),
        input_scale=_get_quant_scale_const(data_quant, as_scalar=True),
        kernel_scale=_get_quant_scale_const(kernel_quant),
        kernel_size=kernel_size,
        data_layout=data_layout,
        # NOTE(review): for depthwise convs Relay is told the kernel is IOHW even though the
        # ndarray was relaid out to `kernel_layout` above — presumably because the depthwise
        # O axis acts as the channel multiplier; confirm against relay.qnn.op.conv2d docs.
        kernel_layout="IOHW" if is_depthwise else kernel_layout,
        dilation=(1, 1),
        strides=strides,
        padding=padding,
        groups=groups,
        channels=channels,
        out_dtype="int32",
        out_layout=output_layout,
    )
def _make_bias_op(bias, output_layout):
    """Build a partially-applied bias_add adding `bias` along the channel axis of
    `output_layout`."""
    requantize_axis = output_layout.index("C")
    return _make_relay_partial_func(
        relay.op.nn.bias_add,
        relay.const(bias, "int32"),
        axis=requantize_axis,
    )
def _make_requantize_op(bias_quant, output_quant, output_dtype, output_layout):
    """Build a partially-applied qnn.requantize from the bias quantization parameters
    (the conv+bias accumulator's scale/zero-point) to the output quantization,
    operating along the channel axis of `output_layout`."""
    requantize_axis = output_layout.index("C")
    return _make_relay_partial_func(
        relay.qnn.op.requantize,
        _get_quant_scale_const(bias_quant),
        _get_quant_zp_const(bias_quant),
        _get_quant_scale_const(output_quant, as_scalar=True),
        _get_quant_zp_const(output_quant, as_scalar=True),
        axis=requantize_axis,
        # Use a 64-bit intermediate for the rescale arithmetic.
        compute_dtype="int64",
        out_dtype=output_dtype,
    )
def _make_aot_model(params, hyperparams, layouts, is_depthwise=False):
    """Assemble conv2d -> bias_add -> requantize into an AOTTestModel.

    `params` holds four (ndarray, quant_params) pairs (data, kernel, bias, output) as
    returned by _load_tflite_layer; `hyperparams` is (dtype, padding, strides);
    `layouts` is (data_layout, kernel_layout, output_layout).
    """
    tensors, quantizations = zip(*params)
    data, kernel, bias, output = tensors
    data_quant, kernel_quant, bias_quant, output_quant = quantizations
    dtype, _padding, _strides = hyperparams
    data_layout, _, output_layout = layouts
    # TFLite main-path tensors are NHWC; convert them to the layouts under test.
    data_ndarr = _change_layout(data, "NHWC", data_layout, dtype)
    output_ndarr = _change_layout(output, "NHWC", output_layout, dtype)
    input_var = relay.var("input", relay.TensorType(data_ndarr.shape, dtype))
    # _make_conv2d_op expects the concatenated (dtype, padding, strides, *layouts) tuple.
    conv2d = _make_conv2d_op(kernel, data_quant, kernel_quant, hyperparams + layouts, is_depthwise)
    # NOTE: rebinds `bias` from the ndarray to the partially-applied operator.
    bias = _make_bias_op(bias, output_layout)
    requantize = _make_requantize_op(bias_quant, output_quant, dtype, output_layout)
    relay_mod = requantize(bias(conv2d(input_var)))
    relay_func = relay.Function([input_var], relay_mod)
    return AOTTestModel(
        module=tvm.IRModule.from_expr(relay_func),
        inputs={"input": data_ndarr},
        outputs={"output": output_ndarr},
        # Allow off-by-one differences against the TFLite reference output.
        output_tolerance=1,
    )
def _make_target():
    # Bare-metal C codegen for Cortex-M7, matching the Corstone-300 runner used in this file.
    return tvm.target.Target("c -keys=arm_cpu -mcpu=cortex-m7")
def _make_executor():
    """AOT executor using the unpacked C interface API with 8-byte data alignment."""
    executor_options = {
        "workspace-byte-alignment": 8,
        "constant-byte-alignment": 8,
        "interface-api": "c",
        "unpacked-api": True,
    }
    return Executor("aot", executor_options)
@pytest.mark.parametrize("output_layout", ["NHWC", "NCHW"])
@pytest.mark.parametrize("layer", range(27))
@tvm.testing.requires_corstone300
def test_qnn_conv2d_mobilenetv1_layer(interpreter, layer, output_layout):
    """Checks microTVM output against TFLite for one MobileNetV1 layer.

    Loads the input, kernel, bias, expected output, and quantization parameters from the specified
    layer in a TFLite Interpreter. That information is used to construct a Relay Function with the
    same structure. The Function is run using microTVM and AOTTestModel, and we verify microTVM's
    output is the same as the TFLite ground truth.

    This function only cross-checks the first 27 layers in MobileNetV1, which are regular and
    depthwise 2D convolutions (this function only works for 2D convolutions). We do not test the
    average pool, dense, or softmax layers at the end of the model.

    Note that we disable the QNN Legalization pass. This allows TVM to use its QNN compute
    definitions, fuse the three operations together, and perform other optimizations.

    Parameters
    ----------
    interpreter: tensorflow.lite.python.interpreter.Interpreter
        A TensorFlow Lite interpreter for a MobileNetV1 model, where invoke() has already been
        called and experimental_preserve_all_tensors=True. Should be passed as a Pytest fixture.

    layer: int
        The index of the layer to check against TensorFlow's ground truth values.

    output_layout: str
        The output_layout for microTVM to use. Does not have to match the TensorFlow layout.
    """
    # Compute in int16 even though TFLite stores int8 (tensors are up-cast during relayout).
    dtype = "int16"
    tensor, kernel, bias, output = _load_tflite_layer(interpreter, layer)
    padding, strides, is_depthwise = _get_mobilenet_v1_layer_attributes(layer)
    # Pick the data/kernel layouts exercised for each conv flavor.
    if is_depthwise:
        data_layout, kernel_layout = "NCHW", "OIHW"
    else:
        data_layout, kernel_layout = "NHWC", "OHWI"
    test_model = _make_aot_model(
        (tensor, kernel, bias, output),
        (dtype, padding, strides),
        (data_layout, kernel_layout, output_layout),
        is_depthwise=is_depthwise,
    )

    def schedule_fn(_sch):
        # NOTE(review): accepts every candidate without modifying the schedule — presumably
        # this keeps the default lowering while satisfying the meta_schedule database hook.
        return True

    with tvm.transform.PassContext(
        opt_level=3,
        config={
            "tir.disable_vectorize": True,
            "relay.backend.use_meta_schedule": True,
            "relay.backend.tir_converter": "allow_extern",
        },
        disabled_pass=["qnn.Legalize"],
    ), meta_schedule.database.ScheduleFnDatabase(schedule_fn):
        executor_factory = tvm.relay.build(
            test_model.module,
            _make_target(),
            executor=_make_executor(),
            runtime=Runtime("crt"),
            params=test_model.params,
            mod_name=test_model.name,
        )
    # Run on the Corstone-300 FVP and compare against the TFLite outputs captured above.
    compiled = AOTCompiledTestModel(model=test_model, executor_factory=executor_factory)
    run_and_check(
        models=[compiled],
        runner=AOT_CORSTONE300_RUNNER,
        interface_api="c",
        workspace_byte_alignment=8,
        constant_byte_alignment=8,
    )
| 16,102 | 38.662562 | 127 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_dense_dsp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicDenseTests:
    """Harness that builds a single dense (+ optional bias_add) subgraph and checks an
    arm_cpu schedule's output against host-generated reference data on Corstone-300."""

    @tvm.testing.requires_corstone300
    def test_dense(self, shape, weight_shape, dtype, schedule_name, enable_bias):
        """Test a subgraph with a single dense operator."""
        ishape = shape
        wshape = weight_shape
        out_dtype = "int32"
        # `units` is taken from the weight's first axis (relay dense convention:
        # weight shape is (units, input_dim)).
        units = weight_shape[0]
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
        if enable_bias:
            bias_data = np.random.randint(low=-10, high=10, size=(wshape[0]), dtype=out_dtype)

        # Renamed from `input`, which shadowed the Python builtin of the same name.
        input_var = relay.var("input", relay.TensorType(ishape, dtype))
        weight = relay.const(weight_data)
        dense = relay.op.nn.dense(
            input_var,
            weight,
            units=units,
            out_dtype=out_dtype,
        )
        if enable_bias:
            bias = relay.const(bias_data)
            relay_op = relay.op.nn.bias_add(dense, bias)
        else:
            relay_op = dense

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        # Reference outputs come from a separate build of the same expression.
        ref_mod = tvm.IRModule.from_expr(relay.Function([input_var], relay_op))
        output_list = generate_ref_data(ref_mod, inputs)
        mod = tvm.IRModule.from_expr(relay.Function([input_var], relay_op))
        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestDense(BasicDenseTests):
    """This test is for dense_dsp schedule."""

    # Each tuple is (input shape, weight shape); the weight's first axis is the unit count.
    shape, weight_shape = tvm.testing.parameters(
        ((8, 128), (32, 128)),
        ((32, 32), (32, 32)),
        ((1, 64), (1, 64)),
        ((11, 2), (2, 2)),
        ((1, 32), (64, 32)),
        ((3, 12), (10, 12)),
    )
    dtype = tvm.testing.parameter("int8", "int16")
    schedule_name = tvm.testing.parameter("dense_dsp.arm_cpu")
    enable_bias = tvm.testing.parameter(False, True)


if __name__ == "__main__":
    tvm.testing.main()
| 3,156 | 34.47191 | 94 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_conv2d_NCHWc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv2dTests:
    """Harness that checks a conv2d_NCHWc schedule on the Corstone-300 FVP.

    Data and weights are declared in plain NCHW/OIHW and converted to the blocked
    layouts (e.g. NCHW4c / OIHW4i4o) inside the graph via layout_transform.
    """

    @tvm.testing.requires_corstone300
    def test_conv2d_NCHWc(
        self,
        data_shape,
        kernel_size,
        data_layout,
        kernel_layout,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv2d_NCHWc operator."""
        ishape = data_shape
        # Weights are OIHW: (num_filter, in_channels, kH, kW); data_shape is NCHW.
        wshape = (num_filter, data_shape[1], *kernel_size)
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)

        # Reference graph, used only to generate expected outputs.
        input0 = relay.var("input", relay.TensorType(ishape, dtype))
        weight0 = relay.const(weight_data)
        out0 = relay.op.nn.contrib_conv2d_nchwc(
            relay.layout_transform(input0, "NCHW", data_layout),
            relay.layout_transform(weight0, "OIHW", kernel_layout),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            channels=num_filter,
            out_dtype="",
            out_layout="",
        )
        ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0))

        # Graph under test: structurally identical; the difference is the schedule
        # selected via `schedule_name` in compile_and_run below.
        input1 = relay.var("input", relay.TensorType(ishape, dtype))
        weight1 = relay.const(weight_data)
        out1 = relay.op.nn.contrib_conv2d_nchwc(
            relay.layout_transform(input1, "NCHW", data_layout),
            relay.layout_transform(weight1, "OIHW", kernel_layout),
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
            channels=num_filter,
            out_dtype="",
            out_layout="",
        )
        mod = tvm.IRModule.from_expr(relay.Function([input1], out1))

        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)
        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestConv2d_NCHWc(BasicConv2dTests):
    """This test is for conv2d_NCHWc.x86 schedule."""
    # Each tuple below is one parameterisation, in the order of the unpacked
    # names on the left-hand side of the assignment.
    (
        data_shape,
        kernel_size,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        kernel_layout,
        data_layout,
    ) = tvm.testing.parameters(
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW4i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int8", "OIHW2i8o", "NCHW8c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int16", "OIHW2i8o", "NCHW8c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1), (1, 1), "int32", "OIHW2i8o", "NCHW8c"),
        # ResNet18 workloads
        # this test does not fit in corstone300 DCTM section.
        # ((1, 3, 112, 112), (7, 7), 64, (2, 2), (3, 3), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (3, 3), 64, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (1, 1), 64, (1, 1), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (3, 3), 128, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 64, 28, 28), (1, 1), 128, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (3, 3), 128, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (3, 3), 256, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 128, 14, 14), (1, 1), 256, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (3, 3), 256, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (3, 3), 512, (2, 2), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 256, 7, 7), (1, 1), 512, (2, 2), (0, 0), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
        ((1, 512, 3, 3), (3, 3), 512, (1, 1), (1, 1), (1, 1), "int8", "OIHW4i4o", "NCHW4c"),
    )
    # Every combination above is exercised with this schedule.
    schedule_name = tvm.testing.parameter("conv2d_NCHWc.x86")
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    tvm.testing.main()
| 5,621 | 40.036496 | 95 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_conv1d_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv1dTests:
    """Infrastructure for conv1d (NCW) subgraph tests run on the Corstone-300 FVP."""

    @tvm.testing.requires_corstone300
    def test_conv1d(
        self,
        data_shape,
        kernel_size,
        num_filter,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single conv1d_ncw operator."""
        kernel_shape = (num_filter, data_shape[1], kernel_size)
        kernel_np = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=dtype)

        def build_module():
            # Build the single-operator module.  Called twice so the host
            # reference module and the device module are structurally
            # identical and share the same constant weights.
            data = relay.var("input", relay.TensorType(data_shape, dtype))
            kernel = relay.const(kernel_np)
            conv = relay.op.nn.conv1d(
                data,
                kernel,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                data_layout="NCW",
                kernel_layout="OIW",
                out_dtype="int32",
                out_layout="NCW",
            )
            return tvm.IRModule.from_expr(relay.Function([data], conv))

        ref_mod = build_module()
        test_mod = build_module()
        inputs = {"input": np.random.randint(low=-128, high=127, size=data_shape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)
        compile_and_run(
            AOTTestModel(module=test_mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestConv1d_ncw(BasicConv1dTests):
    """This test is for conv1d_ncw.generic schedule."""
    # Tuples are (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = tvm.testing.parameters(
        ((4, 32, 16), 3, 12, 1, 0, 1),
        ((4, 16, 32), 3, 12, 1, 0, 1),
        ((1, 12, 32), 3, 16, 1, 0, 1),
        ((3, 10, 12), 4, 24, 1, 0, 1),
        ((1, 7, 7), 3, 5, 1, 0, 1),
        ((1, 2, 10), 4, 4, 2, (1, 1), 1),
        ((1, 2, 20), 4, 4, 2, (0, 1), 1),
        ((1, 4, 16), 1, 12, 1, (1, 0), 1),
        ((1, 16, 24), 1, 32, 3, (2, 2), 1),
    )
    # Each workload above runs for both integer dtypes.
    dtype = tvm.testing.parameter("int8", "int16")
    data_layout = tvm.testing.parameter("NCW")
    schedule_name = tvm.testing.parameter("conv1d_ncw.generic")
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    tvm.testing.main()
| 3,847 | 32.172414 | 93 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for arm_cpu schedules for grouped conv2d."""
from test_generalized_conv2d import GeneralizedConv2dTests
from tvm.testing import main, parameter, parameters
class GroupConv2dTests(GeneralizedConv2dTests):
    """Helper for constructing group Conv2ds. Sets the reference kernel layout to what x86 code
    supports."""
    def setup_method(self):
        # The reference implementation compares against HWIO kernels regardless
        # of the layout the schedule under test uses.
        self.ref_kernel_layout = "HWIO"
class TestGroupConv2d_NCHW_OIHW(GroupConv2dTests):
    """This test is for group_conv2d_nchw.arm_cpu schedule."""
    # Tuples are (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        ((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
        ((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 32, 1, (1, 1, 2, 2), 2),
    )
    # Each workload runs with 2 and 4 groups, for both integer dtypes.
    groups = parameter(2, 4)
    in_dtype = parameter("int8", "int16")
    data_layout = parameter("NCHW")
    kernel_layout = parameter("OIHW")
    out_layout = parameter("NCHW")
    schedule_name = parameter("group_conv2d_nchw.arm_cpu")
class TestGroupConv2d_NHWC_HWIO(GroupConv2dTests):
    """This test is for group_conv2d_nhwc.generic schedule."""
    # Tuples are (data_shape, kernel_size, num_filter, strides, padding, dilation).
    data_shape, kernel_size, num_filter, strides, padding, dilation = parameters(
        ((1, 32, 32, 16), (3, 3), 12, 1, 0, 1),
        ((1, 32, 10, 16), (3, 3), 16, 1, 0, 1),
        ((1, 49, 10, 16), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
    )
    # Each workload runs with 2 and 4 groups, for both integer dtypes.
    groups = parameter(2, 4)
    in_dtype = parameter("int8", "int16")
    data_layout = parameter("NHWC")
    kernel_layout = parameter("HWIO")
    out_layout = parameter("NHWC")
    schedule_name = parameter("group_conv2d_nhwc.generic")
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    main()
| 2,916 | 37.381579 | 95 | py |
tvm | tvm-main/tests/python/relay/strategy/arm_cpu/test_depthwise_conv2d_NCHWc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
)
class BasicConv2dTests:
    """Infrastructure for depthwise_conv2d_NCHWc subgraph tests run on the Corstone-300 FVP."""

    @tvm.testing.requires_corstone300
    def test_depthwise_conv2d_NCHWc(
        self,
        data_shape,
        kernel_size,
        data_layout,
        kernel_layout,
        groups,
        strides,
        padding,
        dilation,
        dtype,
        schedule_name,
    ):
        """Test a subgraph with a single depthwise_conv2d_nchwc operator."""
        ishape = data_shape
        # Depthwise kernel in OIHW with I == 1 (one filter per input channel).
        wshape = (data_shape[1], 1, *kernel_size)
        weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
        # NOTE: the original body contained a no-op `groups = groups`
        # self-assignment; the parameter is now used directly.

        def make_module():
            # Build the single-operator module.  Called twice so the host
            # reference module and the device module are structurally
            # identical and share the same constant weights.
            data = relay.var("input", relay.TensorType(ishape, dtype))
            weight = relay.const(weight_data)
            out = relay.op.nn.contrib_depthwise_conv2d_nchwc(
                relay.layout_transform(data, "NCHW", data_layout),
                relay.layout_transform(weight, "OIHW", kernel_layout),
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                data_layout=data_layout,
                kernel_layout=kernel_layout,
                groups=groups,
                out_dtype="",
                out_layout="",
            )
            return tvm.IRModule.from_expr(relay.Function([data], out))

        ref_mod = make_module()
        mod = make_module()
        inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)}
        output_list = generate_ref_data(ref_mod, inputs)
        compile_and_run(
            AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
            runner=AOT_CORSTONE300_RUNNER,
            interface_api="c",
            use_unpacked_api=True,
            target_opts={
                "-keys": "arm_cpu",
                "-mcpu": "cortex-m7",
            },
            schedule_name=schedule_name,
        )
class TestDepthWiseConv2d_NCHWc(BasicConv2dTests):
    """This test is for depthwise_conv2d_NCHWc schedule."""
    # Each tuple below is one parameterisation, in the order of the unpacked
    # names on the left-hand side of the assignment.
    (
        data_shape,
        kernel_size,
        groups,
        strides,
        padding,
        dilation,
        kernel_layout,
        data_layout,
    ) = tvm.testing.parameters(
        ((1, 16, 32, 32), (3, 3), 16, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i4o", "NCHW4c"),
        ((1, 16, 32, 32), (3, 3), 12, (1, 1), (1, 1, 1, 1), (1, 1), "OIHW1i8o", "NCHW8c"),
    )
    # Both workloads run for all three integer dtypes.
    dtype = tvm.testing.parameter("int8", "int16", "int32")
    schedule_name = tvm.testing.parameter("depthwise_conv2d_NCHWc")
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    tvm.testing.main()
| 4,124 | 33.375 | 91 | py |
tvm | tvm-main/tests/python/relay/aot/test_cpp_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C++ Runtime Tests"""
import re
import textwrap
import numpy as np
import pytest
import tvm
from tvm import IRModule
from tvm import relay
from tvm.relay import backend, testing
from tvm.testing.aot import generate_ref_data
def test_error_c_interface():
    """Checks that an error occurs when using the packed API in combination with C interface"""
    body = relay.add(relay.const(1), relay.const(1))
    func = relay.Function([], body)
    expected_msg = (
        'Need unpacked-api == false (got: 0) and interface-api == "packed" (got: c) when '
        "targeting c++ runtime"
    )
    # Building with interface-api="c" must be rejected by the C++ runtime path.
    with pytest.raises(tvm.TVMError, match=re.escape(expected_msg)):
        tvm.relay.build(
            IRModule.from_expr(func),
            target="llvm",
            executor=backend.Executor("aot", {"interface-api": "c"}),
        )
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_conv2d(enable_usmp, target_kind):
    """Tests compilation of convolutions"""
    model_text = textwrap.dedent(
        """\
        #[version = "0.0.5"]
        def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
            %1 = nn.conv2d(
                 %data,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
            %3 = nn.conv2d(
                 %2,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %4 = nn.max_pool2d(%3, pool_size=[3, 3]);
            %4
        }
        """
    )
    module = tvm.relay.fromtext(model_text)
    entry = module["main"]
    # Derive argument shapes/dtypes from the parsed module's main signature.
    arg_shapes = {p.name_hint: p.checked_type.concrete_shape for p in entry.params}
    arg_dtypes = {p.name_hint: p.checked_type.dtype for p in entry.params}
    weights = np.random.randint(1, 255, arg_shapes["weight"]).astype(arg_dtypes["weight"])
    params = {"weight": weights}
    inputs = {"data": np.ones(arg_shapes["data"]).astype(arg_dtypes["data"])}
    expected = generate_ref_data(module, inputs, params)
    with tvm.transform.PassContext(
        opt_level=3,
        config={
            "tir.disable_vectorize": True,
            "tir.usmp.enable": enable_usmp,
        },
    ):
        compiled = tvm.relay.build(
            module,
            params=params,
            target=target_kind,
            executor=backend.Executor("aot", {"interface-api": "packed", "unpacked-api": False}),
        )
    workdir = tvm.contrib.utils.TempDirectory()
    lib_path = workdir / "test.so"
    compiled.export_library(lib_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
    reloaded = tvm.runtime.load_module(lib_path)
    runner = tvm.runtime.executor.AotModule(reloaded["default"](tvm.cpu(0)))
    runner.set_input(**inputs)
    # Exercise the input-introspection API of the reloaded AOT module.
    assert runner.get_input_name(0) == "data"
    input_shapes, input_dtypes = runner.get_input_info()
    assert input_shapes == {"data": (1, 3, 64, 64)}
    assert input_dtypes == {"data": "uint8"}
    runner.run()
    assert (runner.get_output(0).numpy() == list(expected.values())[0]).all()
@pytest.mark.parametrize("enable_usmp", [True, False])
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_mobilenet(enable_usmp: bool, target_kind: str):
    """Full network test with Mobilenet.

    Builds Mobilenet with the AOT executor (packed interface) for the C and
    LLVM backends, with and without USMP, exports/reloads the library, and
    compares its output against the reference executor.
    """
    ir_mod, params = testing.mobilenet.get_workload(batch_size=1)
    data_shape = [int(x) for x in ir_mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    inputs = {"data": data}
    ref_outputs = generate_ref_data(ir_mod, inputs, params)
    with tvm.transform.PassContext(
        opt_level=3, config={"tir.disable_vectorize": True, "tir.usmp.enable": enable_usmp}
    ):
        mod = tvm.relay.build(
            ir_mod,
            params=params,
            target=target_kind,
            executor=backend.Executor("aot", {"interface-api": "packed"}),
        )
    temp_dir = tvm.contrib.utils.TempDirectory()
    test_so_path = temp_dir / "test.so"
    mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
    loaded_mod = tvm.runtime.load_module(test_so_path)
    runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
    runner.set_input(**inputs)
    runner.run()
    # .numpy() replaces the deprecated .asnumpy() alias, matching test_conv2d.
    assert (runner.get_output(0).numpy() == list(ref_outputs.values())[0]).all()
def test_module_list():
    """Checks the correct list of module names is generated"""
    input_x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
    one = tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32")))
    # Build a trivial x + 1 module under a distinctive module name.
    compiled = tvm.relay.build(
        tvm.IRModule.from_expr(tvm.relay.Function([input_x], tvm.relay.add(input_x, one))),
        target="c",
        executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
        mod_name="unusual_module_name_fred",
    )
    workdir = tvm.contrib.utils.TempDirectory()
    lib_path = workdir / "test.so"
    compiled.export_library(lib_path, cc="gcc", options=["-std=c11"])
    reloaded = tvm.runtime.load_module(lib_path)
    list_module_names = reloaded.get_function("list_module_names")
    # The reloaded library must report exactly the name it was built with.
    assert sorted(["unusual_module_name_fred"]) == sorted(list_module_names())
def test_create_executor():
    """Check relay.create_executor with the AOT executor evaluates x + 1 correctly."""
    x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
    expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
    actual = relay.create_executor(
        "aot", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr)), target="c"
    ).evaluate()(np.array([2], dtype="float32"))
    # A stray `np.isfinite(...)` call whose result was discarded has been
    # removed from the original body.
    np.testing.assert_allclose(actual.numpy(), np.array([3], dtype="float32"))
def test_pass_wrong_device_arg():
    """Ensure an error is generated if the incorrect number of devices are passed"""
    x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
    expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(
            tvm.IRModule.from_expr(tvm.relay.Function([x], expr)),
            target="c",
            executor=backend.Executor("aot", {"interface-api": "packed"}),
        )
    temp_dir = tvm.contrib.utils.TempDirectory()
    test_so_path = temp_dir / "test.so"
    mod.export_library(test_so_path, cc="gcc", options=["-std=c11", "-g3", "-O0"])
    loaded_mod = tvm.runtime.load_module(test_so_path)
    with pytest.raises(tvm.TVMError) as error:
        tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0), tvm.cpu(0)))
    # pytest.raises yields an ExceptionInfo whose raised exception lives in
    # `.value`; `.exception` is the unittest API and raised AttributeError here.
    assert (
        "Check failed: devices_.size() == 1 (2 vs. 1) : Expect exactly 1 device passed."
        in str(error.value)
    )
    # TODO write asserts for # and type of device.
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
@pytest.mark.parametrize("input_name", ["input:0", "input@0", "input_0"])
def test_aot_input_name_with_special_character(target_kind: str, input_name: str):
    """Test name transforms in AOT for input names with special characters."""
    dtype = "float32"
    input_1 = relay.var(input_name, shape=(10, 5), dtype=dtype)
    weight = relay.var("weight", shape=(1, 5), dtype=dtype)
    output = relay.add(input_1, weight)
    func = relay.Function([input_1, weight], output)
    input_data = np.random.rand(10, 5).astype(dtype)
    weight_data = np.random.rand(1, 5).astype(dtype)
    expected_output = input_data + weight_data
    params = {"weight": weight_data}
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(
            tvm.IRModule.from_expr(func),
            target=target_kind,
            params=params,
            executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
        )
    temp_dir = tvm.contrib.utils.TempDirectory()
    test_so_path = temp_dir / "test.so"
    mod.export_library(test_so_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
    # test both original name and transformed name
    for name in ["input_0", input_name]:
        loaded_mod = tvm.runtime.load_module(test_so_path)
        runner = tvm.runtime.executor.AotModule(loaded_mod["default"](tvm.cpu(0)))
        inputs = {name: input_data}
        runner.set_input(**inputs)
        input_ind = runner.get_input_index(name)
        # .numpy() replaces the deprecated .asnumpy() alias, matching test_conv2d.
        assert (runner.get_input(input_ind).numpy() == input_data).all()
        runner.run()
        assert (runner.get_output(0).numpy() == expected_output).all()
@pytest.mark.parametrize("target_kind", ["c", "llvm"])
def test_aot_incorrect_input_name(target_kind: str):
    """Test passing incorrect input name."""
    dtype = "float32"
    valid_name = "input"
    invalid_name = "input1"
    data_var = relay.var(valid_name, shape=(10, 5), dtype=dtype)
    weight_var = relay.var("weight", shape=(1, 5), dtype=dtype)
    func = relay.Function([data_var, weight_var], relay.add(data_var, weight_var))
    data_np = np.random.rand(10, 5).astype(dtype)
    weight_np = np.random.rand(1, 5).astype(dtype)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        compiled = tvm.relay.build(
            tvm.IRModule.from_expr(func),
            target=target_kind,
            params={"weight": weight_np},
            executor=tvm.relay.backend.Executor("aot", {"interface-api": "packed"}),
        )
    workdir = tvm.contrib.utils.TempDirectory()
    lib_path = workdir / "test.so"
    compiled.export_library(lib_path, cc="c++", options=["-std=gnu++17", "-g3", "-O0"])
    runner = tvm.runtime.executor.AotModule(
        tvm.runtime.load_module(lib_path)["default"](tvm.cpu(0))
    )
    error_regex = r"Invalid input name."
    # Both setting and querying a non-existent input must raise.
    with pytest.raises(tvm.TVMError, match=error_regex):
        runner.set_input(**{invalid_name: data_np})
    with pytest.raises(tvm.TVMError, match=error_regex):
        runner.get_input_index(invalid_name)
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    tvm.testing.main()
| 11,494 | 38.501718 | 99 | py |
tvm | tvm-main/tests/python/relay/aot/test_aot_create_executor_metadata.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend import Executor
from tvm.relay.backend.aot import CreateExecutorMetadata
from tvm.relay import TensorType
from tvm.tir.usmp.utils import PoolAllocation
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_executor_metadata(executor_metadata, expected_metadata):
assert list(executor_metadata.inputs) == expected_metadata["inputs"]
assert list(executor_metadata.input_tensor_types) == expected_metadata["input_tensor_types"]
assert list(executor_metadata.outputs) == expected_metadata["outputs"]
assert list(executor_metadata.output_tensor_types) == expected_metadata["output_tensor_types"]
assert list(executor_metadata.pools) == expected_metadata["pools"]
assert executor_metadata.devices == expected_metadata["devices"]
assert executor_metadata.executor == expected_metadata["executor"]
assert executor_metadata.mod_name == expected_metadata["mod_name"]
assert executor_metadata.interface_api == expected_metadata["interface_api"]
assert executor_metadata.unpacked_api == expected_metadata["unpacked_api"]
assert executor_metadata.workspace_alignment == expected_metadata["workspace_alignment"]
assert executor_metadata.constant_alignment == expected_metadata["constant_alignment"]
assert set(executor_metadata.pool_inputs.keys()) == set(expected_metadata["pool_inputs"].keys())
assert set(executor_metadata.io_pool_allocations.keys()) == set(
expected_metadata["io_pool_allocations"].keys()
)
def test_create_executor_metadata_single_func():
    """CreateExecutorMetadata on a single-function module that carries USMP
    pool arguments and I/O tensor pool allocations."""
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(
            a: T.handle, output: T.handle, workspace: T.handle("uint8"), constants: T.handle("uint8")
        ) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            sid_3 = T.allocate([140], "int8", "global.workspace")
            sid_2 = T.allocate([140], "int8", "global.workspace")
            sid_1 = T.allocate([140], "int8", "global.workspace")
            constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on
    target = Module["__tvm_main__"].attrs["target"]
    executor = Executor("aot", {"interface-api": "c"})
    # NOTE(review): the trailing integer arguments of AllocatedPoolInfo appear
    # to be (pool size, pool-parameter index) - confirm against its definition.
    workspace_pool_info = AllocatedPoolInfo(
        WorkspacePoolInfo("sram", [target]),
        256,
        3,
    )
    constant_pool_info = AllocatedPoolInfo(
        ConstantPoolInfo(
            "flash",
            [target],
            [ConstantInfo("a", 0, array(np.array([0])))],
        ),
        512,
        2,
    )
    # Both the input and the output are placed in the "sram" pool at offset 0.
    io_pool_allocations = {
        "a": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
        "output": PoolAllocation(WorkspacePoolInfo("sram", [target]), 0),
    }
    mod = Module.with_attr("io_tensor_pool_allocations", io_pool_allocations)
    mod["__tvm_main__"] = mod["__tvm_main__"].with_attr(
        "pool_args",
        [
            constant_pool_info,
            workspace_pool_info,
        ],
    )
    f = mod["__tvm_main__"]
    expected_metadata = {
        "inputs": [f.params[0]],
        "input_tensor_types": [TensorType((5, 7), "float32")],
        "outputs": ["output"],
        "output_tensor_types": [TensorType((5, 7), "float32")],
        # Params 2 and 3 are the workspace/constants pool handles.
        "pools": f.params[2:],
        "devices": f.attrs["devices"],
        "executor": "aot",
        "mod_name": "test_mod",
        "interface_api": "c",
        "unpacked_api": False,
        "workspace_alignment": 16,
        "constant_alignment": 1,
        "pool_inputs": {
            f.params[2]: workspace_pool_info,
            f.params[3]: constant_pool_info,
        },
        "io_pool_allocations": io_pool_allocations,
    }
    executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
    _check_executor_metadata(executor_metadata, expected_metadata)
def test_create_executor_metadata_no_usmp():
    """CreateExecutorMetadata on a module without USMP attributes: the pool
    and I/O-allocation fields of the resulting metadata must be empty."""
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(
            a: T.handle, output: T.handle
        ) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind": "llvm", "tag": "", "keys": ["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["test_device"]})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            sid_3 = T.allocate([140], "int8", "global.workspace")
            sid_2 = T.allocate([140], "int8", "global.workspace")
            sid_1 = T.allocate([140], "int8", "global.workspace")
            constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on
    executor = Executor("aot", {"interface-api": "c"})
    mod = Module
    f = mod["__tvm_main__"]
    expected_metadata = {
        "inputs": [f.params[0]],
        "input_tensor_types": [TensorType((5, 7), "float32")],
        "outputs": ["output"],
        "output_tensor_types": [TensorType((5, 7), "float32")],
        # main only has two params here, so this slice is empty.
        "pools": f.params[2:],
        "devices": f.attrs["devices"],
        "executor": "aot",
        "mod_name": "test_mod",
        "interface_api": "c",
        "unpacked_api": False,
        "workspace_alignment": 16,
        "constant_alignment": 1,
        "pool_inputs": {},
        "io_pool_allocations": {},
    }
    executor_metadata = CreateExecutorMetadata(mod, "test_mod", executor, 16, 1)
    _check_executor_metadata(executor_metadata, expected_metadata)
# Allow running this file directly; discovers and runs the tests above.
if __name__ == "__main__":
    tvm.testing.main()
| 8,452 | 46.757062 | 230 | py |
tvm | tvm-main/tests/python/relay/aot/test_aot_create_function_metadata.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import tvm
import tvm.testing
from tvm.script import tir as T
from tvm.runtime.ndarray import array
from tvm.relay.backend.aot import CreateFunctionMetadata
from tvm.ir.memory_pools import AllocatedPoolInfo, ConstantPoolInfo, WorkspacePoolInfo, ConstantInfo
def _check_function_metadata(function_metadata, expected_infos):
    """Compare per-function metadata against expected target/size entries.

    Each size map and the primfunc map is keyed by target; only the first
    entry of every map is inspected, mirroring the single-target test setup.
    """
    for symbol, expected in expected_infos.items():
        info = function_metadata[symbol]
        # The three size maps share one check shape: first (target, size) pair.
        for attr in ("workspace_sizes", "io_sizes", "constant_sizes"):
            target, size = getattr(info, attr).items()[0]
            assert str(target) == expected["target"]
            assert size == expected[attr]
        # The lowered TIR primfunc must be structurally equal to the expected one.
        target, primfunc = info.tir_primfuncs.items()[0]
        assert str(target) == expected["target"]
        tvm.ir.assert_structural_equal(primfunc, expected["tir_primfuncs"])
def test_create_function_metadata_workspace_allocate_only():
    """CreateFunctionMetadata for a main function that only allocates workspace
    memory (no constants)."""
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            sid_3 = T.allocate([140], "int8", "global.workspace")
            sid_2 = T.allocate([140], "int8", "global.workspace")
            sid_1 = T.allocate([140], "int8", "global.workspace")
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on
    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            # Three 140-byte workspace buffers; 432 = 3 * 144, consistent with
            # each rounded up to a 16-byte boundary.  NOTE(review): assumes the
            # `16` passed to CreateFunctionMetadata below is the workspace
            # alignment - confirm.
            "workspace_sizes": 432,
            # Two 5x7 float32 buffers (a and output): 2 * 140 bytes.
            "io_sizes": 280,
            "constant_sizes": 0,
            "tir_primfuncs": Module["__tvm_main__"],
        }
    }
    function_metadata = CreateFunctionMetadata(Module, 16, 1)
    _check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_allocate_only():
    """Constant size is derived from the T.allocate_const node in __tvm_main__.

    The 35-element float32 constant accounts for the expected 140 bytes
    (35 * 4); no workspace allocations are present.
    """
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 0,
            "io_sizes": 280,
            "constant_sizes": 140,
            "tir_primfuncs": Module["__tvm_main__"],
        }
    }

    function_metadata = CreateFunctionMetadata(Module, 16, 1)

    _check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_constant_pool_only():
    """Constant size is derived from an allocated constant memory pool.

    The module itself allocates no constants; the expected 256 bytes come
    from the AllocatedPoolInfo wrapping a ConstantPoolInfo attached via the
    module-level "pool_args" attribute.
    """
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 0,
            "io_sizes": 280,
            "constant_sizes": 256,
            "tir_primfuncs": Module["__tvm_main__"],
        }
    }

    target = Module["__tvm_main__"].attrs["target"]
    # Attach a 256-byte constant pool; its size should appear as constant_sizes.
    mod = Module.with_attr(
        "pool_args",
        [
            AllocatedPoolInfo(
                ConstantPoolInfo(
                    "flash",
                    [target],
                    [ConstantInfo("a", 0, array(np.array([0])))],
                ),
                256,
            ),
        ],
    )

    function_metadata = CreateFunctionMetadata(mod, 16, 1)

    _check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_pool_only():
    """Workspace size is derived from an allocated workspace memory pool.

    The module itself has no T.allocate nodes; the expected 256 bytes come
    from the AllocatedPoolInfo wrapping a WorkspacePoolInfo attached via the
    module-level "pool_args" attribute.
    """
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 256,
            "io_sizes": 280,
            "constant_sizes": 0,
            "tir_primfuncs": Module["__tvm_main__"],
        }
    }

    target = Module["__tvm_main__"].attrs["target"]
    # Attach a 256-byte workspace pool; its size should appear as workspace_sizes.
    mod = Module.with_attr(
        "pool_args",
        [
            AllocatedPoolInfo(
                WorkspacePoolInfo("sram", [target]),
                256,
            ),
        ],
    )

    function_metadata = CreateFunctionMetadata(mod, 16, 1)

    _check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_all_single_func():
    """Sizes combine in-function allocations with allocated memory pools.

    Expected workspace 688 = in-function allocations (432, see the
    workspace-allocate-only test) + 256-byte workspace pool; expected
    constants 652 = 140-byte allocate_const + 512-byte constant pool.
    """
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            sid_3 = T.allocate([140], "int8", "global.workspace")
            sid_2 = T.allocate([140], "int8", "global.workspace")
            sid_1 = T.allocate([140], "int8", "global.workspace")
            constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, constant_0, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 688,
            "io_sizes": 280,
            "constant_sizes": 652,
            "tir_primfuncs": Module["__tvm_main__"],
        }
    }

    target = Module["__tvm_main__"].attrs["target"]
    # Attach both a constant pool (512 bytes) and a workspace pool (256 bytes).
    mod = Module.with_attr(
        "pool_args",
        [
            AllocatedPoolInfo(
                ConstantPoolInfo(
                    "flash",
                    [target],
                    [ConstantInfo("a", 0, array(np.array([0])))],
                ),
                512,
            ),
            AllocatedPoolInfo(
                WorkspacePoolInfo("sram", [target]),
                256,
            ),
        ],
    )

    function_metadata = CreateFunctionMetadata(mod, 16, 1)

    _check_function_metadata(function_metadata, expected_infos)
def test_create_function_metadata_workspace_multi_funcs():
    """Metadata is collected per PrimFunc when the module has several.

    __tvm_main__ contributes only io sizes, while test_fused_add has its own
    workspace allocation (140 bytes, expected as 144 -- presumably aligned to
    16 bytes), its own constant (140 bytes) and three 5x7 float32 buffers
    (io 420 = 3 * 140).
    """
    # fmt: off
    @tvm.script.ir_module
    class Module:
        @T.prim_func
        def __tvm_main__(a: T.handle, output: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "num_inputs": 1, "num_outputs": 1})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))

        @T.prim_func
        def test_fused_add(a: T.handle, b: T.handle, output: T.handle, device_context_unused: T.handle) -> None:
            # function attr dict
            T.func_attr({"global_symbol": "test_mod_test_fused_add", "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]})})
            a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
            b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
            output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
            # body
            sid_0 = T.allocate([140], "int8", "global.workspace")
            constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
            T.evaluate(T.tvm_call_cpacked("magic", a_buffer.data, b_buffer.data, sid_0, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    expected_infos = {
        "__tvm_main__": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 0,
            "io_sizes": 280,
            "constant_sizes": 0,
            "tir_primfuncs": Module["__tvm_main__"],
        },
        "test_fused_add": {
            "target": "llvm -keys=cpu ",
            "workspace_sizes": 144,
            "io_sizes": 420,
            "constant_sizes": 140,
            "tir_primfuncs": Module["test_fused_add"],
        },
    }

    function_metadata = CreateFunctionMetadata(Module, 16, 1)

    _check_function_metadata(function_metadata, expected_infos)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point when run as a script.
    tvm.testing.main()
| 13,086 | 42.191419 | 190 | py |
tvm | tvm-main/tests/python/relay/aot/test_pass_aot_lower_main.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=line-too-long,missing-class-docstring,missing-module-docstring,missing-function-docstring,no-self-argument,unused-argument,invalid-name
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm.ir import assert_structural_equal
from tvm.relay.backend.aot import AOTLowerMain, CallType
from tvm.script import tir as T
def _make_const(dtype, shape):
    """Build a zero-filled Relay constant with the given dtype and shape."""
    zeros = np.zeros(shape).astype(dtype)
    return tvm.relay.const(zeros)
def _make_consts(dtype, shapes):
    """Build one zero-filled Relay constant per shape in ``shapes``."""
    constants = []
    for shape in shapes:
        constants.append(_make_const(dtype, shape))
    return constants
def _plan_devices(mod):
    """Run device planning and type inference on ``mod`` for an llvm target.

    Returns the transformed module together with the compilation config that
    was used, so callers can feed both into later lowering passes.
    """
    host = tvm.target.Target("llvm")
    target = tvm.target.Target("llvm", host=host)
    pass_ctx = tvm.transform.PassContext()
    compilation_config = tvm.target.make_compilation_config(pass_ctx, target)
    planned = tvm.relay.transform.PlanDevices(compilation_config)(mod)
    typed = tvm.relay.transform.InferType()(planned)
    return typed, compilation_config
def _assert_lowered_main(mod, main_func, call_type, print_script=False):
    """Lower ``mod`` with AOTLowerMain and check its main equals ``main_func``.

    Set ``print_script`` to dump the lowered TVMScript for debugging.
    """
    planned, config = _plan_devices(mod)
    lowered = AOTLowerMain("test_mod", config, call_type)(planned)
    actual_main = lowered["__tvm_main__"]
    if print_script:
        print(actual_main.script())
    assert_structural_equal(actual_main, main_func)
def test_single_call_cpacked():
    """A single call_lowered becomes one tvm_call_cpacked in __tvm_main__."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
            call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_single_call_packed():
    """Packed calling convention lowers to tvm_call_packed with a return check."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
            call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_check_return(0, -1, T.tvm_call_packed("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.Packed)
def test_single_call_unpacked():
    """Unpacked calling convention lowers to call_extern with a return check."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
            call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_check_return(0, -1, T.call_extern("test_fused_add", a_buffer.data, output_buffer.data, dtype="int32"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.Unpacked)
def test_constant():
    """A Relay constant argument lowers to a T.allocate_const in main."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a, meta[relay.Constant][0]) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
            call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
        }
        """,
        # Supply the zero-filled constant referenced by meta[relay.Constant][0].
        init_meta_table={"relay.Constant": _make_consts("float32", [(5, 7)])},
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "global_symbol": "test_mod___tvm_main__", "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        constant_0 = T.allocate_const([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "float32", [5, 7])
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, constant_0, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
# TODO(@mbaret) There seems to be a TVMScript round-trip bug causing this to fail
@pytest.mark.xfail()
def test_copy_to_output():
    """An identity main should lower to a byte-wise copy into the output buffer.

    Marked xfail: the expected PrimFunc does not survive a TVMScript
    round-trip (see the TODO above).
    """
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %a
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        tmp_read = T.handle("uint8", "")
        # buffer definition
        tmp_read_1 = T.Buffer([T.uint64(140)], dtype="uint8", data=tmp_read)
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        tmp_write: T.handle("uint8") = output_buffer.data
        tmp_write_1 = T.Buffer([T.uint64(140)], dtype="uint8", data=tmp_write)
        for i in T.serial(140):
            tmp_write_1[i] = T.Let(tmp_read_1[i], where={tmp_read : a_buffer.data})
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_two_calls():
    """Chained calls share an intermediate via a workspace allocation (sid_2)."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
            %1 = call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */;
            %2 = (%1,) /* ty=(Tensor[(5, 7), float32],) */;
            call_lowered(@test_fused_add, %2) /* ty=Tensor[(5, 7), float32] */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        sid_2 = T.allocate([140], "int8", "global.workspace")
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_output():
    """A tuple-returning main lowers to one output parameter per tuple field."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) { (%x, %x) }

        def @main(%a: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) {
            %0 = (%a,) /* ty=(Tensor[(5, 7), float32],) */;
            call_lowered(@test_fused_add, %0) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output0: T.handle, output1: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output0, output1], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output0_buffer = T.match_buffer(output0, [5, 7], dtype="float32", align=16)
        output1_buffer = T.match_buffer(output1, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output0_buffer.data, output1_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_tuple_intermediate():
    """Intermediate tuple fields become separate workspace allocations."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> (Tensor[(5, 7), float32], Tensor[(5, 7), float32]) { (%x, %x) }
        def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,);
            %1 = call_lowered(@test_fused_add_0, %0);
            %2 = (%1.0, %1.1);
            call_lowered(@test_fused_add_1, %2)
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        sid_3 = T.allocate([140], "int8", "global.workspace")
        sid_2 = T.allocate([140], "int8", "global.workspace")
        T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_multi_input():
    """Multiple Relay inputs become multiple parameters of __tvm_main__."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) { %x }

        def @main(%a: Tensor[(5, 7), float32], %b: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a, %b) /* ty=(Tensor[(5, 7), float32], Tensor[(5, 7), float32]) */;
            call_lowered(@test_fused_add, %0) /* ty=Tensor[(5, 7), float32] */
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, b: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a, b], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        b_buffer = T.match_buffer(b, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, b_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding():
    """A let-bound call result returned directly writes straight to the output."""
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,);
            let %v1 = call_lowered(@test_fused_add, %0);
            %v1
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_let_binding_branch():
    """A chain of let bindings with a branch lowers to sequenced calls on sids.

    %v1 feeds both %v2 and the final add, so sid_2 and sid_3 must both stay
    live for the last call.
    """
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add_0(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }
        def @test_fused_add_1(%x: Tensor[(5, 7), float32], %y: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,);
            let %v0 = call_lowered(@test_fused_add_0, %0);
            %1 = (%v0,);
            let %v1 = call_lowered(@test_fused_add_0, %1);
            %2 = (%v1,);
            let %v2 = call_lowered(@test_fused_add_0, %2);
            %3 = (%v1, %v2);
            let %v3 = call_lowered(@test_fused_add_1, %3);
            %v3
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": []})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        sid_3 = T.allocate([140], "int8", "global.workspace")
        sid_2 = T.allocate([140], "int8", "global.workspace")
        sid_1 = T.allocate([140], "int8", "global.workspace")
        T.evaluate(T.tvm_call_cpacked("test_fused_add_0", a_buffer.data, sid_1, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_1, sid_2, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("test_fused_add_0", sid_2, sid_3, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
        T.evaluate(T.tvm_call_cpacked("test_fused_add_1", sid_2, sid_3, output_buffer.data, T.reinterpret(T.uint64(0), dtype="handle"), dtype="int32"))
    # fmt: on

    _assert_lowered_main(mod, func, CallType.CPacked)
def test_device_hooks():
    """Device contexts wrap calls in Activate/Open/Close/Deactivate hooks.

    With every global function mapped to "example_target_hook", the lowered
    main must bracket the whole body with Activate/Deactivate and each call
    with Open/Close, passing the device context handle into each cpacked call.
    """
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @test_fused_add(%x: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] { %x }

        def @main(%a: Tensor[(5, 7), float32]) -> Tensor[(5, 7), float32] {
            %0 = (%a,);
            %1 = call_lowered(@test_fused_add, %0);
            %2 = (%1,);
            call_lowered(@test_fused_add, %2)
        }
        """,
    )

    # fmt: off
    @T.prim_func
    def func(a: T.handle, output: T.handle, device_context_example_target_hook: T.handle) -> None:
        # function attr dict
        T.func_attr({"global_symbol": "test_mod___tvm_main__", "runner_function": True, "target": T.target({"kind":"llvm", "tag":"", "keys":["cpu"]}), "input_vars": [a], "output_vars": [output], "devices": ["example_target_hook"]})
        a_buffer = T.match_buffer(a, [5, 7], dtype="float32", align=16)
        output_buffer = T.match_buffer(output, [5, 7], dtype="float32", align=16)
        # body
        T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookActivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
        with T.allocate([140], "int8", "global.workspace") as sid_2:
            T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add", a_buffer.data, sid_2, device_context_example_target_hook, dtype="int32"))
            T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
            T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookOpen", device_context_example_target_hook, dtype="int32"), dtype="int32"))
            T.evaluate(T.tvm_call_cpacked("test_fused_add", sid_2, output_buffer.data, device_context_example_target_hook, dtype="int32"))
            T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookClose", device_context_example_target_hook, dtype="int32"), dtype="int32"))
        T.evaluate(T.tvm_check_return(0, -1, T.call_extern("TVMDeviceExampleTargetHookDeactivate", device_context_example_target_hook, dtype="int32"), dtype="int32"))
    # fmt: on

    # Map every global function to the example device hook before lowering.
    device_contexts = {}
    for gv in mod.get_global_vars():
        device_contexts[gv] = "example_target_hook"
    mod = mod.with_attr("device_contexts", device_contexts)

    _assert_lowered_main(mod, func, CallType.CPacked)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point when run as a script.
    tvm.testing.main()
| 18,809 | 42.744186 | 231 | py |
tvm | tvm-main/tests/python/relay/aot/test_crt_forward_declarations.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""test forward function declarations codegen by CodegenCHost."""
from collections import OrderedDict
import pytest
import numpy as np
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib import cmsisnn
from tvm.testing.aot import AOTTestModel, compile_models, generate_ref_data
from tvm.micro.testing.aot_test_utils import (
AOT_CORSTONE300_RUNNER,
AOT_USMP_CORSTONE300_RUNNER,
parametrize_aot_options,
AOTTestRunner,
)
def _change_ndarray_layout(arr, src_layout, dst_layout):
"""Makes a copy of an ndarray, reshaping it to a new data layout.
Parameter
---------
arr : numpy.ndarray
The ndarray to be reformatted.
src_layout : str
The current layout of the Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
dst_layout : str
The desired layout of new the Relay constant. Must be alphabetic (e.g. NHWC
or OIHW, but not NCHW2c).
Returns
-------
dst_shape : numpy.ndarray
A copy of the ndarray with the new layout.
"""
assert src_layout.isalpha() and dst_layout.isalpha()
axis_order = [src_layout.index(c) for c in dst_layout]
return np.transpose(arr, axis_order)
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
@pytest.mark.parametrize("test_runner", [AOT_CORSTONE300_RUNNER, AOT_USMP_CORSTONE300_RUNNER])
def test_external_calls(test_runner):
    """Download a small network and partition for CMSIS-NN to test forward declarations for external
    calls outside of __tvm_main__."""
    # download the model
    base_url = (
        "https://github.com/ARM-software/ML-zoo/raw/"
        "48a22ee22325d15d2371a6df24eb7d67e21dcc97"
        "/models/keyword_spotting/cnn_small/tflite_int8"
    )
    file_to_download = "cnn_s_quantized.tflite"
    file_saved = "cnn_s_quantized_15Dec2021.tflite"
    model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)

    # convert the tflite network into relay model
    # pylint: disable=import-outside-toplevel
    from tvm.relay.testing.tflite import TFLiteModel

    input_shape = (1, 490)
    dtype = "int8"
    tfl_model = TFLiteModel(dtype)
    tfl_model.load_from_file(model_file, [input_shape])
    relay_mod, relay_params = tfl_model.convert_to_relay()
    # Offload supported operators to CMSIS-NN as external functions.
    cmsisnn_mod = cmsisnn.partition_for_cmsisnn(relay_mod, relay_params)

    # obtain the executor factory post relay compilation.
    input_map, output_map, output_tolerance = tfl_model.generate_reference_data()
    interface_api = "c"
    use_unpacked_api = True
    compiled_models = compile_models(
        AOTTestModel(
            module=cmsisnn_mod,
            inputs=input_map,
            outputs=output_map,
            params=None,
            output_tolerance=output_tolerance,
        ),
        interface_api,
        use_unpacked_api,
        pass_config=test_runner.pass_config,
    )

    # Validate frquency of function appearances in the Host C file after forward declarations.
    lib_mod = compiled_models[0].executor_factory.lib.imported_modules[0]
    main_source = lib_mod.get_source()
    # USMP removes TVMBackendAllocWorkspace calls entirely, hence 3 or 0.
    assert (
        main_source.count("TVMBackendAllocWorkspace") == 3
        or main_source.count("TVMBackendAllocWorkspace") == 0
    )
    assert main_source.count("tvmgen_default_fused_reshape") == 2
    assert main_source.count("tvmgen_default_cmsis_nn_main") == 12
    # Also check the generated CMSIS-NN source for the offloaded kernels.
    cmsisnn_source = lib_mod.imported_modules[0].get_source()
    assert cmsisnn_source.count("arm_convolve_wrapper") == 1
    assert cmsisnn_source.count("arm_fully_connected") == 3
    assert cmsisnn_source.count("arm_softmax") == 1
@parametrize_aot_options
def test_internal_calls(interface_api, use_unpacked_api, test_runner):
    """Test for all internal function calls. No forward declarations are expected here."""
    dtype = "float32"
    groups = 32
    weight_shape = 1
    ishape = (1, 32, 14, 14)
    wshape = (32, weight_shape, 3, 3)
    # Rebuild the runner with USMP enabled, keeping all other settings.
    pass_config = {"tir.usmp.enable": True}
    test_runner = AOTTestRunner(
        makefile=test_runner.makefile,
        prologue=test_runner.prologue,
        epilogue=test_runner.epilogue,
        includes=test_runner.includes,
        parameters=test_runner.parameters,
        pass_config=pass_config,
    )

    # Grouped conv2d which lowers to internal (non-external) function calls.
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = tvm.relay.transform.InferType()(mod)

    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)

    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])

    output_list = generate_ref_data(mod, inputs)
    compiled_models = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
    )

    # Each internal function should appear exactly once as a definition per
    # call site -- no separate forward declarations in the host C source.
    lib_mod = compiled_models[0].executor_factory.lib.imported_modules[0]
    main_source = lib_mod.get_source()
    assert main_source.count("int32_t tvmgen_default_fused_nn_contrib_depthwise_conv2d_NCHWc") == 1
    assert main_source.count("int32_t tvmgen_default_fused_layout_transform") == 3
@tvm.testing.requires_corstone300
def test_tensorized_calls():
    """Test a subgraph with a mix of internal and tensorized calls."""
    # Conv2d configuration: NHWC data, asymmetric padding, one group (dense conv).
    data_shape, kernel_size, num_filter, groups, strides, padding, dilation = (
        (1, 32, 32, 16),
        (3, 3),
        16,
        1,
        1,
        (0, 2, 2, 0),
        1,
    )
    in_dtype = "int8"
    data_layout = "NHWC"
    kernel_layout = "HWOI"
    ref_kernel_layout = "HWIO"
    out_layout = "NHWC"
    # DSP schedule for Cortex-M — expected to emit tensorized gemm_ kernels.
    schedule_name = "conv2d_nhwc_dsp.arm_cpu"
    ref_input_data = np.random.randint(low=-128, high=127, size=data_shape, dtype=in_dtype)
    ref_input_var = relay.var("input", relay.TensorType(data_shape, in_dtype)) # NHWC layout
    kernel_shape = (*kernel_size, data_shape[-1] // groups, num_filter) # HWIO layout
    ref_kernel_data = np.random.randint(low=-10, high=10, size=kernel_shape, dtype=in_dtype)
    # Reference graph is kept in canonical NHWC/HWIO layouts so its outputs can be
    # compared against the test graph built with the target layouts below.
    ref_relay_op = relay.op.nn.conv2d(
        ref_input_var,
        relay.const(_change_ndarray_layout(ref_kernel_data, "HWIO", ref_kernel_layout)),
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        groups=groups,
        dilation=(dilation, dilation),
        data_layout="NHWC",
        kernel_layout=ref_kernel_layout,
        out_dtype="int32",
        out_layout="NHWC",
    )
    ref_module = tvm.IRModule.from_expr(relay.Function([ref_input_var], ref_relay_op))
    ref_outputs = generate_ref_data(ref_module, {"input": ref_input_data})
    # Reshape output dictionary to match out_layout
    assert len(ref_outputs) == 1
    output_tensor_name, output_tensor = next(iter(ref_outputs.items()))
    ref_outputs[output_tensor_name] = _change_ndarray_layout(output_tensor, "NHWC", out_layout)
    # Build the graph under test with the data/kernel layouts the DSP schedule expects.
    test_input_data = _change_ndarray_layout(ref_input_data, "NHWC", data_layout)
    test_input_var = relay.var("input", relay.TensorType(test_input_data.shape, in_dtype))
    test_kernel_data = _change_ndarray_layout(ref_kernel_data, "HWIO", kernel_layout)
    test_relay_op = relay.op.nn.conv2d(
        test_input_var,
        relay.const(test_kernel_data),
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        groups=groups,
        dilation=(dilation, dilation),
        data_layout=data_layout,
        kernel_layout=kernel_layout,
        out_dtype="int32",
        out_layout=out_layout,
    )
    test_function = relay.Function([test_input_var], test_relay_op)
    test_model = AOTTestModel(
        module=tvm.IRModule.from_expr(test_function),
        inputs={"input": test_input_data},
        outputs=ref_outputs,
    )
    compiled_models = compile_models(
        test_model,
        interface_api="c",
        use_unpacked_api=True,
        pass_config=AOT_CORSTONE300_RUNNER.pass_config,
        target="c -keys=arm_cpu -mcpu=cortex-m7",
        schedule_name=schedule_name,
    )
    lib_mod = compiled_models[0].executor_factory.lib.imported_modules[0]
    main_source = lib_mod.get_source()
    # Expect both internal conv2d functions and the tensorized gemm_ micro-kernels
    # in the generated source.
    assert main_source.count("tvmgen_default_fused_nn_conv2d") == 2
    assert main_source.count("gemm_") == 13
# Entry point so the file can be run directly as well as through pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 9,371 | 36.190476 | 100 | py |
tvm | tvm-main/tests/python/relay/aot/test_crt_aot_usmp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" This file contains test that use USMP + AoT using C runtime APIs"""
from collections import OrderedDict
import re
import random
import numpy as np
import pytest
import tvm
from tvm import relay
from tvm.relay import testing # pylint: disable=W0611
from tvm.relay import transform
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.backend import Executor, Runtime
from tvm import (
WorkspaceMemoryPools,
ConstantMemoryPools,
WorkspacePoolInfo,
ConstantPoolInfo,
PoolInfoProperties,
)
from tvm.micro import model_library_format as mlf
from tvm.micro.testing.aot_test_utils import parametrize_aot_options
from tvm.testing.aot import (
AOTTestModel,
AOTTestRunner,
generate_ref_data,
compile_and_run,
compile_models,
run_and_check,
create_relay_module_and_inputs_from_tflite_file,
)
from tvm.testing.usmp import is_tvm_backendallocworkspace_calls
def _check_for_no_tvm_backendallocworkspace_calls(mod: tvm.runtime.module):
    """Assert that no TVMBackendAllocWorkspace calls survive in the compiled module."""
    has_workspace_calls = is_tvm_backendallocworkspace_calls(mod)
    assert (
        has_workspace_calls is False
    ), "This is failing because USMP was unable to plan for every tir.allocate node."
# U1 test case
@parametrize_aot_options
def test_synthetic(interface_api, use_unpacked_api, test_runner):
    """
    Simple U1 usecase test

    Builds the synthetic workload, compiles it with USMP enabled
    (greedy_by_conflicts) and runs it against reference outputs.
    """
    mod, params = tvm.relay.testing.synthetic.get_workload()
    main_func = mod["main"]
    shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
    input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
    # Every non-"data" parameter becomes a constant initialised to ones.
    params = {
        name: np.ones(shape_dict[name]).astype(type_dict[name])
        for name in shape_dict
        if name != "data"
    }
    inputs = {"data": input_data}
    output_list = generate_ref_data(mod, inputs, params)
    # Plain dict (previously an accidental one-element tuple that required
    # unpacking with `update(*config)`).
    config = {
        "tir.disable_vectorize": True,
        "tir.disable_storage_rewrite": True,
        "tir.usmp.enable": True,
        "tir.usmp.algorithm": "greedy_by_conflicts",
    }
    # Copy the incoming runner so the shared fixture's pass_config is not mutated.
    test_runner = AOTTestRunner(
        makefile=test_runner.makefile,
        prologue=test_runner.prologue,
        epilogue=test_runner.epilogue,
        includes=test_runner.includes,
        parameters=test_runner.parameters,
        pass_config={**test_runner.pass_config},
    )
    test_runner.pass_config.update(config)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@pytest.mark.parametrize(
    "workspace_byte_alignment,constant_byte_alignment,"
    "main_workspace_size,main_constant_size,usmp_algo",
    [
        # Golden (workspace, constant) sizes per alignment pair and USMP algorithm.
        (8, 8, 14208, 948, "greedy_by_conflicts"),
        (16, 8, 14208, 948, "greedy_by_conflicts"),
        (256, 8, 14720, 948, "greedy_by_conflicts"),
        (8, 16, 14208, 956, "greedy_by_conflicts"),
        (16, 16, 14208, 956, "greedy_by_conflicts"),
        (256, 16, 14720, 956, "greedy_by_conflicts"),
        (8, 256, 14208, 1804, "greedy_by_conflicts"),
        (16, 256, 14208, 1804, "greedy_by_conflicts"),
        (256, 256, 14720, 1804, "greedy_by_conflicts"),
        (8, 8, 18576, 948, "greedy_by_size"),
        (16, 8, 18576, 948, "greedy_by_size"),
        (256, 8, 19392, 948, "greedy_by_size"),
        (8, 16, 18576, 956, "greedy_by_size"),
        (16, 16, 18576, 956, "greedy_by_size"),
        (256, 16, 19392, 956, "greedy_by_size"),
        (8, 256, 18576, 1804, "greedy_by_size"),
        (16, 256, 18576, 1804, "greedy_by_size"),
        (256, 256, 19392, 1804, "greedy_by_size"),
        (8, 8, 11424, 948, "hill_climb"),
        (16, 8, 11424, 948, "hill_climb"),
        (256, 8, 11920, 948, "hill_climb"),
        (8, 16, 11424, 956, "hill_climb"),
        (16, 16, 11424, 956, "hill_climb"),
        (256, 16, 11920, 956, "hill_climb"),
        (8, 256, 11424, 1804, "hill_climb"),
        (16, 256, 11424, 1804, "hill_climb"),
        (256, 256, 11920, 1804, "hill_climb"),
    ],
)
def test_memory_planning(
    workspace_byte_alignment,
    constant_byte_alignment,
    main_workspace_size,
    main_constant_size,
    usmp_algo,
):
    """Checks calculated workspace against known values"""
    # Seed the RNG for determinism — the golden sizes above depend on the
    # exact synthetic workload that is generated.
    random.seed(0)
    mod, params = tvm.relay.testing.synthetic.get_workload()
    target = "c"
    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": workspace_byte_alignment,
            "constant-byte-alignment": constant_byte_alignment,
        },
    )
    with tvm.transform.PassContext(
        opt_level=3,
        config={
            "tir.disable_vectorize": True,
            "tir.disable_storage_rewrite": True,
            "tir.usmp.enable": True,
            "tir.usmp.algorithm": usmp_algo,
        },
    ):
        lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
    # The workspace_size dictionary will have an entry for both the 'primitive' and 'host'
    # targets, though both are identical.
    assert (
        sum(lib.function_metadata["__tvm_main__"].workspace_sizes.values()) == main_workspace_size
    )
    assert sum(lib.function_metadata["__tvm_main__"].constant_sizes.values()) == main_constant_size
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
    """Test a subgraph with a single conv2d operator."""
    # (groups=1, weight_shape=32) is a dense conv; (groups=32, weight_shape=1)
    # exercises the grouped/depthwise path.
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, weight_shape, 3, 3)
    # Rebuild the runner with USMP enabled; everything else is inherited.
    pass_config = {"tir.usmp.enable": True}
    test_runner = AOTTestRunner(
        makefile=test_runner.makefile,
        prologue=test_runner.prologue,
        epilogue=test_runner.epilogue,
        includes=test_runner.includes,
        parameters=test_runner.parameters,
        pass_config=pass_config,
    )
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=groups)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
    output_list = generate_ref_data(mod, inputs)
    # Functional check: compile, run and compare against the reference outputs.
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
    # USMP check: recompile and verify no TVMBackendAllocWorkspace calls remain.
    compiled_test_mods = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
    """
    This is a simple test to check BYOC capabilities of AOT
    with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036
    """
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOTTestRunner(pass_config={"tir.usmp.enable": True})
    input_x = relay.var("x", shape=(10, 10))
    input_w0 = relay.var("w0", shape=(10, 10))
    input_w1 = relay.var("w1", shape=(10, 10))
    # Each add is annotated for the external "ccompiler" BYOC backend.
    # z0 = x + w0
    marked_input_x = compiler_begin(input_x, "ccompiler")
    marked_input_w0 = compiler_begin(input_w0, "ccompiler")
    add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
    end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
    # z1 = z0 + w1
    marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
    marked_w1 = compiler_begin(input_w1, "ccompiler")
    add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
    end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
    # z2 = z0 + z1 (this add stays on the host compiler — it is not annotated)
    final_add = relay.add(end_inner_add, end_outer_add)
    relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
    mod = tvm.IRModule()
    mod["main"] = relay_func
    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)
    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)
    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)
    compiled_test_mods = compile_models(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
    )
    # Even with partitioned (BYOC) functions, USMP must plan every allocation.
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
# (archive URL, model file name) pairs for the pre-quantized TFLite models
# downloaded by the tests below.
MOBILENET_V1_URL = (
    "https://storage.googleapis.com/download.tensorflow.org/models/"
    + "mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
    "mobilenet_v1_1.0_224_quant.tflite",
)
MOBILENET_V2_URL = (
    "https://storage.googleapis.com/download.tensorflow.org/models/"
    + "tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
    "mobilenet_v2_1.0_224_quant.tflite",
)
@pytest.mark.parametrize(
    "model_url, usmp_algo, workspace_size, constant_size",
    [
        # Golden workspace/constant pool sizes per USMP algorithm.
        (MOBILENET_V1_URL, "greedy_by_size", 4845696, 8468008),
        (MOBILENET_V1_URL, "greedy_by_conflicts", 4845696, 8468008),
        (MOBILENET_V1_URL, "hill_climb", 3240064, 8468008),
    ],
)
def test_tflite_model_u1_usecase(model_url, usmp_algo, workspace_size, constant_size):
    """
    This checks for ML models and the memory used by them
    when using USMP with different algorithms
    """
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo}
    )
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
    )
    # With USMP enabled, no dynamic workspace allocation should remain.
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    # Checking the workspace size reported in model library format
    mlf_memory_map = mlf._build_function_memory_map(
        compiled_test_mods[0].executor_factory.function_metadata
    )
    assert mlf_memory_map["main"][0]["workspace_size_bytes"] == workspace_size
    assert mlf_memory_map["main"][0]["constants_size_bytes"] == constant_size
    # That should match to workspace size that will be codegen'd to the entry point.
    allocated_pool_info_size = sum(
        [
            _.allocated_size
            for _ in list(
                dict(
                    compiled_test_mods[0].executor_factory.executor_codegen_metadata.pool_inputs
                ).values()
            )
        ]
    )
    assert allocated_pool_info_size == workspace_size + constant_size
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
def _get_workspace_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_WORKSPACE_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_size_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool size macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_SIZE"
return prefix + pool_name.upper() + postfix
def _get_constant_data_define_macro(pool_name: str, model_name="default") -> str:
"""This function converts pool names to compiler generated
pool data macros"""
prefix = "TVMGEN_" + model_name.upper() + "_"
postfix = "_CONSTANT_POOL_DATA"
return prefix + pool_name.upper() + postfix
def _add_module_prefix(suffix: str, model_name="default") -> str:
"""A helper function create struct types"""
return "tvmgen_" + model_name + "_" + suffix
@pytest.mark.parametrize(
    "model_url, usmp_algo",
    [
        (MOBILENET_V1_URL, "greedy_by_size"),
    ],
)
def test_tflite_model_u3_usecase_single_external_pool(model_url, usmp_algo):
    """This checks for inference with USMP using external pool placed in the application"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    pool_name = "my_memory_pool"
    target = tvm.target.Target("c")
    workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
    # The prologue statically allocates the pool in the test application using
    # the size macro the compiler emits for this pool.
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
        prologue=f"""
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
        """,
    )
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_memory_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize(
    "usmp_algo",
    [("greedy_by_size"), ("hill_climb")],
)
def test_tflite_model_u3_usecase_conv2d_var_cons(usmp_algo):
    """This checks for inference using workspace and constant pools placed in the application"""
    # Two conv2d + max_pool2d layers sharing a single weight tensor.
    mod = tvm.relay.fromtext(
        """\
        #[version = "0.0.5"]
        def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
            %1 = nn.conv2d(
                 %data,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
            %3 = nn.conv2d(
                 %2,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %4 = nn.max_pool2d(%3, pool_size=[3, 3]);
            %4
        }
        """
    )
    main_func = mod["main"]
    shape_dict = {p.name_hint: p.checked_type.concrete_shape for p in main_func.params}
    type_dict = {p.name_hint: p.checked_type.dtype for p in main_func.params}
    weight_data = np.random.randint(1, 255, shape_dict["weight"]).astype(type_dict["weight"])
    input_data = np.ones(shape_dict["data"]).astype(type_dict["data"])
    params = {"weight": weight_data}
    inputs = {"data": input_data}
    use_unpacked_api = True
    interface_api = "c"
    target = tvm.target.Target("c")
    workspace_mem_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
            ),
        ]
    )
    constant_mem_pools = ConstantMemoryPools(
        [
            ConstantPoolInfo("my_const_pool_1", [target], []),
        ]
    )
    # Workspace pool lives in .bss.noinit; the constant pool is initialised in
    # .rodata from the compiler-emitted constant data macro.
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
        prologue=f"""
        __attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
        __attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
        static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
        """,
    )
    output_list = generate_ref_data(mod, inputs, params)
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_mem_pools,
        constant_memory_pools=constant_mem_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize(
    "model_url, usmp_algo",
    [
        (MOBILENET_V1_URL, "greedy_by_size"),
    ],
)
def test_tflite_model_u3_usecase_var_cons_ext_pools(model_url, usmp_algo):
    """This checks for inference using one external workspace and one external constant
    pools placed in the application"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    target = tvm.target.Target("c")
    workspace_mem_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=8500000)
            ),
        ]
    )
    constant_mem_pools = ConstantMemoryPools(
        [
            ConstantPoolInfo("my_const_pool_1", [target], []),
        ]
    )
    # Workspace pool lives in .bss.noinit; the constant pool is initialised in
    # .rodata from the compiler-emitted constant data macro.
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
        prologue=f"""
        __attribute__((section(".bss.noinit"), aligned(TVM_RUNTIME_ALLOC_ALIGNMENT_BYTES)))
        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
        __attribute__((section(".rodata.tvm"), aligned(TVM_RUNTIME_CONST_ALLOC_ALIGNMENT_BYTES)))
        static uint8_t my_const_pool_1[{_get_constant_size_define_macro("my_const_pool_1")}] = {{ {_get_constant_data_define_macro("my_const_pool_1")} }};
        """,
    )
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_mem_pools,
        constant_memory_pools=constant_mem_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize(
    "model_url, usmp_algo",
    [
        (MOBILENET_V1_URL, "greedy_by_size"),
    ],
)
def test_tflite_model_u3_usecase_two_external_pools(model_url, usmp_algo):
    """This checks for inference using two external pools placed in the application"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    target = tvm.target.Target("c")
    # First pool is capped via size_hint_bytes; the second carries no size hint.
    workspace_memory_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
            ),
            WorkspacePoolInfo("my_memory_pool_2", [target]),
        ]
    )
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
        prologue=f"""
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
        """,
    )
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_memory_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize(
    "model_urls, usmp_algo",
    [
        ((MOBILENET_V1_URL, MOBILENET_V2_URL), "greedy_by_size"),
    ],
)
def test_two_models_with_a_single_external_pool(model_urls, usmp_algo):
    """This checks for inference using a single large enough common pool"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    target = tvm.target.Target("c")
    workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo("my_memory_pool", [target])])
    # The shared buffer is sized as the MAX of the two models' pool size macros
    # so one allocation can serve either model.
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": usmp_algo},
        prologue=f"""
        #define MAX(A, B) ((A > B) ? A : B)
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t my_memory_pool[MAX({_get_workspace_size_define_macro("my_memory_pool", "mod1")},{_get_workspace_size_define_macro("my_memory_pool", "mod2")})];
        """,
    )
    tflite_model_file1 = tf_testing.get_workload_official(
        model_urls[0][0],
        model_urls[0][1],
    )
    mod1, inputs1, params1 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file1)
    output_list1 = generate_ref_data(mod1, inputs1, params1)
    tflite_model_file2 = tf_testing.get_workload_official(
        model_urls[1][0],
        model_urls[1][1],
    )
    mod2, inputs2, params2 = create_relay_module_and_inputs_from_tflite_file(tflite_model_file2)
    output_list2 = generate_ref_data(mod2, inputs2, params2)
    compiled_test_mods = compile_models(
        [
            AOTTestModel(
                name="mod1", module=mod1, inputs=inputs1, outputs=output_list1, params=params1
            ),
            AOTTestModel(
                name="mod2", module=mod2, inputs=inputs2, outputs=output_list2, params=params2
            ),
        ],
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_memory_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
    )
@pytest.mark.parametrize(
    "model_url, usmp_algo",
    [
        (MOBILENET_V1_URL, "greedy_by_size"),
    ],
)
def test_tflite_model_u4_usecase_single_external_pool(model_url, usmp_algo):
    """This checks for inference with USMP using external pool placed in the application"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    pool_name = "my_memory_pool"
    target = tvm.target.Target("c")
    workspace_memory_pools = WorkspaceMemoryPools([WorkspacePoolInfo(pool_name, [target])])
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    input_name, input_data = list(inputs.items())[0]
    input_size_bytes = input_data.size * input_data.itemsize
    # U4: tir.usmp.use_workspace_io maps model inputs/outputs into the workspace
    # pool itself, so the prologue memcpy's the input data straight into the pool.
    test_runner = AOTTestRunner(
        pass_config={
            "tir.usmp.enable": True,
            "tir.usmp.algorithm": usmp_algo,
            "tir.usmp.use_workspace_io": True,
        },
        prologue=f"""
        #include <string.h>
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t {pool_name}[{_get_workspace_size_define_macro(pool_name)}];
        struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
            .{pool_name} = {pool_name}
        }};
        struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
        memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
        struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
        """,
    )
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_memory_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
        use_workspace_io=True,
    )
@pytest.mark.parametrize(
    "model_url, usmp_algo",
    [
        (MOBILENET_V1_URL, "greedy_by_size"),
    ],
)
def test_tflite_model_u4_usecase_two_external_pools(model_url, usmp_algo):
    """This checks for inference with USMP using external pool placed in the application"""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    target = tvm.target.Target("c")
    # First pool is capped via size_hint_bytes; the second carries no size hint.
    workspace_memory_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "my_memory_pool_1", [target], PoolInfoProperties(size_hint_bytes=2500000)
            ),
            WorkspacePoolInfo("my_memory_pool_2", [target]),
        ]
    )
    tflite_model_file = tf_testing.get_workload_official(
        model_url[0],
        model_url[1],
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    input_name, input_data = list(inputs.items())[0]
    input_size_bytes = input_data.size * input_data.itemsize
    # U4: tir.usmp.use_workspace_io maps model inputs/outputs into the workspace
    # pools, so the prologue memcpy's the input data straight into the pool.
    test_runner = AOTTestRunner(
        pass_config={
            "tir.usmp.enable": True,
            "tir.usmp.algorithm": usmp_algo,
            "tir.usmp.use_workspace_io": True,
        },
        prologue=f"""
        #include <string.h>
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t my_memory_pool_1[{_get_workspace_size_define_macro("my_memory_pool_1")}];
        __attribute__((section(".data.tvm"), aligned(16)))
        static uint8_t my_memory_pool_2[{_get_workspace_size_define_macro("my_memory_pool_2")}];
        struct {_add_module_prefix("workspace_pools")} {_add_module_prefix("workspace_pools")} = {{
            .my_memory_pool_1 = my_memory_pool_1,
            .my_memory_pool_2 = my_memory_pool_2,
        }};
        struct {_add_module_prefix("inputs")} {_add_module_prefix("inputs")} = {_add_module_prefix("map_inputs")}(&{_add_module_prefix("workspace_pools")});
        memcpy({_add_module_prefix("inputs")}.{input_name}, tvmgen_default_input_data_input, {input_size_bytes});
        struct {_add_module_prefix("outputs")} {_add_module_prefix("outputs")} = {_add_module_prefix("map_outputs")}(&{_add_module_prefix("workspace_pools")});
        """,
    )
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        workspace_memory_pools=workspace_memory_pools,
        target=target,
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
    run_and_check(
        models=compiled_test_mods,
        runner=test_runner,
        interface_api=interface_api,
        use_workspace_io=True,
    )
def test_incompatible_interface_api_errors():
    """Ensures an error is thrown if not using the C interface API"""
    mod, params = tvm.relay.testing.synthetic.get_workload()
    # A "packed" interface API combined with workspace IO must be rejected.
    executor = Executor("aot", {"interface-api": "packed"})
    runtime = Runtime("crt")
    expected_message = (
        "tir.usmp.use_workspace_io option is only compatible with interface_api c.\n"
        "Please use interface_api c to be able to enable tir.usmp.use_workspace_io"
    )
    usmp_config = {"tir.usmp.enable": True, "tir.usmp.use_workspace_io": True}
    with pytest.raises(tvm.TVMError, match=re.escape(expected_message)):
        with tvm.transform.PassContext(opt_level=3, config=usmp_config):
            tvm.relay.build(mod, "c", executor=executor, runtime=runtime, params=params)
@parametrize_aot_options
def test_usmp_enabled_by_default_for_crt(interface_api, use_unpacked_api, test_runner):
    """This test checks whether USMP is enabled by default
    for cortex-M targets.
    """
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, 32, 3, 3)
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=1)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
    output_list = generate_ref_data(mod, inputs)
    # NOTE: "tir.usmp.enable" is deliberately not added to pass_config here —
    # USMP must kick in by default for the micro target used below.
    compiled_test_mods = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        pass_config=test_runner.pass_config,
        target=tvm.target.target.micro("host"),
    )
    for compiled_model in compiled_test_mods:
        _check_for_no_tvm_backendallocworkspace_calls(compiled_model.executor_factory.lib)
# Entry point so the file can be run directly as well as through pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 33,910 | 34.80887 | 166 | py |
tvm | tvm-main/tests/python/relay/aot/test_c_device_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C Device API Tests"""
import re
from collections import OrderedDict
import numpy as np
import pytest
import tvm.testing
from tvm import relay
from tvm.ir.module import IRModule
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
from tvm.testing.aot import AOTTestModel, compile_models, generate_ref_data
@pytest.fixture(name="device_api_main_func")
def fixture_device_api_main_func():
    """Test function generator which generates C Device API calls.

    Builds a tiny max-pool TensorFlow model, converts it to an int8-quantized
    TFLite flatbuffer, imports it into Relay and partitions it for the
    Arm(R) Ethos(TM)-U NPU. Returns a callable that compiles the partitioned
    module and extracts the lowered ``__tvm_main__`` TIR function for
    inspection by the tests.
    """
    # Ideally we should have a sample Target registered here
    # but we're going to re-use this for now
    pytest.importorskip("ethosu.vela")
    # pylint: disable=import-outside-toplevel
    import tensorflow as tf
    import tflite.Model
    from tvm.relay.op.contrib.ethosu import partition_for_ethosu
    from tests.python.contrib.test_ethosu.infra import (
        create_test_runner,
        generate_ref_data_tflite,
    )
    # pylint: enable=import-outside-toplevel
    tf.config.run_functions_eagerly(True)
    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            return tf.nn.max_pool(x, [1, 2], [1, 2], "SAME")
    def representative_dataset():
        # Calibration samples for post-training int8 quantization.
        for _ in range(100):
            data = np.random.rand(1, 3, 4, 3)
            yield [data.astype(np.float32)]
    model = Model()
    concrete_func = model.tf_function.get_concrete_function(
        tf.TensorSpec([1, 3, 4, 3], dtype=tf.float32)
    )
    converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = tf.int8
    converter.inference_output_type = tf.int8
    tflite_graph = converter.convert()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"x": [1, 3, 4, 3]},
        dtype_dict={"x": "int8"},
    )
    mod = partition_for_ethosu(relay_module, params)
    # Generate reference data
    input_data, output_data = generate_ref_data_tflite(tflite_graph)
    def compile_to_main_func(interface_api="c", use_unpacked_api=True):
        # Compile the partitioned module and return the lowered main TIR
        # function so the tests can inspect generated Device API hooks.
        test_runner = create_test_runner()
        compiled_models = compile_models(
            models=AOTTestModel(
                module=mod,
                inputs=input_data,
                outputs=output_data,
            ),
            interface_api=interface_api,
            use_unpacked_api=use_unpacked_api,
            workspace_byte_alignment=16,
            pass_config=test_runner.pass_config,
        )
        # NOTE(review): lowered_ir_mods.items() is indexed directly here
        # (unlike the list(...) idiom used in the sibling fixture) — assumes
        # it returns an indexable sequence of (key, module) pairs; confirm.
        main_ir_module = compiled_models[0].executor_factory.lowered_ir_mods.items()[0][1]
        main_func = main_ir_module["__tvm_main__"]
        return main_func
    return compile_to_main_func
@pytest.fixture(name="non_device_api_main_func")
def fixture_non_device_api_main_func():
    """Fixture factory producing a lowered main function with no C Device API calls.

    Builds a simple elementwise multiply in Relay and returns a callable that
    compiles it and extracts the lowered ``__tvm_main__`` TIR function.
    """
    lhs = relay.var("x", shape=(10, 10))
    rhs = relay.var("y", shape=(1, 10))
    relay_func = relay.Function([lhs, rhs], relay.multiply(lhs, rhs))
    lhs_data = np.random.rand(10, 10).astype("float32")
    rhs_data = np.random.rand(1, 10).astype("float32")
    input_map = OrderedDict([("x", lhs_data), ("y", rhs_data)])
    reference_outputs = generate_ref_data(relay_func, input_map)
    def compile_to_main_func(interface_api="c", use_unpacked_api=True):
        # Compile with the default runner and pull out the main TIR function.
        runner = AOT_DEFAULT_RUNNER
        artifacts = compile_models(
            models=AOTTestModel(
                module=IRModule.from_expr(relay_func),
                inputs=input_map,
                outputs=reference_outputs,
            ),
            interface_api=interface_api,
            use_unpacked_api=use_unpacked_api,
            workspace_byte_alignment=16,
            pass_config=runner.pass_config,
        )
        lowered_mods = artifacts[0].executor_factory.lowered_ir_mods
        first_ir_mod = next(iter(lowered_mods.values()))
        return first_ir_mod["__tvm_main__"]
    return compile_to_main_func
def test_device_api_hooks_unpacked_api(device_api_main_func):
    """Check for Device API hooks with unpacked internal calls.

    The lowered main function is expected to bracket the NPU operator call
    with the Activate/Open/Close/Deactivate C Device API hooks.

    Fixes: removed a leftover debug ``print`` and a redundant ``str(str(...))``
    double conversion in the Deactivate assertion.
    """
    main_func = device_api_main_func(interface_api="c", use_unpacked_api=True)
    # Activate Device
    assert (
        str(main_func.body[0])
        == "tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUActivate",'
        + " device_context_ethos_u))\n"
    )
    # Open Device
    assert (
        str(main_func.body[1][0][0][0])
        == "tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUOpen",'
        + " device_context_ethos_u))\n"
    )
    # Device Call
    # We dont need to check exact input and output var names in this test.
    # Hence, using a regex to cover any legal I/O name.
    regex = re.compile(
        r"tir\.tvm_check_return\("
        r"0, -1, "
        r'tir\.call_extern\("tvmgen_default_ethos_u_main_0", '
        r"\w+, \w+, device_context_ethos_u\)\)"
    )
    assert regex.match(str(main_func.body[1][0][0][1]))
    # Close Device
    assert (
        str(main_func.body[1][0][0][2])
        == "tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUClose",'
        + " device_context_ethos_u))\n"
    )
    # Deactivate Device
    assert (
        str(main_func.body[2])
        == "tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUDeactivate",'
        + " device_context_ethos_u))\n"
    )
@pytest.mark.skip(
    "Skipping this test as this is incorrectly using Arm(R) Ethos(TM)-U NPU "
    "with packed calling convention which is not supported by the NPU codegen's "
    "TIR to Runtime Hook. We need to use a different target to test this feature"
)
def test_device_api_hooks_packed_api(device_api_main_func):
    """Check for Device API hooks with packed internal calls.

    NOTE(review): the expected strings below are tied to the exact TIR
    printer output; any printer change will require updating them.
    """
    main_func = device_api_main_func(interface_api="packed", use_unpacked_api=False)
    # Activate Device
    assert (
        str(main_func.body[0][0].value)
        == "@tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUActivate",'
        + " device_context_ethos_u: handle,"
        + " dtype=int32))"
    )
    # Open Device
    assert (
        str(main_func.body[1].body.body[0][0][0].value)
        == "@tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUOpen",'
        + " device_context_ethos_u: handle,"
        + " dtype=int32))"
    )
    # Device Call
    assert (
        str(main_func.body[1].body.body[0][0][1][0].value)
        == "@tir.tvm_call_cpacked("
        + '"tvmgen_default_ethos_u_main_0",'
        + " input: handle, output: handle,"
        + " device_context_ethos_u: handle,"
        + " dtype=int32)"
    )
    # Close Device
    assert (
        str(main_func.body[1].body.body[0][0][2].value)
        == "@tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUClose",'
        + " device_context_ethos_u: handle,"
        + " dtype=int32))"
    )
    # Deactivate Device
    assert (
        str(main_func.body[2][0].value)
        == "@tir.tvm_check_return(0, -1, tir.call_extern("
        + '"TVMDeviceEthosUDeactivate",'
        + " device_context_ethos_u: handle,"
        + " dtype=int32))"
    )
def test_without_device_api_unpacked_api(non_device_api_main_func):
    """Test a graph without the Device API with the unpacked internal calls"""
    lowered_main = non_device_api_main_func(interface_api="c", use_unpacked_api=True)
    # With no device context, the operator is invoked directly via call_extern.
    expected_call = (
        'T.tvm_check_return(0, -1, T.call_extern("int32", '
        '"tvmgen_default_fused_multiply",'
        " x_buffer_var, y_buffer_var, output_buffer_var))"
    )
    assert repr(lowered_main.body.value) == expected_call
def test_without_device_api_packed_api(non_device_api_main_func):
    """Test a graph without the Device API with the packed internal calls"""
    lowered_main = non_device_api_main_func(interface_api="packed", use_unpacked_api=False)
    # Packed calling convention wraps each buffer in a DLTensor on the stack.
    expected_call = (
        'T.call_cpacked("tvmgen_default_fused_multiply", '
        "T.tvm_stack_make_array(x_buffer_var, T.tvm_stack_make_shape(10, 10), "
        'T.reinterpret("handle", T.uint64(0)), T.uint32(2), T.Cast("float32", 0), 0), '
        "T.tvm_stack_make_array(y_buffer_var, T.tvm_stack_make_shape(1, 10), "
        'T.reinterpret("handle", T.uint64(0)), T.uint32(2), T.Cast("float32", 0), 0), '
        "T.tvm_stack_make_array(output_buffer_var, T.tvm_stack_make_shape(10, 10), "
        'T.reinterpret("handle", T.uint64(0)), T.uint32(2), T.Cast("float32", 0), 0), '
        'T.reinterpret("handle", T.uint64(0)))'
    )
    assert repr(lowered_main.body.value) == expected_call
# Allow running this test file directly; tvm.testing.main() invokes pytest on it.
if __name__ == "__main__":
    tvm.testing.main()
| 9,687 | 34.881481 | 94 | py |
tvm | tvm-main/tests/python/relay/aot/test_crt_aot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""AOT with C Runtime Tests"""
import os
import pathlib
import re
import tarfile
from collections import OrderedDict
import numpy as np
import pytest
import tvm
from tvm import TVMError, relay
from tvm.contrib import utils
from tvm.ir.instrument import pass_instrument
from tvm.ir.module import IRModule
from tvm.micro import export_model_library_format
from tvm.micro import model_library_format as mlf
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER, parametrize_aot_options
from tvm.micro.testing.utils import get_conv2d_relay_module
from tvm.relay import testing, transform
from tvm.relay.backend import Executor, Runtime
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.testing import byoc
from tvm.testing.aot import (
AOTTestModel,
compile_and_run,
compile_models,
create_relay_module_and_inputs_from_tflite_file,
generate_ref_data,
)
def test_error_c_interface_with_packed_api():
    """Checks that an error occurs when using the packed API in combination with C interface"""
    const_sum = relay.add(relay.const(1), relay.const(1))
    relay_func = relay.Function([], const_sum)
    # The C interface requires the unpacked API; this combination must raise.
    expected_msg = re.escape(
        'Either need interface_api == "packed" (got: c) or '
        "unpacked-api == true (got: 0) when targeting "
        "c runtime"
    )
    with pytest.raises(tvm.TVMError, match=expected_msg):
        compile_and_run(
            AOTTestModel(
                module=IRModule.from_expr(relay_func),
                inputs={},
                outputs=generate_ref_data(relay_func, {}),
            ),
            AOT_DEFAULT_RUNNER,
            "c",  # interface_api
            False,  # use_unpacked_api
        )
@parametrize_aot_options
def test_conv_with_params(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of convolution with parameters"""
    mod = get_conv2d_relay_module()
    # Derive tensor shapes/dtypes from the type-checked main signature.
    main_params = mod["main"].params
    shapes = {p.name_hint: p.checked_type.concrete_shape for p in main_params}
    dtypes = {p.name_hint: p.checked_type.dtype for p in main_params}
    bound_params = {"weight": np.ones(shapes["weight"]).astype(dtypes["weight"])}
    run_inputs = {"data": np.ones(shapes["data"]).astype(dtypes["data"])}
    ref_outputs = generate_ref_data(mod, run_inputs, bound_params)
    compile_and_run(
        AOTTestModel(
            module=mod,
            inputs=run_inputs,
            outputs=ref_outputs,
            params=bound_params,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_add_with_params(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of add with parameters"""
    lhs = relay.var("x", shape=(1, 10))
    rhs = relay.var("y", shape=(1, 10))
    add_func = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    # "x" is bound as a parameter; "y" remains a runtime input.
    bound_params = {"x": np.ones((1, 10)).astype("float32")}
    run_inputs = {"y": np.random.uniform(size=(1, 10)).astype("float32")}
    ref_outputs = generate_ref_data(add_func, run_inputs, bound_params)
    compile_and_run(
        AOTTestModel(
            module=IRModule.from_expr(add_func),
            inputs=run_inputs,
            outputs=ref_outputs,
            params=bound_params,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
@pytest.mark.parametrize("groups,weight_shape", [(1, 32), (32, 1)])
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
    """Test a subgraph with a single conv2d operator."""
    dtype = "float32"
    data_shape = (1, 32, 14, 14)
    kernel_shape = (32, weight_shape, 3, 3)
    data_var = relay.var("data", shape=data_shape, dtype=dtype)
    kernel_var = relay.var("weight", shape=kernel_shape, dtype=dtype)
    conv = relay.nn.conv2d(
        data_var, kernel_var, kernel_size=(3, 3), padding=(1, 1), groups=groups
    )
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data_var, kernel_var], conv)
    mod = transform.InferType()(mod)
    data_np = np.random.uniform(0, 1, data_shape).astype(dtype)
    kernel_np = np.random.uniform(0, 1, kernel_shape).astype(dtype)
    feed = OrderedDict([("data", data_np), ("weight", kernel_np)])
    ref_outputs = generate_ref_data(mod, feed)
    compile_and_run(
        AOTTestModel(module=mod, inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
def test_packed_global_variables():
    """Check packed global variables in codegen output.

    Compiles a conv2d model with the packed interface, exports it in Model
    Library Format, and scans the generated C sources to ensure that no
    ``tvmgen_default...`` function is referenced under a ``_packed``-suffixed
    name.

    Fixes: the function-name extraction regex previously contained a no-op
    empty lookbehind (``(?<=)``), which is dropped here.
    """
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, 32, 3, 3)
    interface_api = "packed"
    use_unpacked_api = False
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=1)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w1_data = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", i_data), ("weight", w1_data)])
    output_list = generate_ref_data(mod, inputs)
    compiled_models_list = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        workspace_byte_alignment=8,
        enable_op_fusion=True,
        pass_config=AOT_DEFAULT_RUNNER.pass_config,
        use_runtime_executor=True,
        target=tvm.target.Target("c"),
    )
    compiled_model = compiled_models_list[0]
    tmp_path = utils.tempdir()
    base_path = tmp_path.temp_dir
    model = compiled_model.model
    tar_file = os.path.join(base_path, f"{model.name}.tar")
    export_model_library_format(compiled_model.executor_factory, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)
    # Collect every generated C source under codegen/host/src.
    file_list = [
        path
        for path in (pathlib.Path(base_path) / "codegen" / "host" / "src").iterdir()
        if path.is_file()
    ]
    assert len(file_list) > 0
    for path in file_list:
        with open(path, "r") as lib_f:
            lib1 = lib_f.readlines()
        tvmgen_names = []
        tvmgen_funcs = []
        for line in lib1:
            for item in line.split(" "):
                # Find all names starting with tvmgen_default
                if item.startswith("tvmgen_default"):
                    # Collect any name starting with tvmgen_default
                    tvmgen_names.append(item)
                    # Collect all functions starting with tvmgen_default
                    # (the text before the final opening parenthesis).
                    tvmgen_funcs += re.findall(r".*(?=\()", item)
        # Check if any function name has a packed variable name in all
        # items that start with tvmgen_default
        for func in tvmgen_funcs:
            assert f"{func}_packed" not in tvmgen_names
def test_io_size_definition():
    """Check network IO size definitions in the codegen output.

    Compiles a conv2d model, exports it in Model Library Format and verifies
    that the generated interface header defines the expected byte sizes for
    the input, weight and output tensors.
    """
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, 32, 3, 3)
    interface_api = "c"
    use_unpacked_api = True
    data0 = relay.var("data", shape=ishape, dtype=dtype)
    weight0 = relay.var("weight", shape=wshape, dtype=dtype)
    out = relay.nn.conv2d(data0, weight0, kernel_size=(3, 3), padding=(1, 1), groups=1)
    main_f = relay.Function([data0, weight0], out)
    mod = tvm.IRModule()
    mod["main"] = main_f
    mod = transform.InferType()(mod)
    i_data = np.random.uniform(0, 1, ishape).astype(dtype)
    w_data = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", i_data), ("weight", w_data)])
    output_list = generate_ref_data(mod, inputs)
    compiled_models_list = compile_models(
        models=AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        interface_api=interface_api,
        use_unpacked_api=use_unpacked_api,
        workspace_byte_alignment=8,
        enable_op_fusion=True,
        pass_config=AOT_DEFAULT_RUNNER.pass_config,
        use_runtime_executor=True,
        target=tvm.target.Target("c"),
    )
    # Expected sizes in bytes: element count times element width.
    dtype_itemsize = np.dtype(dtype).itemsize
    ref_input_size = i_data.size * dtype_itemsize
    ref_weight_size = w_data.size * dtype_itemsize
    ref_output_size = output_list["output"].size * dtype_itemsize
    compiled_model = compiled_models_list[0]
    tmp_path = utils.tempdir()
    base_path = tmp_path.temp_dir
    model = compiled_model.model
    tar_file = os.path.join(base_path, f"{model.name}.tar")
    # Export as a Model Library Format archive and unpack it for inspection.
    export_model_library_format(compiled_model.executor_factory, tar_file)
    t = tarfile.open(tar_file)
    t.extractall(base_path)
    header_path = f"{base_path}/codegen/host/include/tvmgen_{model.name}.h"
    with open(header_path, "r") as header:
        contents = header.readlines()
        contents = "".join(map(str, contents))
        # Exactly four *_SIZE macros expected. NOTE(review): three are checked
        # below; the fourth macro's identity is assumed — confirm against the
        # generated header template.
        assert contents.count("_SIZE") == 4
        assert f"TVMGEN_DEFAULT_DATA_SIZE {ref_input_size}" in contents
        assert f"TVMGEN_DEFAULT_WEIGHT_SIZE {ref_weight_size}" in contents
        assert f"TVMGEN_DEFAULT_OUTPUT_SIZE {ref_output_size}" in contents
@parametrize_aot_options
def test_concatenate(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of concatenate"""
    dtype = "float32"
    in_x = relay.var("x", shape=(10, 5), dtype=dtype)
    in_y = relay.var("y", shape=(10, 5), dtype=dtype)
    in_z = relay.var("z", shape=(), dtype=dtype)
    joined = relay.concatenate((in_x, in_y), axis=1)
    # Add a scalar so the concatenated result is consumed by another op.
    concat_func = relay.Function([in_x, in_y, in_z], relay.add(in_z, joined))
    feed = OrderedDict(
        [
            ("x", np.random.rand(10, 5).astype(dtype)),
            ("y", np.random.rand(10, 5).astype(dtype)),
            ("z", np.random.uniform(size=()).astype(dtype)),
        ]
    )
    ref_outputs = generate_ref_data(concat_func, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(concat_func), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_nested_tuples(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of functions with nested tuple outputs"""
    inp = relay.var("x", shape=(10,))
    plus1 = inp + relay.const(1.0)
    plus2 = plus1 + relay.const(1.0)
    plus3 = plus2 + relay.const(1.0)
    plus4 = plus3 + relay.const(1.0)
    # Two levels of tuple nesting in the function output.
    nested_output = relay.Tuple(
        [plus1, relay.Tuple([relay.Tuple([plus2, plus3]), plus4])]
    )
    tuple_func = relay.Function([inp], nested_output)
    feed = {"x": np.random.uniform(size=(10,)).astype(np.float32)}
    ref_outputs = generate_ref_data(tuple_func, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(tuple_func), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_tuple_getitem(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of indexing into a constant tuple."""
    pair = relay.Tuple([relay.const(1), relay.const(2)])
    getitem_func = relay.Function([], relay.TupleGetItem(pair, 0))
    ref_outputs = generate_ref_data(getitem_func, {})
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(getitem_func), inputs={}, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_id(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of the scalar identity function."""
    var_x = relay.var("x", "float32")
    identity = relay.Function([var_x], var_x)
    feed = {"x": np.array(1.0, "float32")}
    ref_outputs = generate_ref_data(identity, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(identity), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_add_const(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of a parameterless constant addition."""
    const_sum = relay.add(relay.const(1), relay.const(1))
    const_func = relay.Function([], const_sum)
    ref_outputs = generate_ref_data(const_func, {})
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(const_func), inputs={}, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_multiply(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of multiply"""
    lhs = relay.var("x", shape=(10, 10))
    rhs = relay.var("y", shape=(1, 10))  # broadcasts against lhs
    mul_func = relay.Function([lhs, rhs], relay.multiply(lhs, rhs))
    feed = OrderedDict(
        [
            ("x", np.random.rand(10, 10).astype("float32")),
            ("y", np.random.rand(1, 10).astype("float32")),
        ]
    )
    ref_outputs = generate_ref_data(mul_func, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(mul_func), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_subtract(interface_api, use_unpacked_api, test_runner):
    """Tests compilation of a scalar int32 decrement."""
    counter = relay.var("i", shape=[], dtype="int32")
    decremented = relay.subtract(counter, relay.const(1, dtype="int32"))
    sub_func = relay.Function([counter], decremented, ret_type=relay.TensorType([], "int32"))
    feed = {"i": np.array(1, dtype="int32")}
    ref_outputs = generate_ref_data(sub_func, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(sub_func), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_tuple_output(interface_api, use_unpacked_api, test_runner):
    """Tests getting items from tuples"""
    inp = relay.var("x", shape=(6, 9))
    # Split into three parts, then return the first two as a tuple.
    splits = relay.split(inp, 3).astuple()
    first = relay.TupleGetItem(splits, 0)
    second = relay.TupleGetItem(splits, 1)
    tuple_func = relay.Function([inp], relay.Tuple([first, second]))
    feed = {"x": np.random.rand(6, 9).astype("float32")}
    ref_outputs = generate_ref_data(tuple_func, feed)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(tuple_func), inputs=feed, outputs=ref_outputs),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@pytest.mark.parametrize(
    ["debug_calculated_workspaces", "workspace_byte_alignment"], [(True, 1), (True, 16), (False, 1)]
)
def test_mobilenet(debug_calculated_workspaces, workspace_byte_alignment):
    """Full network test with Mobilenet.

    Exercises AOT compilation end-to-end, with and without debug workspace
    calculation and with different workspace byte alignments.
    """
    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOT_DEFAULT_RUNNER
    # TODO(@Mousius) - Enable memory planning to take into account debug information
    debugging_memory_overhead = 1024 * 1024
    mod, params = testing.mobilenet.get_workload(batch_size=1)
    # Derive the input shape from the type-checked main function signature.
    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    inputs = {"data": data}
    output_list = generate_ref_data(mod, inputs, params)
    compile_and_run(
        AOTTestModel(
            module=mod,
            inputs=inputs,
            outputs=output_list,
            params=params,
            # Extra headroom because memory planning does not yet account
            # for debug information (see TODO above).
            extra_memory_in_bytes=debugging_memory_overhead,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
        workspace_byte_alignment=workspace_byte_alignment,
        debug_calculated_workspaces=debug_calculated_workspaces,
    )
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm(merge_compiler_regions):
    """
    This is a simple test to check BYOC capabilities of AOT
    with and without merging compiler regions to test for https://github.com/apache/tvm/issues/9036
    """
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER
    input_x = relay.var("x", shape=(10, 10))
    input_w0 = relay.var("w0", shape=(10, 10))
    input_w1 = relay.var("w1", shape=(10, 10))
    # z0 = x + w0, annotated for the external "ccompiler" backend
    marked_input_x = compiler_begin(input_x, "ccompiler")
    marked_input_w0 = compiler_begin(input_w0, "ccompiler")
    add_x_and_w0 = relay.add(marked_input_x, marked_input_w0)
    end_inner_add = compiler_end(add_x_and_w0, "ccompiler")
    # z1 = z0 + w1, also annotated for "ccompiler"
    marked_inner_add = compiler_begin(end_inner_add, "ccompiler")
    marked_w1 = compiler_begin(input_w1, "ccompiler")
    add_nested_and_w1 = relay.add(marked_inner_add, marked_w1)
    end_outer_add = compiler_end(add_nested_and_w1, "ccompiler")
    # z2 = z0 + z1, left unannotated so TVM itself executes it
    final_add = relay.add(end_inner_add, end_outer_add)
    relay_func = relay.Function([input_x, input_w0, input_w1], final_add)
    mod = tvm.IRModule()
    mod["main"] = relay_func
    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)
    mod = transform.PartitionGraph("mod_name")(mod)
    mod = transform.InferType()(mod)
    x_data = [("x", np.random.rand(10, 10).astype("float32"))]
    w_data = [("w{}".format(i), np.random.rand(10, 10).astype("float32")) for i in range(2)]
    map_inputs = OrderedDict(x_data + w_data)
    output_list = generate_ref_data(mod, map_inputs)
    compile_and_run(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@pytest.mark.parametrize("merge_compiler_regions", [False, True])
def test_byoc_microtvm_multiple_subgraphs(merge_compiler_regions):
    """Check BYOC capabilities of AOT with multiple offloaded sub graphs.

    Two arithmetic chains are annotated for the external "ccompiler" backend
    while a third chain stays on TVM; their outputs are concatenated and the
    compiled result is checked against reference data.

    Fixes: removed an unused ``input_list`` that was built but never read.
    """
    use_unpacked_api = False
    interface_api = "packed"
    test_runner = AOT_DEFAULT_RUNNER
    input_x = relay.var("x", shape=(10, 10))
    input_w0 = relay.var("w0", shape=(10, 10))
    input_w1 = relay.var("w1", shape=(10, 10))
    input_w2 = relay.var("w2", shape=(10, 10))
    input_w3 = relay.var("w3", shape=(10, 10))
    input_w4 = relay.var("w4", shape=(10, 10))
    input_w5 = relay.var("w5", shape=(10, 10))
    input_w6 = relay.var("w6", shape=(10, 10))
    input_w7 = relay.var("w7", shape=(10, 10))
    # First chain offloaded to the C compiler backend
    ccompiler_add_1 = relay.add(input_x, input_w0)
    ccompiler_sub_1 = relay.subtract(ccompiler_add_1, input_w1)
    ccompiler_mul_1 = relay.multiply(ccompiler_sub_1, input_w2)
    # Second chain offloaded to the C compiler backend
    ccompiler_add_2 = relay.add(input_x, input_w3)
    ccompiler_sub_2 = relay.subtract(ccompiler_add_2, input_w4)
    ccompiler_mul_2 = relay.multiply(ccompiler_sub_2, input_w5)
    # Other parts on TVM
    tvm_add = relay.add(input_x, input_w6)
    tvm_sub = relay.subtract(tvm_add, input_w7)
    concat_outputs = relay.concatenate((ccompiler_mul_1, ccompiler_mul_2, tvm_sub), axis=0)
    relay_func = relay.Function(
        [input_x, input_w0, input_w1, input_w2, input_w3, input_w4, input_w5, input_w6, input_w7],
        concat_outputs,
    )
    mod = tvm.IRModule()
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(relay_func)
    if merge_compiler_regions:
        mod = transform.MergeCompilerRegions()(mod)
    mod = tvm.relay.transform.PartitionGraph("mod_name")(mod)
    mod = tvm.relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = [np.random.rand(10, 10).astype("float32") for _ in range(8)]
    map_inputs = OrderedDict([("x", x_data)] + [("w{}".format(i), w_data[i]) for i in range(8)])
    output_list = generate_ref_data(mod, map_inputs)
    compile_and_run(
        AOTTestModel(name="my_mod", module=mod, inputs=map_inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_add_name_mangling_with_params(interface_api, use_unpacked_api, test_runner):
    """Checks name mangling works with parameters"""
    lhs = relay.var("x", shape=(1, 10))
    rhs = relay.var("y", shape=(1, 10))
    add_func = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    # "x" is bound as a parameter; "y" remains a runtime input.
    bound_params = {"x": np.ones((1, 10)).astype("float32")}
    run_inputs = {"y": np.random.uniform(size=(1, 10)).astype("float32")}
    ref_outputs = generate_ref_data(add_func, run_inputs, bound_params)
    compile_and_run(
        AOTTestModel(
            name="my_mod",
            module=add_func,
            inputs=run_inputs,
            outputs=ref_outputs,
            params=bound_params,
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_multiple_models(interface_api, use_unpacked_api, test_runner):
    """Compiles multiple models to ensure both can be compiled into one output"""
    # Model 1: identity function with no parameters.
    ident_in = relay.var("x", "float32")
    identity_func = relay.Function([ident_in], ident_in)
    ident_inputs = {"x": np.array(1.0, "float32")}
    ident_outputs = generate_ref_data(identity_func, ident_inputs)
    # Model 2: convolution with bound weights.
    conv_mod = get_conv2d_relay_module()
    conv_main_params = conv_mod["main"].params
    shapes = {p.name_hint: p.checked_type.concrete_shape for p in conv_main_params}
    dtypes = {p.name_hint: p.checked_type.dtype for p in conv_main_params}
    conv_params = {"weight": np.ones(shapes["weight"]).astype(dtypes["weight"])}
    conv_inputs = {"data": np.ones(shapes["data"]).astype(dtypes["data"])}
    conv_outputs = generate_ref_data(conv_mod, conv_inputs, conv_params)
    compile_and_run(
        [
            AOTTestModel(
                name="mod1",
                module=identity_func,
                inputs=ident_inputs,
                outputs=ident_outputs,
                params=None,
            ),
            AOTTestModel(
                name="mod2",
                module=conv_mod,
                inputs=conv_inputs,
                outputs=conv_outputs,
                params=conv_params,
            ),
        ],
        test_runner,
        interface_api,
        use_unpacked_api,
    )
def test_quant_mobilenet_tfl():
    """Since in AOT we pass directly the output buffer from the user,
    in quantized networks sharing the output buffers is not possible.
    This is because the output data type is int8 and the intermediate
    buffer are int32 or int16. We use mobilenet quantized to stress this
    situation and verify that the output buffer sharing is disabled in AOT."""
    pytest.importorskip("tflite")
    import tvm.relay.testing.tf as tf_testing  # pylint: disable=import-outside-toplevel
    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOT_DEFAULT_RUNNER
    # Downloads the pre-quantized Mobilenet V1 TFLite model (network access).
    tflite_model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/"
        "models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
        "mobilenet_v1_1.0_224_quant.tflite",
    )
    mod, inputs, params = create_relay_module_and_inputs_from_tflite_file(tflite_model_file)
    output_list = generate_ref_data(mod, inputs, params)
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
@parametrize_aot_options
def test_transpose(interface_api, use_unpacked_api, test_runner):
    """Test that non-inpleaceable operations (e.g., transpose) do not happen in-place."""
    dtype = "float32"
    in_x = relay.var("x", shape=(10, 5), dtype=dtype)
    in_y = relay.var("y", shape=(10, 5), dtype=dtype)
    in_z = relay.var("z", shape=(), dtype=dtype)
    summed = relay.add(in_x, in_y)
    flipped = relay.transpose(summed)
    result = relay.add(flipped, in_z)
    transpose_func = relay.Function([in_x, in_y, in_z], result)
    feed = {
        "x": np.random.rand(10, 5).astype(dtype),
        "y": np.random.rand(10, 5).astype(dtype),
        "z": np.random.uniform(size=()).astype(dtype),
    }
    ref_outputs = generate_ref_data(transpose_func, feed)
    # Fusion is disabled so the transpose stays a distinct operator.
    compile_and_run(
        AOTTestModel(
            module=IRModule.from_expr(transpose_func), inputs=feed, outputs=ref_outputs
        ),
        test_runner,
        interface_api,
        use_unpacked_api,
        enable_op_fusion=False,
    )
def test_name_sanitiser():
    """Test that input tensors with special characters in the name don't break compilation.

    Fixes: the original bound the relay Var to a misleading name ``func`` and
    built the module from the bare Var, relying on ``IRModule.from_expr``
    closing over the free var to form an identity function. Build the identity
    Function explicitly instead, mirroring ``test_id``.
    """
    interface_api = "c"
    use_unpacked_api = True
    test_runner = AOT_DEFAULT_RUNNER
    input_var = relay.var("input-x::2", "float32")
    ident = relay.Function([input_var], input_var)
    one = np.array(1.0, "float32")
    inputs = {"input-x::2": one}
    output_list = generate_ref_data(ident, inputs)
    compile_and_run(
        AOTTestModel(module=IRModule.from_expr(ident), inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
        enable_op_fusion=False,
    )
def test_name_sanitiser_name_clash():
    """Test that 2 input tensors with names that clash once sanitized, generates an error"""
    dtype = "float32"
    safe_input = relay.var("input::-1", shape=(10, 5), dtype=dtype)
    # The following two tensor names both sanitize to the same identifier.
    clash_a = relay.var("input::-2", shape=(10, 5), dtype=dtype)
    clash_b = relay.var("input:--2", shape=(), dtype=dtype)
    summed = relay.add(safe_input, clash_a)
    flipped = relay.transpose(summed)
    result = relay.add(flipped, clash_b)
    clash_func = relay.Function([safe_input, clash_a, clash_b], result)
    feed = {
        "input::-1": np.random.rand(10, 5).astype(dtype),
        "input::-2": np.random.rand(10, 5).astype(dtype),
        "input:--2": np.random.uniform(size=()).astype(dtype),
    }
    ref_outputs = generate_ref_data(clash_func, feed)
    with pytest.raises(TVMError, match="Sanitized input tensor name clash"):
        compile_and_run(
            AOTTestModel(
                module=IRModule.from_expr(clash_func), inputs=feed, outputs=ref_outputs
            ),
            AOT_DEFAULT_RUNNER,
            "c",  # interface_api
            True,  # use_unpacked_api
            enable_op_fusion=False,
        )
def test_aot_codegen_backend_alloc_workspace_calls():
    """This test checks whether AoT lowering creates TVMBackendAllocWorkspace calls"""

    # The %data and %weight shapes in the following primitive Relay should create
    # small tensors that would get lowered to stack allocations in the CPU PrimFuncs.
    # However, the AoT executor codegen should retain them as TVMBAW calls
    # pylint: disable=line-too-long
    relay_mod = tvm.relay.fromtext(
        """
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 4, 4, 4), float32], %weight: Tensor[(4, 4, 3, 3), float32], src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 4, 4, 4), float32] {
%0 = fn (%p02: Tensor[(1, 4, 4, 4), float32], Primitive=1, hash="9332b3872fb5292c", src_layout="NCHW", dst_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
layout_transform(%p02, src_layout="NCHW", dst_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%1 = fn (%p03: Tensor[(4, 4, 3, 3), float32], Primitive=1, hash="9f0b2b8a24a4dab3", src_layout="OIHW", dst_layout="OIHW4i4o") -> Tensor[(1, 1, 3, 3, 4, 4), float32] {
layout_transform(%p03, src_layout="OIHW", dst_layout="OIHW4i4o") /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */
};
%2 = %0(%data) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%3 = %1(%weight) /* ty=Tensor[(1, 1, 3, 3, 4, 4), float32] */;
%4 = fn (%p01: Tensor[(1, 1, 4, 4, 4), float32], %p1: Tensor[(1, 1, 3, 3, 4, 4), float32], out_layout="NCHW4c", kernel_layout="OIHW4i4o", Primitive=1, data_layout="NCHW4c") -> Tensor[(1, 1, 4, 4, 4), float32] {
nn.contrib_conv2d_NCHWc(%p01, %p1, padding=[1, 1, 1, 1], channels=4, kernel_size=[3, 3], data_layout="NCHW4c", kernel_layout="OIHW4i4o", out_layout="NCHW4c") /* ty=Tensor[(1, 1, 4, 4, 4), float32] */
};
%5 = %4(%2, %3) /* ty=Tensor[(1, 1, 4, 4, 4), float32] */;
%6 = fn (%p0: Tensor[(1, 1, 4, 4, 4), float32], Primitive=1, src_layout="NCHW4c", dst_layout="NCHW") -> Tensor[(1, 4, 4, 4), float32] {
layout_transform(%p0, src_layout="NCHW4c", dst_layout="NCHW") /* ty=Tensor[(1, 4, 4, 4), float32] */
};
%6(%5) /* ty=Tensor[(1, 4, 4, 4), float32] */
}
"""
    )
    # pylint: enable=line-too-long

    # USMP is disabled so workspace allocations stay as TVMBackendAllocWorkspace calls.
    compiled_test_mods = compile_models(
        models=AOTTestModel(module=relay_mod, inputs=None, outputs=None),
        interface_api="c",
        use_unpacked_api=True,
        pass_config={"tir.usmp.enable": False},
    )
    source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
    # There should be three allocates created for three primitive relay function
    # calls in the main for the above relay snippet.
    assert source.count("TVMBackendAllocWorkspace") == 3
@pytest.mark.parametrize("constants_byte_alignment", [8, 16, 32])
def test_constants_alignment(constants_byte_alignment):
    """Test that constants_byte_alignment correctly sets constants byte alignment"""
    use_unpacked_api = True
    interface_api = "c"

    mod, params = testing.mobilenet.get_workload(batch_size=1)
    # Input shape is read from the module's checked type of the first argument.
    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    inputs = {"data": data}
    output_list = generate_ref_data(mod, inputs, params)
    # The alignment is passed to the C target as a target option.
    target = f"c -constants-byte-alignment={constants_byte_alignment}"
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api,
        use_unpacked_api,
        target=tvm.target.Target(target, host=target),
        pass_config={"tir.usmp.enable": False},
    )
    source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()
    # Constants must be emitted into .rodata.tvm with the requested alignment attribute.
    assert f'__attribute__((section(".rodata.tvm"), aligned({constants_byte_alignment})))' in source
def test_output_tensor_names():
    """Test that the output names generated match those in the model"""
    pytest.importorskip("tflite")

    # pylint: disable=import-outside-toplevel
    import tensorflow as tf
    import tflite.Model

    # pylint: enable=import-outside-toplevel

    ifm_shape = (1, 299, 299, 3)
    padding = "VALID"
    strides = (1, 1)
    dilation = (1, 1)
    kernel_shape = (3, 2)

    def create_tflite_graph_two_outs():
        """Create a model with 2 output tensors"""

        class Model(tf.Module):
            """Simple TFLite test model"""

            @tf.function
            def tf_function(self, tf_input_x):
                """Single TFLite function with two convolutions"""
                tf_strides = [1, strides[0], strides[1], 1]
                filter_shape = [kernel_shape[0], kernel_shape[1], 3, 3]
                filter1 = tf.constant(
                    np.arange(np.prod(filter_shape)).reshape(filter_shape),
                    dtype=tf.float32,
                )
                first_conv2d = tf.nn.conv2d(
                    tf_input_x,
                    filters=filter1,
                    strides=tf_strides,
                    padding=padding,
                    dilations=dilation,
                )
                first_conv2d = tf.nn.relu(first_conv2d)
                # Offset the second filter so the two outputs are distinguishable.
                filter2 = tf.constant(
                    1000 + np.arange(np.prod(filter_shape)).reshape(filter_shape),
                    dtype=tf.float32,
                )
                second_conv2d = tf.nn.conv2d(
                    tf_input_x,
                    filters=filter2,
                    strides=strides,
                    padding=padding,
                    data_format="NHWC",
                    dilations=dilation,
                )
                second_conv2d = tf.nn.relu(second_conv2d)
                return first_conv2d, second_conv2d

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model to an int8-quantized TFLite flatbuffer.
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    tflite_graph = create_tflite_graph_two_outs()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": "int8"},
    )

    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOT_DEFAULT_RUNNER

    in_min, in_max = (-128, 127)
    data = np.random.randint(in_min, high=in_max, size=ifm_shape, dtype="int8")
    input_name = mod["main"].params[0].name_hint
    inputs = {input_name: data}
    output_list = generate_ref_data(mod, inputs, params)
    # End-to-end run to make sure the model executes correctly under AOT.
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
    compiled_test_mods = compile_models(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list, params=params),
        interface_api,
        use_unpacked_api,
    )
    # Check that the names of the output tensors occur in the source code
    source = compiled_test_mods[0].executor_factory.lib.get_source()
    for output_name in output_list.keys():
        assert output_name in source
@pytest.mark.parametrize(
    "workspace_byte_alignment,main_workspace_size",
    [
        # Expected workspace sizes are golden values for the synthetic workload;
        # larger alignments pad allocations and hence grow the total.
        (8, 14880),
        (16, 14880),
        (256, 15616),
    ],
)
def test_workspace_calculation(workspace_byte_alignment, main_workspace_size):
    """Checks calculated workspace against known values"""
    mod, params = tvm.relay.testing.synthetic.get_workload()
    target = "c"
    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": workspace_byte_alignment,
        },
    )
    # USMP is disabled so the per-function workspace metadata is produced.
    with tvm.transform.PassContext(
        opt_level=3,
        config={
            "tir.disable_vectorize": True,
            "tir.usmp.enable": False,
        },
    ):
        lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
    mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
    assert mlf_memory_map["main"][0]["workspace_size_bytes"] == main_workspace_size
@tvm.testing.requires_package("tflite")
@tvm.testing.requires_cmsisnn
def test_workspace_calculation_cmsis_nn():
    """This tests cmsis_nn codegen for workspace calculation.
    This is tested specially because cmsis-nn codegen creates
    multiple PrimFuncs per offloaded relay function in a non
    -hierarchical manner."""
    pytest.importorskip("tflite")

    # pylint: disable=import-outside-toplevel
    from tvm.contrib.download import download_testdata
    from tvm.relay.op.contrib import cmsisnn

    # pylint: enable=import-outside-toplevel

    target = "c"
    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": 16,
            "interface-api": "c",
            "unpacked-api": True,
        },
    )
    # Pinned commit of the ARM ML-zoo keyword-spotting model used as the fixture.
    base_url = (
        "https://github.com/ARM-software/ML-zoo/raw/"
        "48a22ee22325d15d2371a6df24eb7d67e21dcc97"
        "/models/keyword_spotting/cnn_small/tflite_int8"
    )
    file_to_download = "cnn_s_quantized.tflite"
    file_saved = "cnn_s_quantized_15Dec2021.tflite"
    model_file = download_testdata("{}/{}".format(base_url, file_to_download), file_saved)
    mod, _, params = create_relay_module_and_inputs_from_tflite_file(model_file)
    # Offload supported operators to the CMSIS-NN external codegen.
    mod = cmsisnn.partition_for_cmsisnn(mod, params)
    with tvm.transform.PassContext(
        opt_level=3,
        config={
            "tir.disable_vectorize": True,
        },
    ):
        lib = tvm.relay.build(mod, target, executor=executor, runtime=runtime, params=params)
    mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
    # Golden workspace size for this model at 16-byte alignment.
    assert mlf_memory_map["main"][0]["workspace_size_bytes"] == 14256
def test_aot_codegen_checks_returns():
    """This test checks whether AoT lowering creates calls that check the return value correctly"""
    input_x = relay.var("x", shape=(1, 10))
    input_y = relay.var("y", shape=(1, 10))
    func_add = relay.add(input_x, input_y)
    func = relay.Function([input_x, input_y], func_add)

    compiled_test_mods = compile_models(
        models=AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None),
        interface_api="c",
        use_unpacked_api=True,
    )
    source = compiled_test_mods[0].executor_factory.lib.imported_modules[0].get_source()

    main_ir_module = compiled_test_mods[0].executor_factory.lowered_ir_mods.items()[0][1]
    main_func = main_ir_module["__tvm_main__"]

    # Check operator call is wrapped properly (tvm_check_return bails out on non-zero).
    body = main_func.body.value
    assert (
        repr(body)
        == 'T.tvm_check_return(0, -1, T.call_extern("int32", "tvmgen_default_fused_add",'
        + " x_buffer_var, y_buffer_var, output_buffer_var))"
    )
    # TODO(Mousius) - Create a better place for C codegen tests
    assert (
        "if (tvmgen_default_fused_add(x_buffer_var, y_buffer_var, output_buffer_var) != 0 ) return -1;"  # pylint: disable=line-too-long
        in source
    )
def test_aot_uses_anf():
    """Checks that A-Normal Form is being used in the AOT lowering pipeline."""
    input_x = relay.var("x", shape=(1, 10, 10, 10))
    input_y = relay.var("y", shape=(1, 10, 10, 10))
    func_add = relay.add(input_x, input_y)
    func = relay.Function([input_x, input_y], func_add)

    @pass_instrument
    class CheckANFRuns:
        # Pass instrument that records whether ToANormalForm ran, and asserts
        # that it ran before LowerTE.
        def __init__(self):
            self.did_run_anf = False

        def run_before_pass(self, _, info):
            if info.name == "ToANormalForm":
                self.did_run_anf = True
            if info.name == "LowerTE":
                assert self.did_run_anf, "ToANormalForm pass should run before LowerTE."

    check_run_anf = CheckANFRuns()

    model = AOTTestModel(module=IRModule.from_expr(func), inputs=None, outputs=None)
    runtime = Runtime("crt")
    executor = Executor(
        "aot",
        {
            "workspace-byte-alignment": 8,
            "interface-api": "c",
            "unpacked-api": True,
        },
    )
    config = {"tir.disable_vectorize": True}

    with tvm.transform.PassContext(opt_level=3, config=config, instruments=[check_run_anf]):
        tvm.relay.build(
            model.module,
            tvm.target.Target("c"),
            executor=executor,
            runtime=runtime,
            workspace_memory_pools=None,
            params=model.params,
            mod_name=model.name,
        )

    assert check_run_anf.did_run_anf, "Expected ToANormalForm pass to have run."
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 40,782 | 35.874322 | 445 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level4.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import numpy as np
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
    # Exercises relay.strided_slice with begin/end/strides supplied as runtime tensors.
    def verify(dshape, begin, end, strides, slice_mode="end", test_ref=True, dtype="int32"):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        ndim = len(dshape)
        slice_dim = len(begin)
        # Normalize defaults: empty begin -> zeros over all dims; empty end -> full extent.
        begin = begin if begin else [0] * ndim
        end = end if end else list(dshape)[:slice_dim]
        if strides:
            if len(strides) == 1:
                # A single stride is broadcast over every sliced dimension.
                strides = strides * slice_dim
        else:
            # NOTE: this `else` pairs with the outer `if strides:` — missing
            # strides default to 1 per sliced dimension.
            strides = [1] * slice_dim
        # Trailing dims not covered by `begin` stay static in the output type.
        num_static_axes = len(dshape) - len(begin)

        # target numpy result
        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.strided_slice_python(x_data, begin, end, strides, slice_mode)
        data = [x_data, np.array(begin, dtype=dtype), np.array(end, dtype=dtype)]

        # Rebind begin/end (and strides) as Relay vars so they are dynamic inputs.
        begin = relay.var("begin", shape=[len(begin)], dtype=dtype)
        end = relay.var("end", shape=[len(end)], dtype=dtype)
        inputs = [x, begin, end]
        if strides:
            data.append(np.array(strides, dtype=dtype))
            strides = relay.var("strides", shape=[len(strides)], dtype=dtype)
            inputs.append(strides)
            z = relay.strided_slice(x, begin=begin, end=end, strides=strides, slice_mode=slice_mode)
        else:
            z = relay.strided_slice(x, begin=begin, end=end, slice_mode=slice_mode)
        func = relay.Function(inputs, z)

        func = run_infer_type(func)

        if num_static_axes > 0:
            # The un-sliced trailing axes must keep their static extents.
            oshape = run_infer_type(z).checked_type.shape
            assert tuple(oshape[-num_static_axes:]) == dshape[-num_static_axes:]

        if not test_ref:
            return
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
                *data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res)

    verify(
        (1, 224, 224, 3),
        [0, 20, 20, 0],
        [1, 140, 140, 3],
        [1, 1, 1, 1],
        dtype="int64",
    )
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1], dtype="int16")
    verify((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
    verify((3, 4, 3), [1, 1, 0], [4, 1000, 3], None)
    verify((3, 4, 3), [1, 1, 0], [4, 4, 4], None)
    verify((3, 4, 3), [1, 1, 0], [4, 4, 3], None)
    verify((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
    verify((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
    verify((20, 10, 5), [20, 10, 4], [0, 0, 1], [-1, -3, -2])
    verify((3, 4, 3), [1, 0, 0], [3, -1, 3], [1, 1, 1], slice_mode="size", test_ref=False)
    verify((3, 4, 3), [1, 0, 0], [-1, 2, 3], [1, 1, 1], slice_mode="size", test_ref=True)
    # Slicing along first few axes, where the rest of axes remain static
    verify((3, 4, 3), [0], [2], None)
    verify((3, 4, 3), [1], [4], [2])
    verify((3, 4, 3), [1, 0], [4, 2], [2, 1])
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_dynamic_strided_slice()
| 4,051 | 39.118812 | 100 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level3 operator test cases.
"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.relay.testing import check_grad, run_infer_type
# Executor kinds exercised by the parametrized tests in this file.
executor_kind = tvm.testing.parameter("debug", "vm")
def verify_func(executor_kind, func, data, target_device=tvm.testing.enabled_targets()):
    # Run `func` with `data` on every (target, device) pair and compare the
    # result(s) against `ref_res`.
    #
    # NOTE(review): the default `target_device` is evaluated once at import
    # time (classic mutable-default pitfall) — presumably intentional here
    # since enabled targets do not change within a test session; confirm.
    assert isinstance(data, list)
    for target, dev in target_device:
        mod = tvm.ir.IRModule.from_expr(func)
        op_res = relay.create_executor(
            executor_kind, mod=mod, device=dev, target=target
        ).evaluate()(*data)
        if isinstance(op_res, tvm.runtime.container.ADT):
            # Tuple-valued result: compare element-wise against the reference list.
            assert len(op_res) == len(
                ref_res
            ), "Outputs from TVM and Python implementation must be equal "
            for op_result, ref_result in zip(op_res, ref_res):
                tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5)
        else:
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
        # Clear the TE compiler cache so compiled artifacts do not leak between runs.
        relay.backend.te_compiler.get().clear()
def check_on_vm(target, dev, args, expected_result, mod):
    """Evaluate ``mod`` on the Relay VM with ``args`` and assert the result
    matches ``expected_result``."""
    vm_executor = relay.create_executor("vm", device=dev, target=target, mod=mod)
    actual = vm_executor.evaluate()(*args)
    tvm.testing.assert_allclose(expected_result, actual.numpy())
@tvm.testing.uses_gpu
def test_dyn_reshape(executor_kind):
    """Reshape where the target shape is supplied as a runtime int64 tensor."""

    def verify_reshape(shape, newshape, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        # The new shape arrives as a 1-D int64 tensor of length rank(newshape).
        y = relay.var("y", relay.TensorType((len(newshape),), "int64"))
        z = relay.reshape(x, y)

        func = relay.Function([x, y], z)
        # Fix: the original assigned random data to x_data and then immediately
        # overwrote it with ones — the random draw was dead code and is removed.
        x_data = np.ones(shape).astype("float32")
        ref_res = np.reshape(x_data, oshape)
        # Gradient is only checked w.r.t. the data input, not the shape tensor.
        check_grad(
            run_infer_type(func),
            inputs=[x_data, np.array(newshape).astype("int64")],
            test_inputs=[x_data],
            eps=1e-3,
        )
        verify_func(executor_kind, func, [x_data, np.array(newshape).astype("int64")], ref_res)

    verify_reshape((2, 3, 4), (8, 3), (8, 3))
    verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
    # 0 copies the corresponding input dim; -1 infers; -3 fuses two dims.
    verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2))
    verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4))
    verify_reshape((2, 3, 4), (0, -1), (2, 12))
    verify_reshape((2, 3, 4), (-1, 0), (8, 3))
    verify_reshape((2, 3, 4), (-3, 4), (6, 4))
    verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20))
    verify_reshape((2, 3, 4), (0, -3), (2, 12))
@tvm.testing.uses_gpu
def test_dyn_shape_reshape(executor_kind):
    # Reshape where the target shape is derived via shape_of(y) at runtime.
    def verify_reshape(shape, newshape, oshape):
        x = relay.var("x", relay.TensorType(shape, "float32"))
        y = relay.var("y", relay.TensorType(newshape, "float32"))
        # The target shape is the runtime shape of the second input tensor.
        z = relay.reshape(x, relay.shape_of(y))

        func = relay.Function([x, y], z)
        x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        y_data = np.random.uniform(low=-1, high=1, size=newshape).astype("float32")
        ref_res = np.reshape(x_data, oshape)
        check_grad(run_infer_type(func), inputs=[x_data, y_data], eps=1e-3)
        verify_func(executor_kind, func, [x_data, y_data], ref_res)

    verify_reshape((2, 3, 4), (8, 3), (8, 3))
    verify_reshape((4, 7), (2, 7, 2), (2, 7, 2))
def test_squeeze(executor_kind):
    # squeeze with the axis list supplied as a runtime int64 tensor.
    def verify_squeeze(shape, dtype, axis):
        x = relay.var("x", relay.TensorType(shape, dtype))
        assert axis is not None
        # Keep the concrete axes for the numpy reference before rebinding
        # `axis` as a Relay var.
        np_axis = tuple(axis)
        axis = relay.var("axis", relay.TensorType([len(axis)], "int64"))
        squeeze = relay.squeeze(x, axis=axis)
        func = relay.Function([x, axis], squeeze)
        x_data = np.random.random_sample(shape).astype(dtype)
        ref_res = np.squeeze(x_data, axis=np_axis)
        verify_func(executor_kind, func, [x_data, np.array(np_axis).astype("int64")], ref_res)

    verify_squeeze((1, 3, 1), "float32", [0])
    verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2])
@tvm.testing.uses_gpu
def test_dyn_expand_dims(executor_kind):
    # expand_dims where the insertion axis is a runtime scalar.
    def verify_expand_dims(
        dshape, dtype, oshape, axis, num_newaxis, target_device=tvm.testing.enabled_targets()
    ):
        # Use 1 to avoid issues with invalid buffer sizes
        x = relay.Var("x", relay.TensorType(dshape, dtype))
        y = relay.var("axis", shape=[], dtype="int64")
        z = relay.expand_dims(x, axis=y, num_newaxis=num_newaxis)
        func = relay.Function([x, y], z)

        data_np = np.random.uniform(size=dshape).astype(dtype)
        axis_np = np.array(axis).astype("int64")
        # Since only size-1 axes are inserted, a reshape yields the reference.
        ref_res = data_np.reshape(oshape)
        verify_func(executor_kind, func, [data_np, axis_np], ref_res, target_device=target_device)

    for dtype in ["float16", "float32"]:
        verify_expand_dims((2, 2), dtype, (2, 2, 1), 2, 1)
        verify_expand_dims((2, 2), dtype, (2, 1, 2), 1, 1)
        verify_expand_dims((2, 2), dtype, (1, 2, 2), 0, 1)

        # TODO (AndrewZhaoLuo): investigate why runtimes in non-llvm are extremely slow
        # for multiple new axis
        llvm_target_only = [x for x in tvm.testing.enabled_targets() if "llvm" in x]
        verify_expand_dims((2, 2), dtype, (2, 2, 1, 1), 2, 2, target_device=llvm_target_only)
        verify_expand_dims((2, 2), dtype, (2, 1, 1, 1, 2), 1, 3, target_device=llvm_target_only)
        verify_expand_dims((2, 2), dtype, (1, 1, 1, 1, 2, 2), 0, 4, target_device=llvm_target_only)
@tvm.testing.uses_gpu
def test_dyn_tile(executor_kind):
    """tile with the repetition counts supplied as a runtime tensor."""

    def verify_tile(dshape, reps):
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        r = relay.var("reps", relay.TensorType((len(reps),), "float32"))
        z = relay.tile(x, r)

        func = relay.Function([x, r], z)
        x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        ref_res = np.tile(x_data, reps=reps)
        # Fix: the original built `reps_data` and then never used it, rebuilding
        # the identical array inline in the call — compute it once and pass it.
        reps_data = np.array(reps).astype("float32")
        verify_func(executor_kind, func, [x_data, reps_data], ref_res)

    verify_tile((2, 3, 4), (3, 2, 1))
    verify_tile((2, 3, 4), (1, 2))
    verify_tile((2, 3), (3, 2, 1))
@tvm.testing.uses_gpu
def test_dyn_zeros_ones(executor_kind):
    # zeros/ones with the output shape supplied as a runtime int64 tensor.
    def verify_zeros_ones(shape, dtype):
        for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
            rank = len(shape)
            dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), "int64"))
            y = op(dyn_shape, dtype)
            yy = run_infer_type(y)
            # With a dynamic shape every output dimension is Any.
            assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)

            func = relay.Function([dyn_shape], y)
            ref_res = ref(shape, dtype)
            # NOTE(review): the reference is cast to int64 even for float dtypes;
            # exact 0/1 values make the allclose comparison still hold — confirm
            # this cast is intentional.
            verify_func(
                executor_kind, func, [np.array(shape).astype("int64")], ref_res.astype("int64")
            )

    verify_zeros_ones((1, 3), "int64")
    verify_zeros_ones((8, 9, 1, 2), "float32")
@tvm.testing.uses_gpu
def test_dyn_full(executor_kind):
    # full with the output shape supplied as a runtime int64 tensor.
    def verify_full(fill_value, src_shape, dtype):
        x = relay.var("x", relay.scalar_type(dtype))
        rank = len(src_shape)
        dyn_src_shape = relay.var("dyn_scr_shape", relay.ty.TensorType((rank,), "int64"))
        z = relay.full(x, dyn_src_shape, dtype)
        func = relay.Function([x, dyn_src_shape], z)
        ref_res = np.full(src_shape, fill_value).astype(dtype)

        verify_func(
            executor_kind,
            func,
            [np.array(fill_value).astype(dtype), np.array(src_shape).astype("int64")],
            ref_res,
        )

    verify_full(4, (1, 3, 4, 4), "int32")
    verify_full(4, (1, 3, 4, 4), "int64")
    verify_full(4.0, (2, 50), "float32")
@tvm.testing.uses_gpu
def test_dyn_sparse_to_dense(executor_kind):
    # sparse_to_dense with the dense output shape supplied as a runtime tensor.
    def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
        sparse_indices_data = np.array(sparse_indices)
        sparse_values_data = np.array(sparse_values)
        default_value_data = np.array(default_value)
        output_shape_data = np.array(output_shape)

        a = relay.var(
            "a", relay.TensorType(sparse_indices_data.shape, str(sparse_indices_data.dtype))
        )
        b = relay.var(
            "b", relay.TensorType(sparse_values_data.shape, str(sparse_values_data.dtype))
        )
        output_shape_var = relay.var(
            "output_shape", relay.TensorType(output_shape_data.shape, str(output_shape_data.dtype))
        )
        if default_value is None:
            # Omitting default_value exercises the 3-argument op form.
            args = [a, b, output_shape_var]
            d = relay.sparse_to_dense(a, output_shape_var, b)
        else:
            c = relay.var(
                "c", relay.TensorType(default_value_data.shape, str(default_value_data.dtype))
            )
            args = [a, b, c, output_shape_var]
            d = relay.sparse_to_dense(a, output_shape_var, b, c)

        zz = run_infer_type(d)
        # Rank is known statically even though each extent is dynamic.
        assert len(zz.checked_type.shape) == len(output_shape)

        func = relay.Function(args, d)

        if default_value is None:
            arguments = [sparse_indices_data, sparse_values_data, output_shape_data]
        else:
            arguments = [
                sparse_indices_data,
                sparse_values_data,
                default_value_data,
                output_shape_data,
            ]

        verify_func(executor_kind, func, arguments, xpected)

    verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])  # scalar
    verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])  # vector
    verify_sparse_to_dense(
        [[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
    )  # nXd
    verify_sparse_to_dense(
        [[0, 0, 0], [1, 2, 3]],
        [1, 2],
        4,
        [2, 3, 4],
        [[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
    )  # nXd
    verify_sparse_to_dense(
        [0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
    )  # floats
    # default value not specified
    verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])
@pytest.mark.parametrize(
    "sparse_indices, sparse_values, dense_shape, default_value",
    [
        (
            np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            np.array([10], dtype=np.int64),
        ),
        (
            np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([7, 7, 7], dtype=np.int64),
            np.array([5], dtype=np.int64),
        ),
        (
            np.array([[1], [2]], dtype=np.int64),
            np.array([7, 8], dtype=np.int64),
            np.array([5], dtype=np.int64),
            np.array([4], dtype=np.int64),
        ),
        # Fully-empty sparse inputs (zero rows of indices/values).
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([5], dtype=np.int64),
            np.array([4], dtype=np.int64),
        ),
        (
            np.ones((0, 3), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([9, 3, 7], dtype=np.int64),
            np.array([100], dtype=np.int64),
        ),
    ],
)
@pytest.mark.parametrize("dtype", [np.int64, np.int32])
@pytest.mark.parametrize("use_dyn", [True, False])
def test_sparse_fill_empty_rows(
    sparse_indices, sparse_values, dense_shape, default_value, dtype, use_dyn, executor_kind
):
    def ref_sparse_fill_empty_rows(
        sparse_indices: np.ndarray,
        sparse_values: np.ndarray,
        dense_shape: np.ndarray,
        default_value: np.ndarray,
    ) -> None:
        """
        This function calculates the expected output of sparse_fill_empty_rows operator given the
        inputs.
        """

        def check_add_rows(current_idx, limit_idx):
            # Insert a default-valued entry for each empty row in [current_idx, limit_idx).
            while current_idx < limit_idx:
                new_sparse_indices.append([current_idx] + [0] * (num_cols - 1))
                new_sparse_values.append(default_value[0])
                empty_row_indicator[current_idx] = True
                current_idx += 1

            return current_idx

        current_idx = 0
        new_sparse_indices = []
        new_sparse_values = []
        empty_row_indicator = [False for _ in range(dense_shape[0])]
        num_cols = sparse_indices.shape[1]
        # Walk the (row-sorted) sparse entries, back-filling any skipped rows.
        for sparse_row, sparse_value in zip(sparse_indices, sparse_values):
            limit_idx = sparse_row[0]
            current_idx = check_add_rows(current_idx, limit_idx)
            new_sparse_indices.append(list(sparse_row))
            new_sparse_values.append(sparse_value)
            current_idx = limit_idx + 1

        # Fill any trailing empty rows up to the dense row count.
        check_add_rows(current_idx, dense_shape[0])
        return new_sparse_indices, new_sparse_values, empty_row_indicator

    def verify_sparse_fill_empty_rows(
        sparse_indices_np: np.ndarray,
        sparse_values_np: np.ndarray,
        dense_shape_np: np.ndarray,
        default_value_np: np.ndarray,
    ) -> None:
        """
        This function verifies the relay output of sparse_fill_empty_rows with its expected output.
        """
        if use_dyn:
            # Fully dynamic shapes: every dimension is relay.Any().
            sparse_indices = relay.var(
                "sparse_indices",
                shape=[relay.Any(), relay.Any()],
                dtype=str(sparse_indices_np.dtype),
            )
            sparse_values = relay.var(
                "sparse_values",
                shape=[relay.Any()],
                dtype=str(sparse_values_np.dtype),
            )
            dense_shape = relay.var(
                "dense_shape",
                shape=[relay.Any()],
                dtype=str(dense_shape_np.dtype),
            )
            default_value = relay.var(
                "default_value",
                shape=[relay.Any()],
                dtype=str(default_value_np.dtype),
            )
        else:
            # Static shapes taken directly from the numpy inputs.
            sparse_indices = relay.var(
                "sparse_indices",
                relay.TensorType(sparse_indices_np.shape, str(sparse_indices_np.dtype)),
            )
            sparse_values = relay.var(
                "sparse_values",
                relay.TensorType(sparse_values_np.shape, str(sparse_values_np.dtype)),
            )
            dense_shape = relay.var(
                "dense_shape",
                relay.TensorType(dense_shape_np.shape, str(dense_shape_np.dtype)),
            )
            default_value = relay.var(
                "default_value",
                relay.TensorType(default_value_np.shape, str(default_value_np.dtype)),
            )
        z = relay.sparse_fill_empty_rows(sparse_indices, sparse_values, dense_shape, default_value)
        func = relay.Function([sparse_indices, sparse_values, dense_shape, default_value], z)
        ref_res = ref_sparse_fill_empty_rows(
            sparse_indices_np,
            sparse_values_np,
            dense_shape_np,
            default_value_np,
        )
        (
            new_sparse_indices_infer_type,
            new_sparse_values_infer_type,
            empty_row_indicator_infer_type,
        ) = run_infer_type(z)

        # Output dtypes follow the input indices dtype; the indicator is boolean.
        assert new_sparse_indices_infer_type.checked_type.dtype == sparse_indices_np.dtype
        assert new_sparse_values_infer_type.checked_type.dtype == sparse_indices_np.dtype
        assert empty_row_indicator_infer_type.checked_type.dtype == "bool"

        verify_func(
            executor_kind,
            func,
            [sparse_indices_np, sparse_values_np, dense_shape_np, default_value_np],
            ref_res,
            [("llvm", tvm.cpu())],
        )

    verify_sparse_fill_empty_rows(
        sparse_indices.astype(dtype),
        sparse_values.astype(dtype),
        dense_shape.astype(dtype),
        default_value.astype(dtype),
    )
def test_dyn_copy():
    # copy on a tensor with a dynamic leading dimension is an identity op.
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()
    mod = tvm.relay.fromtext(
        """
#[version = "0.0.5"]
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int64] {
copy(%x)
}
"""
    )
    x_data = np.random.rand(15, 3).astype("int64")
    expected = x_data
    check_on_vm(target, dev, [x_data], expected, mod)
def test_dyn_copy_scalar():
    # copy on a scalar, then expand_dims + concatenate onto a dynamic vector.
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()
    mod = tvm.relay.fromtext(
        """
#[version = "0.0.5"]
def @main(%x: int32, %y: Tensor[(?), int32]) -> Tensor[(?), int32] {
%0 = copy(%x);
%1 = expand_dims(%0, axis=0);
%2 = (%y, %1);
concatenate(%2)
}
"""
    )
    x_data = 3
    y_data = np.random.rand(7).astype("int32")
    # Reference: append the scalar to the end of the vector.
    expected = np.concatenate((y_data, np.expand_dims(x_data, axis=0)))
    check_on_vm(target, dev, [x_data, y_data], expected, mod)
def test_dyn_cast():
    # cast int64 -> int32 on a tensor with a dynamic leading dimension.
    target = tvm.target.Target("llvm")
    dev = tvm.cpu()
    mod = tvm.relay.fromtext(
        """
#[version = "0.0.5"]
def @main(%x: Tensor[(?, 3), int64]) -> Tensor[(?, 3), int32] {
cast(%x, dtype="int32")
}
"""
    )
    x_data = np.random.rand(15, 3).astype("int64")
    expected = x_data.astype("int32")
    check_on_vm(target, dev, [x_data], expected, mod)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 18,117 | 36.589212 | 100 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level10.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Support level10 operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
import random
import tvm.testing
# Executor kinds exercised by the parametrized tests in this file.
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_broadcast_to(executor_kind):
    # broadcast_to where the target shape is a runtime int64 tensor.
    def verify_more_dynamic_broadcast_to(x_shape, out_shape):
        # Compose a dynamic reshape followed by a dynamic broadcast_to:
        # the flat input is first reshaped, then broadcast.
        rank = len(out_shape)
        dtype = "float32"
        shape_type = "int64"
        reshape_shape = relay.Var("shape", relay.ty.TensorType((len(x_shape),), shape_type))
        broadcast_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
        x = relay.Var("x", relay.ty.TensorType((np.prod(x_shape),), dtype))
        r = relay.reshape(x, reshape_shape)
        z = relay.broadcast_to(r, broadcast_shape)

        func = relay.Function([x, reshape_shape, broadcast_shape], z)

        x = np.random.uniform(size=np.prod(x_shape)).astype(dtype)
        ref_res = np.broadcast_to(np.reshape(x, x_shape), out_shape)
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate(func)(
                x, np.array(x_shape).astype(shape_type), np.array(out_shape).astype(shape_type)
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    verify_more_dynamic_broadcast_to((4, 3), (3, 4, 3))

    def verify_broadcast_to(x_shape, out_shape):
        rank = len(out_shape)
        dtype = "float32"
        shape_type = "int64"
        dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type))
        x = relay.Var("x", relay.ty.TensorType(x_shape, dtype))
        z = relay.broadcast_to(x, dyn_shape)
        zz = run_infer_type(z)

        # With a dynamic target shape every inferred output dimension is Any.
        assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype)

        func = relay.Function([x, dyn_shape], z)

        x = np.random.uniform(size=x_shape).astype(dtype)
        ref_res = np.broadcast_to(x, out_shape)
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate(func)(x, np.array(out_shape).astype(shape_type))
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    verify_broadcast_to((1,), (1, 1, 1))
    verify_broadcast_to((1, 1), (4, 1, 1))
    verify_broadcast_to((4, 1), (1, 4, 3))
@tvm.testing.uses_gpu
def test_dyn_broadcast_to(executor_kind):
    """broadcast_to where the target shape is a runtime tensor argument."""
    elem_dtype = "uint8"
    out_rank = 3
    shape_dtype = "int64"
    shape_arg = relay.Var("shape", relay.ty.TensorType((out_rank,), shape_dtype))
    data_var = relay.Var("x", relay.ty.TensorType((1,), elem_dtype))
    body = relay.broadcast_to(data_var, shape_arg)
    # Type inference can only conclude Any for every output dimension.
    inferred = run_infer_type(body)
    assert inferred.checked_type == relay.ty.TensorType((relay.Any(),) * out_rank, elem_dtype)
    func = relay.Function([data_var, shape_arg], body)
    data_np = np.random.uniform(size=(1,)).astype(elem_dtype)
    target_shape = (1,) * out_rank
    expected = np.broadcast_to(data_np, target_shape)
    for target, dev in tvm.testing.enabled_targets():
        mod = tvm.ir.IRModule.from_expr(func)
        evaluator = relay.create_executor(
            executor_kind, mod=mod, device=dev, target=target
        ).evaluate(func)
        result = evaluator(data_np, np.array(target_shape).astype(shape_dtype))
        tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_dyn_one_hot(executor_kind):
    """one_hot where depth is supplied as a runtime scalar argument."""
    def _get_oshape(indices_shape, depth, axis):
        """Expected output shape: indices_shape with `depth` inserted at `axis`.

        NOTE(review): not referenced by _verify below; appears to be a leftover helper.
        """
        oshape = []
        true_axis = len(indices_shape) if axis == -1 else axis
        ndim = len(indices_shape) + 1
        indices_index = 0
        for i in range(0, ndim):
            if i == true_axis:
                oshape.append(depth)
            else:
                oshape.append(indices_shape[indices_index])
                indices_index += 1
        return oshape
    def _verify(indices_shape, depth, on_value, off_value, axis, dtype):
        indices = relay.var("indices", relay.TensorType(indices_shape, "int32"))
        # depth is a dynamic (runtime) scalar rather than a compile-time attribute.
        depth_var = relay.var("depth", relay.TensorType((), "int32"))
        on_value_const = relay.const(on_value)
        off_value_const = relay.const(off_value)
        out = relay.one_hot(indices, on_value_const, off_value_const, depth_var, axis, dtype)
        func = relay.Function([indices, depth_var], out)
        indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32")
        out_np = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            out_relay = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()(indices_np, np.array(depth).astype("int32"))
            tvm.testing.assert_allclose(out_relay.numpy(), out_np)
    _verify((3,), 3, 1, 0, -1, "int32")
    _verify((3,), 3, 1.0, 0.0, -1, "float32")
    _verify((2, 2), 5, 2, -2, 0, "int32")
    _verify((2, 2), 5, 0.5, -0.5, 1, "float32")
    _verify((3, 2, 4, 5), 6, 1, 0, 1, "int32")
    _verify((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
if __name__ == "__main__":
    tvm.testing.main()
| 6,168 | 38.8 | 99 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level6.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level6 operator test cases.
"""
import numpy as np
import tvm
from tvm import te
from tvm import relay
import tvm.testing
# Executors each test runs under: the Relay interpreter ("debug") and the VM.
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_dynamic_topk(executor_kind):
    """topk where k is passed as a runtime tensor argument."""
    def verify_topk(k, axis, ret_type, is_ascend, dtype):
        shape = (20, 100)
        x = relay.var("x", relay.TensorType(shape, "float32"))
        # NOTE(review): the k variable reuses the debug name "x"; presumably harmless
        # since relay vars are identified by object, not name — confirm.
        k_var = relay.var("x", relay.TensorType((1,), "float32"))
        out = relay.topk(x, k_var, axis, ret_type, is_ascend, dtype)
        if isinstance(out, relay.expr.TupleWrapper):
            out = out.astuple()
        func = relay.Function([x, k_var], out)
        np_data = np.random.uniform(size=shape).astype("float32")
        # Reference ordering via numpy argsort; negate the data for descending order.
        if is_ascend:
            np_indices = np.argsort(np_data, axis=axis)
        else:
            np_indices = np.argsort(-np_data, axis=axis)
        # k < 1 means "keep everything along the axis".
        kk = k if k >= 1 else shape[axis]
        if axis == 0:
            np_indices = np_indices[:kk, :]
            np_values = np.zeros(np_indices.shape).astype("float32")
            for i in range(shape[1]):
                np_values[:, i] = np_data[np_indices[:, i], i]
        else:
            np_indices = np_indices[:, :kk]
            np_values = np.zeros(np_indices.shape).astype("float32")
            for i in range(shape[0]):
                np_values[i, :] = np_data[i, np_indices[i, :]]
        np_indices = np_indices.astype(dtype)
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()(np_data, np.array([k]).astype("float32"))
            # Shape of the result depends on which of values/indices were requested.
            if ret_type == "both":
                tvm.testing.assert_allclose(op_res[0].numpy(), np_values)
                tvm.testing.assert_allclose(op_res[1].numpy(), np_indices)
            elif ret_type == "values":
                tvm.testing.assert_allclose(op_res.numpy(), np_values)
            else:
                tvm.testing.assert_allclose(op_res.numpy(), np_indices)
    np.random.seed(0)
    for k in [0, 1, 5]:
        for axis in [0, -1, 1]:
            for ret_type in ["both", "values", "indices"]:
                verify_topk(k, axis, ret_type, True, "int64")
                verify_topk(k, axis, ret_type, False, "float32")
if __name__ == "__main__":
    # NOTE(review): test_dynamic_topk takes executor_kind; direct invocation here
    # would fail without an argument — TODO confirm intended usage.
    test_dynamic_topk()
| 3,205 | 39.075 | 74 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level5.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level5 operator test cases.
"""
import math
import numpy as np
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_infer_type
import tvm.topi.testing
import tvm.testing
# Executors each test runs under: the Relay interpreter ("debug") and the VM.
executor_kind = tvm.testing.parameter("debug", "vm")
def test_resize2d_infer_type():
    """resize2d with a dynamic size must infer Any for both spatial dimensions."""
    dims = [te.size_var(name) for name in ("n", "c", "h", "w")]
    data = relay.var("x", relay.TensorType(tuple(dims), "int8"))
    size_arg = relay.var("size", relay.TensorType((2,), "int8"))
    inferred = run_infer_type(relay.image.resize2d(data, size_arg))
    expected = relay.TensorType((dims[0], dims[1], relay.Any(), relay.Any()), "int8")
    assert inferred.checked_type == expected
@tvm.testing.uses_gpu
def test_resize2d(executor_kind):
    """resize2d with the target size provided as a runtime tensor."""
    def verify_resize2d(dshape, scale, method, layout):
        # Spatial dims sit at different positions depending on layout.
        if layout == "NHWC":
            size = (dshape[1] * scale, dshape[2] * scale)
        else:
            size = (dshape[2] * scale, dshape[3] * scale)
        size = np.array(size).astype("int64")
        x_data = np.random.uniform(size=dshape).astype("float32")
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        size_var = relay.var("size", relay.TensorType((2,), "int64"))
        # Use the coordinate transform matching the python reference implementation.
        coord_trans = "asymmetric" if method == "nearest_neighbor" else "align_corners"
        z = relay.image.resize2d(
            x, size_var, None, layout, method, coordinate_transformation_mode=coord_trans
        )
        zz = run_infer_type(z)
        func = relay.Function([x, size_var], z)
        ref_res = tvm.topi.testing.resize2d_python(
            x_data, (scale, scale), layout, method, coord_trans
        )
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()(x_data, size)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
    for method in ["linear", "nearest_neighbor"]:
        for layout in ["NCHW", "NHWC"]:
            verify_resize2d((1, 4, 4, 4), 2, method, layout)
            verify_resize2d((2, 8, 17, 20), 7, method, layout)
if __name__ == "__main__":
    test_resize2d_infer_type()
    # NOTE(review): test_resize2d takes executor_kind; direct invocation here
    # would fail without an argument — TODO confirm intended usage.
    test_resize2d()
| 3,076 | 36.52439 | 89 | py |
tvm | tvm-main/tests/python/relay/dyn/test_dynamic_op_level2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level2 dynamic operator test cases.
"""
import numpy as np
import tvm
from tvm import relay
from tvm import te
from tvm.relay.testing import enabled_targets
import random
from test_dynamic_op_level3 import verify_func
import tvm.topi.testing
from tvm.relay.testing import run_infer_type
# Executors each test runs under: the Relay interpreter ("debug") and the VM.
executor_kind = tvm.testing.parameter("debug", "vm")
@tvm.testing.uses_gpu
def test_dyn_upsampling_run(executor_kind):
    """Run 2-D upsampling with runtime scale factors; compare to topi's resize2d reference."""
    def verify_upsampling(dshape, scale_h, scale_w, layout, method, align_corners=False):
        if layout == "NCHW":
            (n, c, h, w) = dshape
            x_data = np.random.uniform(size=(n, c, h, w)).astype("float32")
        elif layout == "NHWC":
            (n, h, w, c) = dshape
            x_data = np.random.uniform(size=(n, h, w, c)).astype("float32")
        # Reference result; strip the "bi" prefix ("bilinear" -> "linear") for resize2d.
        ref_res = tvm.topi.testing.resize2d_python(
            x_data,
            (scale_h, scale_w),
            layout,
            method[2:] if method[0:2] == "bi" else method,
            "align_corners" if align_corners else "asymmetric",
        )
        x = relay.Var("x", relay.TensorType(dshape, "float32"))
        scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
        # Fix: this variable was previously also given the debug name "scale_h".
        scale_w_var = relay.var("scale_w", relay.TensorType((), "float32"))
        z = relay.nn.upsampling(
            x, scale_h_var, scale_w_var, method=method, layout=layout, align_corners=align_corners
        )
        zz = run_infer_type(z)
        func = relay.Function([x, scale_h_var, scale_w_var], z)
        for target, dev in tvm.testing.enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()(
                x_data, np.array(scale_h).astype("float32"), np.array(scale_w).astype("float32")
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
    verify_upsampling((1, 16, 32, 32), 3, 2.0, "NCHW", "nearest_neighbor")
    verify_upsampling((1, 16, 32, 32), 5, 2.0, "NCHW", "bilinear", True)
    verify_upsampling((1, 16, 32, 32), 2.0, 6, "NHWC", "nearest_neighbor")
    verify_upsampling((1, 16, 32, 32), 2.0, 2.0, "NHWC", "bilinear", True)
# Type inference for upsampling when scale_h is a compile-time constant
# but scale_w is only known at runtime.
@tvm.testing.uses_gpu
def test_dyn_upsampling_infer_type_const():
    """Mixed static/dynamic scales must still infer Any for the spatial dims."""
    axes = tuple(te.size_var(s) for s in ("n", "c", "h", "w"))
    data = relay.var("data", relay.TensorType(axes, "int8"))
    scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
    result_type = run_infer_type(relay.nn.upsampling(data, 2.0, scale_w)).checked_type
    assert result_type == relay.TensorType((axes[0], axes[1], relay.Any(), relay.Any()), "int8")
@tvm.testing.uses_gpu
def test_dyn_upsampling3d_run(executor_kind):
    """Run 3-D upsampling with runtime scale factors; compare to topi's resize3d reference."""
    def verify_upsampling3d(
        dshape, scale_d, scale_h, scale_w, layout, method, coord_trans="asymmetric"
    ):
        if layout == "NCDHW":
            (n, c, d, h, w) = dshape
            x_data = np.random.uniform(size=(n, c, d, h, w)).astype("float32")
        elif layout == "NDHWC":
            (n, d, h, w, c) = dshape
            x_data = np.random.uniform(size=(n, d, h, w, c)).astype("float32")
        # Reference result; strip the "tri" prefix ("trilinear" -> "linear") for resize3d.
        ref_res = tvm.topi.testing.resize3d_python(
            x_data,
            (scale_d, scale_h, scale_w),
            layout,
            method[3:] if method[0:3] == "tri" else method,
            coord_trans,
        )
        x = relay.Var("x", relay.TensorType(dshape, "float32"))
        scale_d_var = relay.var("scale_d", relay.TensorType((), "float32"))
        scale_h_var = relay.var("scale_h", relay.TensorType((), "float32"))
        # Fix: this variable was previously also given the debug name "scale_h".
        scale_w_var = relay.var("scale_w", relay.TensorType((), "float32"))
        z = relay.nn.upsampling3d(
            x,
            scale_d_var,
            scale_h_var,
            scale_w_var,
            method=method,
            layout=layout,
            coordinate_transformation_mode=coord_trans,
        )
        zz = run_infer_type(z)
        func = relay.Function([x, scale_d_var, scale_h_var, scale_w_var], z)
        for target, dev in enabled_targets():
            mod = tvm.ir.IRModule.from_expr(func)
            op_res = relay.create_executor(
                executor_kind, mod=mod, device=dev, target=target
            ).evaluate()(
                x_data,
                np.array(scale_d).astype("float32"),
                np.array(scale_h).astype("float32"),
                np.array(scale_w).astype("float32"),
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
    verify_upsampling3d((1, 1, 1, 1, 1), 2, 3, 4, "NCDHW", "nearest_neighbor")
    verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 3.0, 4.0, "NCDHW", "nearest_neighbor")
    verify_upsampling3d((1, 8, 16, 16, 16), 2.0, 5.0, 1.0, "NCDHW", "trilinear", "align_corners")
    verify_upsampling3d((1, 20, 3, 4, 16), 2.0, 2.0, 2.0, "NDHWC", "nearest_neighbor")
    verify_upsampling3d((1, 8, 4, 16, 15), 2.0, 2.0, 2.0, "NDHWC", "trilinear", "align_corners")
# Tests upsampling3d type inference with scale_h passed as a constant and
# scale_d / scale_w passed as runtime variables.
def test_dyn_upsampling3d_infer_type_const():
    """Mixed static/dynamic scales must infer Any for all three spatial dims."""
    n, c, d, h, w = (
        te.size_var("n"),
        te.size_var("c"),
        te.size_var("d"),
        te.size_var("h"),
        te.size_var("w"),
    )
    data = relay.var("data", relay.TensorType((n, c, d, h, w), "int8"))
    # Fix: scale_d was previously given the debug name "scale_h".
    scale_d = relay.Var("scale_d", relay.TensorType((), "float32"))
    scale_w = relay.Var("scale_w", relay.TensorType((), "float32"))
    z = relay.nn.upsampling3d(data, scale_d, 2.0, scale_w, layout="NCDHW", method="trilinear")
    zz = run_infer_type(z)
    assert zz.checked_type == relay.TensorType(
        (n, c, relay.Any(), relay.Any(), relay.Any()), "int8"
    )
@tvm.testing.uses_gpu
def test_dyn_pad(executor_kind):
    """nn.pad with the pad widths (and optionally the fill value) given at runtime."""
    def verify_pad(dshape, pad_width, pad_val, dtype):
        """Pad with an explicit runtime fill value and compare to np.pad."""
        x = relay.var("x", relay.TensorType(dshape, dtype))
        ndim = len(dshape)
        # Pad widths are a runtime (ndim, 2) tensor of before/after amounts.
        pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
        pad_val_var = relay.var("pad_val_var", relay.TensorType((), dtype))
        y = relay.nn.pad(x, pad_width_var, pad_val_var)
        yy = run_infer_type(y)
        # Dynamic pad widths make every output dimension Any.
        assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
        func = relay.Function([x, pad_width_var, pad_val_var], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, pad_width, "constant", constant_values=(((pad_val,) * 2),) * ndim)
        pad_width = np.array(pad_width).astype("int64")
        verify_func(
            executor_kind, func, [data, pad_width, np.array(pad_val).astype(dtype)], ref_res
        )
    def verify_pad_default_fill(dshape, pad_width, dtype):
        """Same as verify_pad but relies on pad's default fill value."""
        x = relay.var("x", relay.TensorType(dshape, dtype))
        ndim = len(dshape)
        pad_width_var = relay.var("pad_width_var", relay.TensorType((ndim, 2), "int64"))
        y = relay.nn.pad(x, pad_width_var)
        yy = run_infer_type(y)
        assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * ndim, dtype)
        func = relay.Function([x, pad_width_var], y)
        data = np.random.uniform(size=dshape).astype(dtype)
        ref_res = np.pad(data, pad_width)
        pad_width = np.array(pad_width).astype("int64")
        verify_func(executor_kind, func, [data, pad_width], ref_res)
    verify_pad((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), 2.0, "int32")
    verify_pad((2, 7), ((1, 4), (2, 2)), 4.0, "float64")
    verify_pad_default_fill((4, 10, 7, 7), ((1, 1), (2, 2), (3, 3), (4, 4)), "float64")
    verify_pad_default_fill((2, 7), ((1, 4), (2, 2)), "int32")
if __name__ == "__main__":
    tvm.testing.main()
| 8,625 | 39.688679 | 98 | py |
tvm | tvm-main/tests/python/relay/op/test_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for tensor helpers."""
import tvm
import tvm.testing
from tvm import relay
import pytest
def test_device_copy_via_string():
    """device_copy accepts device names as strings and records both virtual devices."""
    x = relay.var("x")
    call = relay.op.device_copy(x, "cuda", "cpu")
    assert isinstance(call, relay.Call)
    assert len(call.args) == 1
    assert call.args[0] == x
    # Source side: CUDA (device type id 2), default device 0, no target/scope.
    assert call.attrs.src_virtual_device.device_type_int == 2  # ie kDLCUDA
    assert call.attrs.src_virtual_device.virtual_device_id == 0
    assert call.attrs.src_virtual_device.target is None
    assert call.attrs.src_virtual_device.memory_scope == ""
    # Destination side: CPU (device type id 1).
    assert call.attrs.dst_virtual_device.device_type_int == 1  # ie kDLCPU
    assert call.attrs.dst_virtual_device.virtual_device_id == 0
    assert call.attrs.dst_virtual_device.target is None
    assert call.attrs.dst_virtual_device.memory_scope == ""
def test_device_copy_via_device():
    """device_copy equally accepts tvm.Device objects in place of strings."""
    x = relay.var("x")
    call = relay.op.device_copy(x, tvm.device("cuda"), tvm.device("cpu"))
    assert isinstance(call, relay.Call)
    assert len(call.args) == 1
    assert call.args[0] == x
    assert call.attrs.src_virtual_device.device_type_int == 2  # ie kDLCUDA
    assert call.attrs.dst_virtual_device.device_type_int == 1  # ie kDLCPU
if __name__ == "__main__":
    tvm.testing.main()
| 2,046 | 38.365385 | 75 | py |
tvm | tvm-main/tests/python/relay/op/annotation/test_annotation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for annotations."""
import tvm
import tvm.testing
from tvm import relay
import pytest
def test_on_device_via_string():
    """on_device with a device string; defaults constrain the body but not the result."""
    x = relay.Var("x")
    call = relay.annotation.on_device(x, "cuda")
    assert isinstance(call, relay.Call)
    assert len(call.args) == 1
    assert call.args[0] == x
    assert call.attrs.virtual_device.device_type_int == 2  # ie kDLCUDA
    assert call.attrs.virtual_device.virtual_device_id == 0
    assert call.attrs.virtual_device.target is None
    assert call.attrs.virtual_device.memory_scope == ""
    # Default constraint flags: body constrained, result free.
    assert call.attrs.constrain_body
    assert not call.attrs.constrain_result
def test_on_device_via_device():
    """on_device also accepts a tvm.Device object in place of a string."""
    annotated = relay.annotation.on_device(relay.Var("x"), tvm.device("cpu"))
    # kDLCPU has device type id 1.
    assert annotated.attrs.virtual_device.device_type_int == 1
def test_on_device_invalid_device():
    """An unknown device string must raise ValueError."""
    with pytest.raises(ValueError):
        relay.annotation.on_device(relay.Var("x"), "bogus")
def test_on_device_fixed():
    """constrain_result=True pins both the body and the result to the device."""
    x = relay.Var("x")
    call = relay.annotation.on_device(x, "cuda", constrain_result=True)
    assert call.attrs.virtual_device.device_type_int == 2  # ie kDLCUDA
    assert call.attrs.constrain_body
    assert call.attrs.constrain_result
def test_on_device_free():
    """With neither side constrained, the annotation records an invalid device type."""
    x = relay.Var("x")
    call = relay.annotation.on_device(x, "cuda", constrain_result=False, constrain_body=False)
    assert call.attrs.virtual_device.device_type_int == -1  # ie kInvalidDeviceType
    assert not call.attrs.constrain_body
    assert not call.attrs.constrain_result
if __name__ == "__main__":
    tvm.testing.main()
| 2,381 | 34.552239 | 94 | py |
tvm | tvm-main/tests/python/relay/utils/external_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Utilities for testing external code generation"""
import os
import sys
import pytest
import tvm
from tvm import relay, runtime, testing
from tvm.contrib import utils
# Mark to skip a test on Windows hosts.
skip_windows = pytest.mark.skipif(sys.platform == "win32", reason="Skip test on Windows for now")
# Mark to skip a test when TVM was built without microTVM support.
skip_micro = pytest.mark.skipif(
    tvm.support.libinfo().get("USE_MICRO", "OFF") != "ON",
    reason="MicroTVM support not enabled. Set USE_MICRO=ON in config.cmake to enable.",
)
def parametrize_external_codegen_checks(test):
    """Parametrize over the various check_result functions which are available"""
    # The AOT check additionally needs microTVM; all three need a non-Windows host.
    return pytest.mark.parametrize(
        "check_result",
        [
            pytest.param(check_aot_executor_result, marks=[skip_windows, skip_micro]),
            pytest.param(check_graph_executor_result, marks=[skip_windows]),
            pytest.param(check_vm_result, marks=[skip_windows]),
        ],
    )(test)
def parametrize_external_json_codegen_checks(test):
    """Decorate *test* so it runs once per JSON-capable check_result function."""
    checkers = [
        pytest.param(check_graph_executor_result, marks=[skip_windows]),
        pytest.param(check_vm_result, marks=[skip_windows]),
    ]
    decorator = pytest.mark.parametrize("check_result", checkers)
    return decorator(test)
def update_lib(lib):
    """Export *lib* as a shared object (with TVM's contrib sources on the include
    path) and load it back as a runtime module.

    Parameters
    ----------
    lib : tvm.runtime.Module
        The library module produced by an external-codegen build.

    Returns
    -------
    tvm.runtime.Module
        The module re-loaded from the exported shared library.
    """
    test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__)))
    source_dir = os.path.join(test_dir, "..", "..", "..", "..")
    contrib_path = os.path.join(source_dir, "src", "runtime", "contrib")
    # Build the compile-options dict directly instead of mutating an empty one.
    kwargs = {"options": ["-O2", "-std=c++17", "-I" + contrib_path]}
    tmp_path = utils.tempdir()
    lib_name = "lib.so"
    lib_path = tmp_path.relpath(lib_name)
    lib.export_library(lib_path, fcompile=False, **kwargs)
    lib = tvm.runtime.load_module(lib_path)
    return lib
def check_vm_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()):
    """Compile *mod* for the Relay VM, run it with *map_inputs*, compare to *result*.

    NOTE(review): *out_shape* is unused here; presumably kept for signature
    parity with the other check_* helpers.
    """
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        exe = relay.vm.compile(mod, target=target)
    code, lib = exe.save()
    lib = update_lib(lib)
    # Reload the executable against the freshly exported shared library.
    exe = runtime.vm.Executable.load_exec(code, lib)
    vm = runtime.vm.VirtualMachine(exe, device)
    out = vm.run(**map_inputs)
    tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_graph_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    """Build *mod* for the graph executor, run it, and compare output 0 to *result*."""
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        executor_factory = relay.build(mod, target=target)
        lib = update_lib(executor_factory.lib)
        rt_mod = tvm.contrib.graph_executor.create(executor_factory.graph_json, lib, device)
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.run()
        out = tvm.nd.empty(out_shape, device=device)
        out = rt_mod.get_output(0, out)
        tvm.testing.assert_allclose(out.numpy(), result, rtol=tol, atol=tol)
def check_aot_executor_result(
    mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", device=tvm.cpu()
):
    """Compile and run *mod* under the AOT executor, comparing against *result*.

    NOTE(review): *out_shape*, *tol*, *target* and *device* are unused; presumably
    kept for signature parity with the other check_* helpers.
    """
    # Late import to avoid breaking test with USE_MICRO=OFF.
    from tvm.testing.aot import AOTTestModel, compile_and_run
    from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
    interface_api = "packed"
    use_unpacked_api = False
    test_runner = AOT_DEFAULT_RUNNER
    compile_and_run(
        AOTTestModel(module=mod, inputs=map_inputs, outputs={"output": result}),
        test_runner,
        interface_api,
        use_unpacked_api,
    )
def set_external_func_attr(func, compiler, ext_symbol):
    """Mark *func* as an external primitive handled by *compiler* under *ext_symbol*."""
    return (
        func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
        .with_attr("Compiler", compiler)
        .with_attr("global_symbol", ext_symbol)
    )
| 4,580 | 35.357143 | 99 | py |
tvm | tvm-main/tests/python/relay/utils/ref_funcs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
def gather_nd(data_np, indices_np, batch_dims=0):
    """Reference gather_nd implemented in pure numpy.

    ``indices_np`` carries the index-coordinate axis first; the first
    ``batch_dims`` dimensions of ``data_np`` are treated as shared batch
    dimensions between data and indices.
    """
    def _gather_single_batch_axis(data, indices):
        # For every batch row, index it with that row's coordinate tuple,
        # then stack the gathered pieces back along the batch dimension.
        gathered = [row[tuple(indices[:, b])] for b, row in enumerate(data)]
        return np.stack(gathered, 0)

    if batch_dims == 0:
        return data_np[tuple(indices_np)]
    if batch_dims == 1:
        return _gather_single_batch_axis(data_np, indices_np)
    # batch_dims > 1: fold all batch dimensions into one, gather, then restore.
    data_shape = data_np.shape
    indices_shape = indices_np.shape
    flat_data = np.reshape(data_np, (-1,) + data_shape[batch_dims:])
    flat_indices = np.reshape(
        indices_np, (indices_shape[0], -1) + indices_shape[(batch_dims + 1) :]
    )
    flat_result = _gather_single_batch_axis(flat_data, flat_indices)
    out_shape = indices_shape[1 : (batch_dims + 1)] + flat_result.shape[1:]
    return np.reshape(flat_result, out_shape)
| 1,880 | 37.387755 | 82 | py |
tvm | tvm-main/tests/python/relay/utils/assert_diagnostic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import IRModule, get_global_func, register_func, relay
from tvm.error import DiagnosticError
from tvm.ir.diagnostics import get_renderer, override_renderer
from tvm.relay import SpanCheck
from tvm.relay.transform import AnnotateSpans
from tvm.runtime import Object
# Renderer in place before any override; used to mirror diagnostic output.
DEFAULT_RENDERER = get_renderer()
# The currently active DiagnosticTesting instance, or None outside a test.
__TESTING__ = None
def testing_renderer(diag_ctx):
    """Diagnostic renderer hook that forwards to the active DiagnosticTesting."""
    global __TESTING__
    if __TESTING__ and __TESTING__.mirror:
        # Also render via the default renderer so the output stays visible.
        DEFAULT_RENDERER.render(diag_ctx)
    if __TESTING__:
        __TESTING__._render(diag_ctx)
class DiagnosticTesting:
    """Context manager that captures TVM diagnostics and asserts on their messages.

    While active it installs :func:`testing_renderer` as the diagnostic renderer;
    a DiagnosticError raised inside the block is suppressed iff some expected
    message fragment was seen in the rendered diagnostics.
    """
    def __init__(self, mirror=False):
        # mirror=True additionally shows diagnostics via the default renderer.
        self.mirror = mirror
        self.messages = []
    def __enter__(self):
        global __TESTING__
        __TESTING__ = self
        override_renderer(testing_renderer)
        return self
    def __exit__(self, type, value, traceback):
        global __TESTING__
        __TESTING__ = None
        override_renderer(None)
        # Suppress the DiagnosticError only when the expected messages matched.
        if type is DiagnosticError and self.matches:
            return True
    def assert_message(self, in_message):
        """Register a message fragment that must appear in some diagnostic."""
        self.messages.append(in_message)
    def _render(self, diag_ctx):
        # Called by testing_renderer; records whether any expected fragment matched.
        self.matches = False
        for diagnostic in diag_ctx.diagnostics:
            message = diagnostic.message
            for partial_msg in self.messages:
                if partial_msg in message:
                    self.matches = True
| 2,168 | 31.373134 | 63 | py |
tvm | tvm-main/tests/python/relay/utils/tag_span.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay, tir
from tvm.relay import expr as _expr
from tvm.relay.expr_functor import ExprVisitor
def _set_span(expr, src):
    """Return a copy of *expr* whose span is a fresh span built from *src*.

    Dispatches on the concrete relay expression type; spans are set via the
    *WithFields helpers since relay nodes are immutable.
    """
    if isinstance(expr, _expr.Call):
        return _expr.CallWithFields(
            expr, expr.op, expr.args, expr.attrs, expr.type_args, None, _create_span(src)
        )
    elif isinstance(expr, _expr.Var):
        return _expr.VarWithFields(expr, expr.vid, expr.type_annotation, None, _create_span(src))
    elif isinstance(expr, _expr.TupleGetItem):
        return _expr.TupleGetItemWithFields(
            expr, expr.tuple_value, expr.index, None, _create_span(src)
        )
    elif isinstance(expr, _expr.Constant):
        return _expr.ConstantWithFields(expr, expr.data, None, _create_span(src))
    elif isinstance(expr, _expr.Tuple):
        return _expr.TupleWithFields(expr, expr.fields, None, _create_span(src))
    elif isinstance(expr, _expr.TupleWrapper):
        # Wrappers carry no span of their own; tag the underlying tuple value.
        return _expr.TupleWrapper(_set_span(expr.tuple_value, src), expr.size)
    assert False, f"unsupported type {type(expr)}"
def _create_span(src):
    """Build a Span from a source-name string, or a SequentialSpan from a list.

    List items may be strings, Spans, SequentialSpans (flattened), or None.
    """
    if isinstance(src, list):
        tmp_list = []
        for s in src:
            if isinstance(s, str):
                tmp_list.append(_create_span(s))
            elif isinstance(s, relay.Span):
                tmp_list.append(s)
            elif isinstance(s, relay.SequentialSpan):
                # Flatten nested sequential spans into a single level.
                tmp_list.extend(s.spans)
            elif s is None:
                tmp_list.append(s)
            else:
                assert False, f"unsupported type {type(s)}"
        return relay.SequentialSpan(tmp_list)
    # Line/column positions are irrelevant for these tests; only the name matters.
    return relay.Span(relay.SourceName(src), 0, 0, 0, 0)
def _collect_spans(objref):
    """Gather the spans of *objref* and its children via a post-order visit."""
    class Collector:
        def __init__(self):
            self._spans = []
        def collect(self, objref):
            # Only nodes that expose a span attribute contribute to the result.
            if hasattr(objref, "span"):
                self._spans.append(objref.span)
        @property
        def get_spans(self):
            return self._spans
    # Pick the post-order visitor matching the IR family of objref.
    pov = None
    if isinstance(objref, relay.Expr):
        pov = relay.analysis.post_order_visit
    elif isinstance(objref, (tir.Stmt, tir.expr.PrimExprWithOp)):
        pov = tir.stmt_functor.post_order_visit
    else:
        assert False, f"unsupported type {type(objref)}"
    c = Collector()
    pov(objref, c.collect)
    return c.get_spans
def _verify_span(lhs, rhs):
    """Assert both objects carry the same number of pairwise structurally-equal spans."""
    spans_a = _collect_spans(lhs)
    spans_b = _collect_spans(rhs)
    assert len(spans_a) == len(spans_b)
    for span_a, span_b in zip(spans_a, spans_b):
        assert tvm.ir.structural_equal(span_a, span_b)
def _verify_structural_equal_with_span(lhs, rhs, assert_mode=False, map_free_vars=False):
    """Check structural equality of *lhs* and *rhs*, then check their spans match."""
    if isinstance(lhs, relay.Var) and isinstance(rhs, relay.Var):
        # SEqualReduce compares the vid of Var type. Therefore we only compare span here.
        _verify_span(lhs, rhs)
        return
    if assert_mode:
        tvm.ir.assert_structural_equal(lhs, rhs, map_free_vars)
    else:
        assert tvm.ir.structural_equal(lhs, rhs, map_free_vars)
    _verify_span(lhs, rhs)
| 3,835 | 34.192661 | 97 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.