repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_part_performance.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from functools import reduce
import numpy as np
import math
import tvm.contrib.ethosu.cascader as cs
from tvm.contrib.ethosu.cascader.device_config import _Shape
from .infra import make_matrices
@pytest.mark.parametrize(
    "acc_config, expected",
    [
        ("ethos-u55-256", (1, 0.125, 0.75, 0.375, 0.75)),
        ("ethos-u55-128", (1, 0.25, 1.5, 0.75, 0.75)),
        ("ethos-u55-64", (1, 0.5, 3, 1.5, 1.5)),
        ("ethos-u55-32", (2, 1, 6, 3, 3)),
    ],
)
def test_device_config_cycles(acc_config, expected):
    """Check per-element output cycle estimates across operator types.

    Each entry of ``expected`` corresponds to one (op_type, op_str,
    ifm_dtype, ofm_dtype, activation) combination below, in order.
    """
    device_config = cs.EthosuDeviceConfig(acc_config)
    cases = [
        ("ethosu_conv2d", None, "int8", "int8", "LUT"),
        ("ethosu_pooling", "MAX", "int8", "int8", "NONE"),
        ("ethosu_binary_elementwise", "ADD", "int8", "int8", "NONE"),
        ("ethosu_binary_elementwise", "MUL", "int8", "int8", "NONE"),
        ("ethosu_binary_elementwise", "MUL", "int8", "int32", "NONE"),
    ]
    for expected_cycles, (op_type, op_str, ifm_dtype, ofm_dtype, activation) in zip(
        expected, cases
    ):
        cycles = device_config._get_output_cycles(
            op_type, op_str, ifm_dtype, ofm_dtype, activation
        )
        assert cycles == expected_cycles
@pytest.mark.parametrize(
    "accelerator, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape, block_shape, input_block_shape, expected",
    [
        (
            "ethos-u55-128",
            "ethosu_conv2d",
            "NONE",
            (3, 3),
            (1, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 16, 16, 96),
            (1, 16, 16, 96),
            (1, 8, 8, 16),
            (1, 10, 10, 32),
            167733,
        ),
        (
            "ethos-u55-128",
            "ethosu_conv2d",
            "NONE",
            (10, 4),
            (2, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 58, 13, 1),
            (1, 25, 10, 276),
            (1, 6, 10, 32),
            (1, 18, 14, 8),
            174105,
        ),
        (
            "ethos-u55-128",
            "ethosu_depthwise_conv2d",
            "NONE",
            (3, 3),
            (2, 2),
            (1, 1),
            (1, 1, 1, 1),
            (1, 25, 10, 276),
            (1, 13, 5, 276),
            (1, 7, 6, 16),
            (1, 15, 14, 16),
            17590,
        ),
        (
            "ethos-u55-128",
            "ethosu_depthwise_conv2d",
            "NONE",
            (4, 9),
            (1, 1),
            (1, 1),
            (0, 0, 0, 0),
            (1, 28, 81, 42),
            (1, 25, 73, 41),
            (1, 4, 16, 16),
            (1, 7, 24, 16),
            173414,
        ),
    ],
)
def test_conv_performance(
    accelerator,
    op_type,
    activation,
    kernel,
    stride,
    dilation,
    padding,
    in_shape,
    out_shape,
    block_shape,
    input_block_shape,
    expected,
):
    """Check EthosuPart compute-cycle estimates against reference counts
    (within a 10% tolerance)."""
    ifm_channels = in_shape[3]
    ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
        op_type,
        kernel,
        stride,
        padding,
        "NHWC",
        "NHWC",
        dilation,
        ifm_channels,
    )
    ifm_propagator = cs.Propagator(ifm_matrix, ifm_offset)
    weights_propagator = cs.Propagator(weight_matrix, weight_offset)
    kernel_h, kernel_w = kernel
    # Kernels are decomposed into at most 8x8 subkernels
    subkernels = ((kernel_h + 7) // 8) * ((kernel_w + 7) // 8)
    device_config = cs.EthosuDeviceConfig(accelerator)
    output_cycles = device_config._get_output_cycles(op_type, "", "int8", "int8", activation)
    # Scale per-element cycles by the number of elements in an output block
    output_cycles *= math.prod(block_shape)
    is_partkernel = device_config.is_partkernel(op_type, ifm_channels, "int8", kernel_h * kernel_w)
    compute_cycles = device_config._estimate_compute_cycles_per_block(
        op_type,
        _Shape(block_shape),
        _Shape(input_block_shape),
        kernel_h,
        kernel_w,
        ifm_channels,
        "int8",
        is_partkernel,
    )
    block_configs = [
        cs.BlockConfig(input_block_shape, block_shape, compute_cycles, int(output_cycles))
    ]
    output_quantum = [1, 1, 2, 8]
    te_subgraph = cs.TESubgraph([], None)
    part = cs.EthosuPart(
        te_subgraph,
        [ifm_propagator, weights_propagator],
        output_quantum,
        subkernels,
        block_configs,
        1,
    )
    part.set_input(0, cs.Tensor(in_shape, "int8"))
    part.set_input(1, cs.Tensor([ifm_channels, kernel_h, kernel_w, out_shape[-1]], "int8"))
    part.set_output(cs.Tensor(out_shape, "int8"))
    stripes = [1] * len(output_quantum)
    offset = [0] * len(output_quantum)
    order = [1, 2, 3, 4]
    stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)
    compute_cycles = part.get_performance_info(stripe_config, cs.BufferMode.ROLLING).compute_cycles
    tolerance = expected * 0.1
    assert expected - tolerance <= compute_cycles <= expected + tolerance
if __name__ == "__main__":
    # BUG FIX: `tvm` is never bound in this module — only
    # `tvm.contrib.ethosu.cascader` is imported, under the alias `cs` —
    # so referencing `tvm.testing` directly raised a NameError.
    import tvm.testing

    tvm.testing.main()
| 6,754 | 27.744681 | 137 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
# Detect whether the Ethos(TM)-U support library (Vela) is installed; the
# Relay-based fixtures further down are only defined when it is.
ethosu_enabled = True
try:
    import ethosu.vela
except ImportError:
    ethosu_enabled = False
import tvm.contrib.ethosu.cascader as cs
@pytest.fixture
def FLASH():
    """Memory region modelling an off-chip flash."""
    region_params = {
        "name": "FLASH",
        "size": 10**7,
        "read_bandwidth": 4,
        "write_bandwidth": 4,
        "read_latency": 0,
        "write_latency": 0,
        "burst_length": 1,
    }
    return cs.MemoryRegion(**region_params)
@pytest.fixture
def DRAM():
    """Memory region modelling an off-chip DRAM."""
    region_params = {
        "name": "DRAM",
        "size": 10**9,
        "read_bandwidth": 8,
        "write_bandwidth": 8,
        "read_latency": 0,
        "write_latency": 0,
        "burst_length": 1,
    }
    return cs.MemoryRegion(**region_params)
@pytest.fixture
def SRAM():
    """Memory region modelling an on-chip SRAM."""
    region_params = {
        "name": "SRAM",
        "size": 10**6,
        "read_bandwidth": 16,
        "write_bandwidth": 16,
        "read_latency": 0,
        "write_latency": 0,
        "burst_length": 1,
    }
    return cs.MemoryRegion(**region_params)
if ethosu_enabled:
    # Only import the TVM/Relay helpers when Vela is present — the fixtures
    # below build Ethos(TM)-U specific Relay graphs that need them.
    import tvm
    from tvm import relay
    from tvm.relay.testing import run_opt_pass
    from .infra import create_te_graph
    from ..infra import (
        make_ethosu_conv2d,
        make_ethosu_depthwise_conv2d,
        make_ethosu_binary_elementwise,
    )
    def make_TwoConv2DTE():
        """Build (schedule, TE graph, const dict) for a two-layer conv2d
        network: NHWC -> NHCWB16 intermediate -> NHWC output."""
        def _get_func():
            ifm = relay.var("ifm", shape=(1, 12, 12, 8), dtype="int8")
            # 1x1 convolution producing an NHCWB16 intermediate
            conv1 = make_ethosu_conv2d(
                ifm=ifm,
                ifm_channels=8,
                ofm_channels=32,
                kernel_shape=(1, 1),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                activation="NONE",
                ifm_layout="NHWC",
                ofm_layout="NHCWB16",
            )
            # 3x3 convolution consuming the NHCWB16 intermediate
            conv2 = make_ethosu_conv2d(
                ifm=conv1,
                ifm_channels=32,
                ofm_channels=16,
                kernel_shape=(3, 3),
                padding=(1, 1),
                strides=(1, 1),
                dilation=(1, 1),
                activation="NONE",
                ifm_layout="NHCWB16",
                ofm_layout="NHWC",
            )
            func = relay.Function(relay.analysis.free_vars(conv2), conv2)
            func = run_opt_pass(func, relay.transform.InferType())
            return func
        func = _get_func()
        te_graph, const_dict = create_te_graph(func)
        sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
        return sch, te_graph, const_dict
@pytest.fixture
def TwoConv2DTE():
return make_TwoConv2DTE()
@pytest.fixture
def TwoConv2DGraph():
_, te_graph, const_dict = make_TwoConv2DTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
    def make_TwoConv2DWithSliceTE():
        """Build (schedule, TE graph, const dict) for conv2d -> strided_slice
        -> conv2d, used to exercise non-NPU ops between NPU ops."""
        def _get_func():
            ifm = relay.var("ifm", shape=(1, 12, 12, 8), dtype="int8")
            conv1 = make_ethosu_conv2d(
                ifm=ifm,
                ifm_channels=8,
                ofm_channels=64,
                kernel_shape=(1, 1),
                padding=(0, 0),
                strides=(1, 1),
                dilation=(1, 1),
                activation="NONE",
                ifm_layout="NHWC",
                ofm_layout="NHWC",
            )
            # NOTE(review): the slice end requests 128 channels while conv1
            # produces 64 — presumably relies on clamping; confirm intended.
            strided_slice = relay.strided_slice(conv1, [0, 0, 0, 0], [1, 6, 6, 128])
            conv2 = make_ethosu_conv2d(
                ifm=strided_slice,
                ifm_channels=64,
                ofm_channels=16,
                kernel_shape=(3, 3),
                padding=(1, 1),
                strides=(1, 1),
                dilation=(1, 1),
                activation="NONE",
                ifm_layout="NHWC",
                ofm_layout="NHCWB16",
            )
            func = relay.Function(relay.analysis.free_vars(conv2), conv2)
            func = run_opt_pass(func, relay.transform.InferType())
            return func
        func = _get_func()
        te_graph, const_dict = create_te_graph(func)
        sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
        return sch, te_graph, const_dict
@pytest.fixture
def TwoConv2DWithSliceTE():
return make_TwoConv2DWithSliceTE()
@pytest.fixture
def TwoConv2DWithSliceGraph():
_, te_graph, const_dict = make_TwoConv2DWithSliceTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
    def make_MobileNetv2DiamondTE():
        """Build (schedule, TE graph, const dict) for a MobileNetV2-style
        residual 'diamond': conv1 feeds both a conv/depthwise/conv chain and
        the final elementwise ADD that merges the two branches."""
        def _get_func():
            ifm = relay.var("ifm", shape=(1, 56, 56, 96), dtype="int8")
            # Bottleneck projection; also the residual input of the ADD below
            conv1 = make_ethosu_conv2d(
                ifm=ifm,
                ifm_channels=96,
                ofm_channels=24,
                kernel_shape=(1, 1),
                padding=(0, 0, 0, 0),
                strides=(1, 1),
                dilation=(1, 1),
            )
            # Expansion conv
            conv2 = make_ethosu_conv2d(
                ifm=conv1,
                ifm_channels=24,
                ofm_channels=144,
                kernel_shape=(1, 1),
                padding=(0, 0, 0, 0),
                strides=(1, 1),
                dilation=(1, 1),
            )
            depth1 = make_ethosu_depthwise_conv2d(
                ifm=conv2,
                channels=144,
                kernel_shape=(3, 3),
                padding=(1, 1, 1, 1),
                strides=(1, 1),
                dilation=(1, 1),
            )
            # Projection back to 24 channels so it can be added to conv1
            conv3 = make_ethosu_conv2d(
                ifm=depth1,
                ifm_channels=144,
                ofm_channels=24,
                kernel_shape=(1, 1),
                padding=(0, 0, 0, 0),
                strides=(1, 1),
                dilation=(1, 1),
            )
            add1 = make_ethosu_binary_elementwise(
                ifm=conv1,
                ifm2=conv3,
                ifm_channels=24,
                ifm2_channels=24,
                operator_type="ADD",
                ofm_dtype="int8",
            )
            func = relay.Function(relay.analysis.free_vars(add1), add1)
            func = run_opt_pass(func, relay.transform.InferType())
            return func
        func = _get_func()
        te_graph, const_dict = create_te_graph(func)
        sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
        return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv2DiamondTE():
return make_MobileNetv2DiamondTE()
@pytest.fixture
def MobileNetv2DiamondGraph():
_, te_graph, const_dict = make_MobileNetv2DiamondTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
    def make_BinaryTE():
        """Build (schedule, TE graph, const dict) for a two-input graph:
        two parallel 1x1 convs merged by an elementwise ADD."""
        def _get_func():
            ifm_a = relay.var("ifm_a", shape=(1, 8, 8, 8), dtype="int8")
            ifm_b = relay.var("ifm_b", shape=(1, 8, 8, 8), dtype="int8")
            conv1 = make_ethosu_conv2d(
                ifm=ifm_a,
                ifm_channels=8,
                ofm_channels=8,
                kernel_shape=(1, 1),
                padding=(0, 0, 0, 0),
                strides=(1, 1),
                dilation=(1, 1),
            )
            conv2 = make_ethosu_conv2d(
                ifm=ifm_b,
                ifm_channels=8,
                ofm_channels=8,
                kernel_shape=(1, 1),
                padding=(0, 0, 0, 0),
                strides=(1, 1),
                dilation=(1, 1),
            )
            add1 = make_ethosu_binary_elementwise(
                ifm=conv1,
                ifm2=conv2,
                ifm_channels=8,
                ifm2_channels=8,
                operator_type="ADD",
                ofm_dtype="int8",
            )
            func = relay.Function(relay.analysis.free_vars(add1), add1)
            func = run_opt_pass(func, relay.transform.InferType())
            return func
        func = _get_func()
        te_graph, const_dict = create_te_graph(func)
        sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
        return sch, te_graph, const_dict
@pytest.fixture
def BinaryTE():
return make_BinaryTE()
@pytest.fixture
def BinaryGraph():
_, te_graph, const_dict = make_BinaryTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_MobileNetv1StartTE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 224, 224, 3), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=3,
ofm_channels=32,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
depth1 = make_ethosu_depthwise_conv2d(
ifm=conv1,
channels=32,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=depth1,
ifm_channels=32,
ofm_channels=64,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth2 = make_ethosu_depthwise_conv2d(
ifm=conv2,
channels=64,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv3 = make_ethosu_conv2d(
ifm=depth2,
ifm_channels=64,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth3 = make_ethosu_depthwise_conv2d(
ifm=conv3,
channels=128,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv4 = make_ethosu_conv2d(
ifm=depth3,
ifm_channels=128,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth4 = make_ethosu_depthwise_conv2d(
ifm=conv4,
channels=128,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
func = relay.Function(relay.analysis.free_vars(depth4), depth4)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv1StartTE():
return make_MobileNetv1StartTE()
@pytest.fixture
def MobileNetv1StartGraph():
_, te_graph, const_dict = make_MobileNetv1StartTE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
def make_MobileNetv1TE():
def _get_func():
ifm = relay.var("ifm", shape=(1, 224, 224, 3), dtype="int8")
conv1 = make_ethosu_conv2d(
ifm=ifm,
ifm_channels=3,
ofm_channels=32,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
depth1 = make_ethosu_depthwise_conv2d(
ifm=conv1,
channels=32,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv2 = make_ethosu_conv2d(
ifm=depth1,
ifm_channels=32,
ofm_channels=64,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth2 = make_ethosu_depthwise_conv2d(
ifm=conv2,
channels=64,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv3 = make_ethosu_conv2d(
ifm=depth2,
ifm_channels=64,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth3 = make_ethosu_depthwise_conv2d(
ifm=conv3,
channels=128,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv4 = make_ethosu_conv2d(
ifm=depth3,
ifm_channels=128,
ofm_channels=128,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth4 = make_ethosu_depthwise_conv2d(
ifm=conv4,
channels=128,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv5 = make_ethosu_conv2d(
ifm=depth4,
ifm_channels=128,
ofm_channels=256,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth5 = make_ethosu_depthwise_conv2d(
ifm=conv5,
channels=256,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv6 = make_ethosu_conv2d(
ifm=depth5,
ifm_channels=256,
ofm_channels=256,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth6 = make_ethosu_depthwise_conv2d(
ifm=conv6,
channels=256,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv7 = make_ethosu_conv2d(
ifm=depth6,
ifm_channels=256,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth7 = make_ethosu_depthwise_conv2d(
ifm=conv7,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv8 = make_ethosu_conv2d(
ifm=depth7,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth8 = make_ethosu_depthwise_conv2d(
ifm=conv8,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv9 = make_ethosu_conv2d(
ifm=depth8,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth9 = make_ethosu_depthwise_conv2d(
ifm=conv9,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv10 = make_ethosu_conv2d(
ifm=depth9,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth10 = make_ethosu_depthwise_conv2d(
ifm=conv10,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv11 = make_ethosu_conv2d(
ifm=depth10,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth11 = make_ethosu_depthwise_conv2d(
ifm=conv11,
channels=512,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv12 = make_ethosu_conv2d(
ifm=depth11,
ifm_channels=512,
ofm_channels=512,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth12 = make_ethosu_depthwise_conv2d(
ifm=conv12,
channels=512,
kernel_shape=(3, 3),
padding=(0, 0, 1, 1),
strides=(2, 2),
dilation=(1, 1),
)
conv13 = make_ethosu_conv2d(
ifm=depth12,
ifm_channels=512,
ofm_channels=1024,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
depth13 = make_ethosu_depthwise_conv2d(
ifm=conv13,
channels=1024,
kernel_shape=(3, 3),
padding=(1, 1, 1, 1),
strides=(1, 1),
dilation=(1, 1),
)
conv14 = make_ethosu_conv2d(
ifm=depth13,
ifm_channels=1024,
ofm_channels=1024,
kernel_shape=(1, 1),
padding=(0, 0, 0, 0),
strides=(1, 1),
dilation=(1, 1),
)
func = relay.Function(relay.analysis.free_vars(conv14), conv14)
func = run_opt_pass(func, relay.transform.InferType())
return func
func = _get_func()
te_graph, const_dict = create_te_graph(func)
sch = tvm.te.create_schedule([t.op for t in te_graph.outputs])
return sch, te_graph, const_dict
@pytest.fixture
def MobileNetv1TE():
return make_MobileNetv1TE()
@pytest.fixture
def MobileNetv1Graph():
_, te_graph, const_dict = make_MobileNetv1TE()
device_config = cs.EthosuDeviceConfig("ethos-u55-256")
return cs.create_cascader_graph(te_graph, const_dict, device_config)
| 20,450 | 31.617225 | 84 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_inline_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import te
from tvm.topi.transform import reshape
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.inline import match_ethosu_inline
def test_ethosu_inline_matcher():
    """A reshape should match as an InlinePart whose single propagator
    reads the whole input regardless of the requested output stripe."""
    input_shape = (2, 5, 6)
    reshaped_shape = (2, 30)
    input_tensor = te.placeholder(input_shape, dtype="int8")
    out = reshape(input_tensor, reshaped_shape)
    # Transform matrix that always requests the full input extent
    expected_transform = [[0, 0, dim] for dim in input_shape] + [[0, 0, 1]]
    expected_offset = [0, 0, 0]
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_inline(out, device_config)
    assert isinstance(part, cs.InlinePart)
    assert len(part.propagators) == 1
    assert part.propagators[0].transform == expected_transform
    assert part.propagators[0].offset == expected_offset
if __name__ == "__main__":
    # BUG FIX: `tvm` is never bound in this module (only `te` and the
    # cascader alias `cs` are imported), so referencing `tvm.testing`
    # directly raised a NameError.
    import tvm.testing

    tvm.testing.main()
| 1,699 | 32.333333 | 74 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_memory_reduction.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tensorflow as tf
import tflite.Model
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.micro import model_library_format as mlf
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
import tvm
from tvm import WorkspaceMemoryPools, WorkspacePoolInfo, PoolInfoProperties
from tvm.relay.backend.contrib.ethosu.codegen import extract_memory_info
from .. import infra
def _get_compilation_config(accel_type, enable_cascader, enable_striping):
    """Return (target, ethosu_target, runtime, executor, pass_config)
    describing an AOT/CRT build for the given NPU configuration."""
    target = tvm.target.Target("c")
    ethosu_target = tvm.target.Target("ethos-u")
    runtime = Runtime("crt")
    executor_options = {
        "workspace-byte-alignment": 16,
        "interface-api": "c",
        "unpacked-api": True,
    }
    executor = Executor("aot", executor_options)
    enable_usmp = True
    pass_config = {
        "tir.disable_vectorize": True,
        "relay.ext.ethos-u.options": {
            "accelerator_config": accel_type,
            "enable_cascader": enable_cascader,
            "enable_striping": enable_striping,
        },
        "tir.usmp.enable": enable_usmp,
        "tir.usmp.algorithm": "hill_climb",
        # Storage rewrite is disabled whenever USMP is enabled
        "tir.disable_storage_rewrite": enable_usmp,
    }
    return target, ethosu_target, runtime, executor, pass_config
def _get_ethosu_workspace_size(
    mod, params, accel_type, pool_size, enable_cascader, enable_striping
):
    """Compile ``mod`` for the NPU and return the "main" function's
    workspace size in bytes, as reported by Model Library Format."""
    target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
        accel_type, enable_cascader, enable_striping
    )
    # A single SRAM pool — the U55 cascader requires exactly one workspace pool
    workspace_memory_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "SRAM",
                [target, ethosu_target],
                PoolInfoProperties(
                    size_hint_bytes=pool_size,
                    read_bandwidth_bytes_per_cycle=16,
                    write_bandwidth_bytes_per_cycle=16,
                    target_burst_bytes={ethosu_target: 1},
                ),
            ),
        ]
    )
    with tvm.transform.PassContext(opt_level=3, config=pass_config):
        lib = tvm.relay.build(
            mod,
            target,
            executor=executor,
            runtime=runtime,
            workspace_memory_pools=workspace_memory_pools,
            params=params,
        )
    mlf_memory_map = mlf._build_function_memory_map(lib.function_metadata)
    return mlf_memory_map["main"][0]["workspace_size_bytes"]
@pytest.mark.parametrize(
    "accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping",
    [
        ("ethos-u55-256", 1067520, 14208),
        ("ethos-u55-128", 1067520, 4080),
        ("ethos-u55-64", 1067520, 4080),
        ("ethos-u55-32", 1067504, 4064),
    ],
)
def test_double_conv2d(
    accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping
):
    """Check SRAM workspace sizes for a two-conv2d network, with and
    without cascading/striping enabled."""
    np.random.seed(1)
    ifm_shape = (1, 321, 212, 6)
    @tf.function
    def tf_graph(x):
        ofm_channels = 10
        conv2d = tf.nn.conv2d(
            x,
            filters=tf.constant(
                np.random.uniform(size=[3, 2, ifm_shape[3], ofm_channels]),  # HWIO
                dtype=tf.float32,
            ),
            strides=(1, 1),
            padding="VALID",
            dilations=(2, 1),
        )
        conv2d = tf.nn.conv2d(
            conv2d,
            filters=tf.constant(
                np.random.uniform(size=(1, 1, ofm_channels, 3)),  # HWIO
                dtype=tf.float32,
            ),
            strides=(3, 2),
            padding="SAME",
            dilations=(1, 1),
        )
        return conv2d
    _, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(tflite_model)
    mod = partition_for_ethosu(relay_module, params)
    # Run the graph without the cascader, with lots of memory
    pool_size = 2000000
    workspace_size_cascader_disabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=False, enable_striping=False
    )
    workspace_size_cascader_enabled_striping_disabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=False
    )
    # If striping is not done, it should be the same as cascader disabled
    assert workspace_size_cascader_disabled == workspace_size_cascader_enabled_striping_disabled
    # Run the same graph with the cascader, giving it less memory to persuade the cascader to cascade
    pool_size = 600000
    workspace_size_cascader_enabled_striping_enabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=True
    )
    assert workspace_size_cascader_disabled == expected_ws_size_without_striping
    assert workspace_size_cascader_enabled_striping_enabled == expected_ws_size_with_striping
@pytest.mark.parametrize(
    "accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping",
    [
        ("ethos-u55-256", 180288, 15200),
        ("ethos-u55-128", 180288, 15200),
        ("ethos-u55-64", 180288, 14432),
        ("ethos-u55-32", 180272, 14416),
    ],
)
def test_depthwise2d_conv2d_pooling(
    accel_type, expected_ws_size_without_striping, expected_ws_size_with_striping
):
    """Check SRAM workspace sizes for a conv/depthwise/conv/pool network,
    with and without cascading/striping enabled."""
    np.random.seed(2)
    ifm_shape = (1, 80, 75, 3)
    @tf.function
    def tf_graph(x):
        # This graph will execute as one cascade
        ofm_channels = 7
        conv2d = tf.nn.conv2d(
            x,
            filters=tf.constant(
                np.random.uniform(size=[3, 2, ifm_shape[3], ofm_channels]),  # HWIO
                dtype=tf.float32,
            ),
            strides=(1, 1),
            padding="VALID",
            dilations=(1, 1),
        )
        depthwise2d = tf.nn.depthwise_conv2d(
            conv2d,
            tf.constant(np.random.uniform(size=(3, 3, ofm_channels, 1)), dtype=tf.float32),  # HWC1
            strides=(1, 1, 1, 1),
            padding="VALID",
            dilations=(1, 1),
        )
        relu = tf.nn.relu(depthwise2d)
        conv2d = tf.nn.conv2d(
            relu,
            filters=tf.constant(
                np.random.uniform(size=[3, 2, ofm_channels, 2]),  # HWIO
                dtype=tf.float32,
            ),
            strides=(1, 1),
            padding="SAME",
            dilations=(1, 1),
        )
        max_pool = tf.nn.max_pool(conv2d, (3, 3), (1, 1), "SAME")
        return max_pool
    _, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(tflite_model)
    mod = partition_for_ethosu(relay_module, params)
    # Run the graph without the cascader, with lots of memory
    pool_size = 10**6
    workspace_size_cascader_disabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=False, enable_striping=False
    )
    workspace_size_cascader_enabled_striping_disabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=False
    )
    # If striping is not done, it should be the same as cascader disabled
    assert workspace_size_cascader_disabled == workspace_size_cascader_enabled_striping_disabled
    # Run the same graph with the cascader, giving it less memory to persuade the cascader to cascade
    pool_size = 50000
    workspace_size_cascader_enabled_striping_enabled = _get_ethosu_workspace_size(
        mod, params, accel_type, pool_size, enable_cascader=True, enable_striping=True
    )
    assert workspace_size_cascader_disabled == expected_ws_size_without_striping
    assert workspace_size_cascader_enabled_striping_enabled == expected_ws_size_with_striping
def test_multiple_memory_pools():
    """
    The cascader does not support multiple workspace memory
    pools. Check the correct error is thrown.
    """
    np.random.seed(2)
    ifm_shape = (1, 80, 75, 3)
    target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
        "ethos-u55-256", True, True
    )
    # Two identical pools are deliberately provided to trigger the error
    workspace_memory_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "SRAM",
                [target, ethosu_target],
                PoolInfoProperties(
                    size_hint_bytes=1,
                    read_bandwidth_bytes_per_cycle=16,
                    write_bandwidth_bytes_per_cycle=16,
                    target_burst_bytes={ethosu_target: 1},
                ),
            ),
            WorkspacePoolInfo(
                "SRAM",
                [target, ethosu_target],
                PoolInfoProperties(
                    size_hint_bytes=1,
                    read_bandwidth_bytes_per_cycle=16,
                    write_bandwidth_bytes_per_cycle=16,
                    target_burst_bytes={ethosu_target: 1},
                ),
            ),
        ]
    )
    @tf.function
    def tf_graph(x):
        return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")
    _, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(tflite_model)
    mod = partition_for_ethosu(relay_module, params)
    with pytest.raises(ValueError) as e:
        with tvm.transform.PassContext(opt_level=3, config=pass_config):
            tvm.relay.build(
                mod,
                target,
                executor=executor,
                runtime=runtime,
                workspace_memory_pools=workspace_memory_pools,
                params=params,
            )
    expected_reason = "Exactly one workspace pool needs to be provided for the U55 cascader"
    on_error = "A ValueError was caught but its reason is not the expected one."
    assert expected_reason in str(e.value), on_error
def test_missing_memory_pools():
    """
    The cascader requires memory pools to be present, check the correct error
    is thrown when there aren't any.
    """
    np.random.seed(2)
    ifm_shape = (1, 80, 75, 3)
    target, _, runtime, executor, pass_config = _get_compilation_config("ethos-u55-256", True, True)

    @tf.function
    def tf_graph(x):
        return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")

    _, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(tflite_model)
    mod = partition_for_ethosu(relay_module, params)
    # Build with no workspace pools at all and expect the cascader to object
    with pytest.raises(ValueError) as error_info, tvm.transform.PassContext(
        opt_level=3, config=pass_config
    ):
        tvm.relay.build(
            mod,
            target,
            executor=executor,
            runtime=runtime,
            workspace_memory_pools=None,
            params=params,
        )
    expected_reason = "Workspace memory pool needs to be provided for the U55 cascader"
    on_error = "A ValueError was caught but its reason is not the expected one."
    assert expected_reason in str(error_info.value), on_error
def test_invalid_accelerator():
    """
    Check an error is thrown when an unsupported accelerator configuration
    is used.
    """
    np.random.seed(2)
    ifm_shape = (1, 80, 75, 3)
    # "ethos-u65-256" is deliberately unsupported by the cascader
    target, ethosu_target, runtime, executor, pass_config = _get_compilation_config(
        "ethos-u65-256", True, True
    )
    workspace_memory_pools = WorkspaceMemoryPools(
        [
            WorkspacePoolInfo(
                "SRAM",
                [target, ethosu_target],
                PoolInfoProperties(
                    size_hint_bytes=1,
                    read_bandwidth_bytes_per_cycle=16,
                    write_bandwidth_bytes_per_cycle=16,
                    target_burst_bytes={ethosu_target: 1},
                ),
            ),
        ]
    )
    @tf.function
    def tf_graph(x):
        return tf.nn.max_pool(x, (3, 3), (1, 1), "SAME")
    _, tflite_graph = infra.get_tflite_graph(tf_graph, [ifm_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, params = relay.frontend.from_tflite(tflite_model)
    mod = partition_for_ethosu(relay_module, params)
    with pytest.raises(ValueError) as e:
        with tvm.transform.PassContext(opt_level=3, config=pass_config):
            tvm.relay.build(
                mod,
                target,
                executor=executor,
                runtime=runtime,
                workspace_memory_pools=workspace_memory_pools,
                params=params,
            )
    expected_reason = "Cascading is not supported for the U65 accelerator"
    on_error = "A ValueError was caught but its reason is not the expected one."
    assert expected_reason in str(e.value), on_error
| 13,774 | 33.961929 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_depthwise2d_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.depthwise import (
match_ethosu_depthwise_conv2d,
depthwise_conv2d_compute,
)
from .infra import make_matrices
@pytest.mark.parametrize("kernel", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("dilation", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_depthwise2d_matcher(kernel, stride, dilation, padding, ifm_layout, ofm_layout):
    """Check the depthwise conv2d cascader matcher against reference matrices.

    Builds a depthwise conv2d TE compute, matches it into a cascader
    ``EthosuPart`` and asserts that its three propagators (IFM, weights,
    scale_bias) equal the transforms/offsets produced by ``make_matrices``.
    """
    ofm_channels = 57
    if ifm_layout == "NHWC":
        ifm_shape = (1, 12, 15, ofm_channels)
    else:
        # NHCWB16 layout: the channel dimension is split into ceil(c / 16) blocks of 16.
        ifm_shape = (1, 12, 1 + ((ofm_channels - 1) // 16), 15, 16)
    kernel_h, kernel_w = kernel
    ifm = te.placeholder(ifm_shape, dtype="int8")
    # Depthwise weights: one (kernel_h, kernel_w) filter per output channel.
    weight = te.placeholder((ofm_channels, kernel_h, kernel_w, 1), dtype="int8")
    scale_bias = te.placeholder((ofm_channels, 10), dtype="uint8")
    lut = te.placeholder((), dtype="uint8")
    out = depthwise_conv2d_compute(
        ifm=ifm,
        weight=weight,
        scale_bias=scale_bias,
        lut=lut,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        weight_zero_point=0,
        strides=stride,
        padding=padding,
        dilation=dilation,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        rounding_mode="TFL",
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
        ofm_dtype=ifm.dtype,
    )
    # Reference propagator matrices/offsets for the identical operator configuration.
    (
        ifm_transform,
        ifm_offset,
        weight_transform,
        weight_offset,
        scale_bias_transform,
        scale_bias_offset,
    ) = make_matrices(
        "ethosu_depthwise_conv2d",
        kernel,
        stride,
        padding,
        ifm_layout,
        ofm_layout,
        dilation,
        ofm_channels=ofm_channels,
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_depthwise_conv2d(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    # One propagator per input tensor: IFM, weights and scale_bias.
    assert len(part.propagators) == 3
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[0].offset == ifm_offset
    assert part.propagators[1].transform == weight_transform
    assert part.propagators[1].offset == weight_offset
    assert part.propagators[2].transform == scale_bias_transform
    assert part.propagators[2].offset == scale_bias_offset
if __name__ == "__main__":
    # This file only does `from tvm import te` / `import ... as cs`, so the
    # name `tvm` is not bound at module level; import it here to avoid a
    # NameError when the file is executed directly.
    import tvm.testing

    tvm.testing.main()
| 3,514 | 32.798077 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_binary_elementwise_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.binary_elementwise import (
match_ethosu_binary_elementwise,
binary_elementwise_compute,
)
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
def _make_matrices(broadcast, ifm_layout, ifm2_layout, ofm_layout, ofm_channels):
    """Build reference IFM/IFM2 propagator transform matrices for a binary
    elementwise op, accounting for broadcasting and tensor layouts.

    ``broadcast`` is a (h, w, c) tuple of 0/1 flags marking which IFM2
    dimensions are broadcast. Returns ``(ifm_matrix, ifm2_matrix)``.
    """
    broadcast_h, broadcast_w, broadcast_c = broadcast
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
    # IFM has the same shape as the OFM, so its propagator is the identity.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    # For each broadcast dimension the IFM2 extent is pinned to 1 (the constant
    # column of the affine matrix) instead of following the output dimension.
    ifm2_matrix = [
        [1, 0, 0, 0, 0],
        [0, (1 - broadcast_h), 0, 0, broadcast_h],
        [0, 0, (1 - broadcast_w), 0, broadcast_w],
        [0, 0, 0, (1 - broadcast_c), broadcast_c],
        [0, 0, 0, 0, 1],
    ]
    # Compose with the layout-transform matrices when either side uses NHCWB16.
    if ofm_layout == "NHCWB16":
        ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
        ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
    if ifm2_layout == "NHCWB16":
        ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
    return (ifm_matrix, ifm2_matrix)
@pytest.mark.parametrize(
    "ofm_shape",
    [
        [1, 12, 15, 128],
        [1, 16, 16, 16],
        [1, 1, 1, 1024],
        [1, 73, 51, 20],
        [1, 124, 172, 5],
    ],
)
@pytest.mark.parametrize("ifm2_broadcast", [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ifm2_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("op_type", ["MUL", "ADD", "MIN"])
def test_ethosu_binary_elementwise_matcher(
    ofm_shape, ifm2_broadcast, ifm_layout, ifm2_layout, ofm_layout, op_type
):
    """Check the binary elementwise cascader matcher.

    Verifies both that the matched part's propagator transforms equal the
    reference matrices from ``_make_matrices`` and that propagating the full
    output stripe config recovers the exact input shapes.
    """
    ifm_shape = ofm_shape.copy()
    # Dimensions flagged for broadcast are collapsed to extent 1 in IFM2.
    ifm2_shape = [1] + [1 if (b == 1) else a for a, b in zip(ofm_shape[1:], ifm2_broadcast)]
    ifm_channels = ifm_shape[3]
    ifm2_channels = ifm2_shape[3]
    ofm_channels = ofm_shape[3]
    nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
    broadcast = [1 if a == 1 else 0 for a in ifm2_shape[1:]]
    # Convert each NHWC shape to its NHCWB16 equivalent where requested.
    if ifm_layout == "NHCWB16":
        ifm_shape = [
            int(math.ceil(n))
            for n in np.matmul(
                nhwc_to_nhcwb16,
                ifm_shape
                + [
                    1,
                ],
            ).tolist()[:-1]
        ]
    if ifm2_layout == "NHCWB16":
        ifm2_shape = [
            int(math.ceil(n))
            for n in np.matmul(
                nhwc_to_nhcwb16,
                ifm2_shape
                + [
                    1,
                ],
            ).tolist()[:-1]
        ]
    if ofm_layout == "NHCWB16":
        ofm_shape = [
            int(math.ceil(n))
            for n in np.matmul(
                nhwc_to_nhcwb16,
                ofm_shape
                + [
                    1,
                ],
            ).tolist()[:-1]
        ]
        order = [1, 2, 4, 3, 0]
    else:
        order = [1, 2, 3, 4]
    ifm = te.placeholder(ifm_shape, dtype="int8")
    ifm2 = te.placeholder(ifm2_shape, dtype="int8")
    lut = te.placeholder((), dtype="uint8")
    out = binary_elementwise_compute(
        ifm=ifm,
        ifm2=ifm2,
        lut=lut,
        operator_type=op_type,
        ifm_scale=1,
        ifm_zero_point=0,
        ifm2_scale=1,
        ifm2_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        ifm_channels=ifm_channels,
        ifm2_channels=ifm2_channels,
        reversed_operands=False,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ifm2_layout=ifm2_layout,
        ofm_layout=ofm_layout,
        ofm_dtype="int8",
        use_rescale=False,
        rescale_scale=0,
        rescale_shift=0,
    )
    ifm_propagator = out.op.attrs["ifm_propagator"]
    ifm2_propagator = out.op.attrs["ifm2_propagator"]
    # A single stripe covering the whole output.
    offset = [0] * len(ofm_shape)
    stripes = [0] * len(ofm_shape)
    output_stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
    (ifm_transform, ifm2_transform) = _make_matrices(
        broadcast, ifm_layout, ifm2_layout, ofm_layout, ofm_channels
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_binary_elementwise(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    assert len(part.propagators) == 2
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[1].transform == ifm2_transform
    propagated_ifm = ifm_propagator.propagate(output_stripe_config).shape
    propagated_ifm2 = ifm2_propagator.propagate(output_stripe_config).shape
    # The layout transforms that have the exact number of output channels in them
    # will lose no information about the number of channels
    assert ifm_shape == propagated_ifm
    assert ifm2_shape == propagated_ifm2
if __name__ == "__main__":
    # This file only does `from tvm import te` / `import ... as cs`, so `tvm`
    # itself is unbound; import it here to avoid a NameError when run directly.
    import tvm.testing

    tvm.testing.main()
| 6,031 | 32.142857 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,invalid-name
"""
Test the cascader in the compilation flow.
"""
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.codegen import _create_cascader
from tvm.relay.backend.contrib.ethosu.tir.compiler import _lower_to_tir
from tvm.contrib.ethosu.cascader import MemoryRegion, EthosuDeviceConfig
from .. import infra as test_infra
from . import infra as cascader_test_infra
def _ethos_u55_cascader():
    """Construct a cascader pass for an Ethos-U55-256 with SRAM and FLASH regions.

    SRAM is both the I/O region and the sole working region; constants live in
    FLASH. Striping is disabled, so the cascader only handles scheduling/copies.
    """
    sram = MemoryRegion(
        name="SRAM",
        size=10**6,
        read_bandwidth=16,
        write_bandwidth=16,
        read_latency=0,
        write_latency=0,
        burst_length=1,
    )
    flash = MemoryRegion(name="FLASH", size=10**7, read_bandwidth=4, write_bandwidth=4)
    device_config = EthosuDeviceConfig("ethos-u55-256")
    cascader_options = cascader_test_infra.make_options(
        cascade_region=sram,
        max_proposals=64,
        stripe_factors=4,
        max_plan_size=10,
        max_open_plans=8,
        max_closed_plans=32,
        always_copy_size=1024,
        disable_pareto_plans=False,
        disable_pareto_proposals=False,
        enable_striping=False,
    )
    return _create_cascader(
        options=cascader_options,
        io_region=sram,
        constant_region=flash,
        working_regions=[sram],
        device_config=device_config,
    )
def _compile_model(relay_function):
    """Lower a Relay function to TIR via the Ethos-U55 cascader and return the
    resulting "main" PrimFunc."""
    module = tvm.IRModule()
    module["main"] = relay_function
    typed_module = relay.transform.InferType()(module)
    lowered = _lower_to_tir(typed_module["main"], _ethos_u55_cascader())
    return lowered[0]["main"]
def _create_single_conv2d():
    """Return a Relay function consisting of a single NPU conv2d."""
    inp = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
    conv = test_infra.make_ethosu_conv2d(inp, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
    return relay.Function(relay.analysis.free_vars(conv), conv)
def _create_double_conv2d():
    """Return a Relay function of two chained NPU conv2d operations."""
    inp = relay.var("x", shape=(1, 8, 8, 4), dtype="int8")
    first = test_infra.make_ethosu_conv2d(inp, 4, 4, (3, 3), (1, 1), (1, 1), (1, 1))
    second = test_infra.make_ethosu_conv2d(first, 4, 4, (1, 3), (1, 1), (1, 1), (1, 1))
    return relay.Function(relay.analysis.free_vars(second), second)
def _create_scalar_add():
    """Return a Relay function adding a scalar constant to a tensor on the NPU."""
    inp = relay.var("x", shape=(1, 5, 4, 3), dtype="int8")
    scalar = relay.const(np.ones((1, 1, 1, 1)), dtype="int8")
    result = test_infra.make_ethosu_binary_elementwise(
        inp, scalar, ifm_channels=3, ifm2_channels=1, operator_type="ADD", ofm_dtype="int8"
    )
    return relay.Function(relay.analysis.free_vars(result), result)
def test_single_conv_compute_cycles_hint():
    """
    Verify the "compute_cycles_hint" annotation survives the lowering flow
    for a single convolution.
    """
    primfunc = _compile_model(_create_single_conv2d())
    expected_hints = [2944, 320]
    for attr_stmt, expected_hint in zip(primfunc.body.body.seq, expected_hints):
        assert attr_stmt.attr_key == "pragma_compute_cycles_hint"
        assert attr_stmt.value == expected_hint
def test_double_conv_compute_cycles_hint():
    """
    Verify the "compute_cycles_hint" annotation survives the lowering flow
    for two chained convolutions.
    """
    primfunc = _compile_model(_create_double_conv2d())
    expected_hints = [2944, 1408, 320, 240]
    for attr_stmt, expected_hint in zip(primfunc.body.body.body.body.seq, expected_hints):
        assert attr_stmt.attr_key == "pragma_compute_cycles_hint"
        assert attr_stmt.value == expected_hint
def test_scalar_add_compute_cycles_hint():
    """
    Verify the "compute_cycles_hint" annotation survives the lowering flow
    for an add with a scalar operand.
    """
    primfunc = _compile_model(_create_scalar_add())
    expected_hints = [16, 24]
    for attr_stmt, expected_hint in zip(primfunc.body.body.seq, expected_hints):
        assert attr_stmt.attr_key == "pragma_compute_cycles_hint"
        assert attr_stmt.value == expected_hint
| 4,838 | 32.604167 | 89 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_plan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm.contrib.ethosu.cascader as cs
import pytest
def test_plan(DRAM, SRAM):
    """Check that a Plan's constructor arguments are exposed unchanged.

    Builds a minimal one-part graph (tensor_1 -> part -> tensor_2), wraps the
    tensors in TensorConfigs and asserts every Plan property round-trips.
    ``DRAM``/``SRAM`` are MemoryRegion fixtures.
    """
    subgraph = cs.TESubgraph([], None)
    # Identity propagator: input stripes mirror output stripes.
    part = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([10, 10], "uint8")
    tensor_2 = cs.Tensor([10, 10], "uint8")
    # Wire up tensor_1 -> part -> tensor_2.
    part.set_input(0, tensor_1)
    part.set_output(tensor_2)
    tensor_1.add_consumer(part)
    tensor_2.add_producer(part)
    # Split the 10x10 output into four 5x5 stripes.
    output_stripe_config = cs.StripeConfig(
        shape=[5, 5],
        extent=[10, 10],
        strides=[5, 5],
        order=[1, 2],
        stripes=[2, 2],
        offset=[0, 0],
    )
    tensor_config_out = cs.TensorConfig(
        tensor=tensor_2,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[output_stripe_config],
        copy_tensor=False,
    )
    input_stripe_config = part.calculate_input_stripe_configs(output_stripe_config)[0]
    tensor_config_in = cs.TensorConfig(
        tensor=tensor_1,
        home_region=DRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_config],
        copy_tensor=False,
    )
    tensor_configs = {tensor_1: tensor_config_in, tensor_2: tensor_config_out}
    open_configs = frozenset([tensor_config_in])
    part_group = frozenset([part])
    interior_region = SRAM
    memory_usage = 100
    cycles = 20
    plan = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_out,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    # Every constructor argument must be retrievable unchanged.
    assert plan.tensor_configs == tensor_configs
    assert plan.open_configs == open_configs
    assert plan.output_config == tensor_config_out
    assert plan.part_group == part_group
    assert plan.interior_region == interior_region
    assert plan.memory_usage == memory_usage
    assert plan.cycles == cycles
def test_plan_merge(DRAM, SRAM):
    """Check that merging two Plans combines configs, parts, memory and cycles.

    Builds a two-part graph (part_1 feeds part_2 through tensor_2), creates
    one Plan around each part sharing tensor_2 as an open config, merges them
    and asserts the union of tensor configs, the closing of the shared open
    config, and the summing of memory usage and cycles.
    """
    subgraph = cs.TESubgraph([], None)
    # part_1: single-input part with a 2x downscaling propagator.
    part_1 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[2, 0, 0], [0, 2, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    # part_2: three inputs — a data tensor, a constant-extent tensor and a 1D tensor.
    part_2 = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[0, 0, 6], [0, 0, 6], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[1, 0], [0, 1]],
                [0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([20, 20], "uint8")
    tensor_2 = cs.Tensor([10, 10], "uint8")
    tensor_3 = cs.Tensor([6, 6], "uint8")
    tensor_4 = cs.Tensor([10], "uint8")
    tensor_5 = cs.Tensor([10, 10], "uint8")
    # Wire up tensor_1 -> part_1 -> tensor_2 -> part_2 -> tensor_5,
    # with tensor_3/tensor_4 as extra inputs to part_2.
    part_1.set_input(0, tensor_1)
    part_1.set_output(tensor_2)
    tensor_1.add_consumer(part_1)
    tensor_2.add_producer(part_1)
    part_2.set_input(0, tensor_2)
    part_2.set_input(1, tensor_3)
    part_2.set_input(2, tensor_4)
    part_2.set_output(tensor_5)
    tensor_2.add_consumer(part_2)
    tensor_3.add_consumer(part_2)
    tensor_4.add_consumer(part_2)
    tensor_5.add_producer(part_2)
    output_stripe_config = cs.StripeConfig(
        shape=[5, 5],
        extent=[10, 10],
        strides=[5, 5],
        order=[1, 2],
        stripes=[2, 2],
        offset=[0, 0],
    )
    tensor_config_5 = cs.TensorConfig(
        tensor=tensor_5,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[output_stripe_config],
        copy_tensor=False,
    )
    input_stripe_configs = part_2.calculate_input_stripe_configs(output_stripe_config)
    tensor_config_4 = cs.TensorConfig(
        tensor=tensor_4,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[input_stripe_configs[2]],
        copy_tensor=False,
    )
    tensor_config_3 = cs.TensorConfig(
        tensor=tensor_3,
        home_region=SRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.RECOMPUTE,
        stripe_configs=[input_stripe_configs[1]],
        copy_tensor=False,
    )
    # tensor_2 is the shared tensor: open in both plans before the merge.
    tensor_config_2 = cs.TensorConfig(
        tensor=tensor_2,
        home_region=SRAM,
        state=cs.TensorConfigState.INTERIOR,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_configs[0]],
        copy_tensor=False,
    )
    input_stripe_config = part_1.calculate_input_stripe_configs(input_stripe_configs[0])[0]
    tensor_config_1 = cs.TensorConfig(
        tensor=tensor_1,
        home_region=DRAM,
        state=cs.TensorConfigState.BOUNDARY,
        buffer_mode=cs.BufferMode.ROLLING,
        stripe_configs=[input_stripe_config],
        copy_tensor=False,
    )
    # Plan 1 covers part_1 with tensor_2 left open.
    tensor_configs = {tensor_1: tensor_config_1, tensor_2: tensor_config_2}
    open_configs = frozenset([tensor_config_2])
    part_group = frozenset([part_1])
    interior_region = SRAM
    memory_usage = 100
    cycles = 20
    plan_1 = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_2,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    # Plan 2 covers part_2 with tensor_2 and tensor_3 open.
    tensor_configs = {
        tensor_2: tensor_config_2,
        tensor_3: tensor_config_3,
        tensor_4: tensor_config_4,
        tensor_5: tensor_config_5,
    }
    open_configs = frozenset([tensor_config_2, tensor_config_3])
    part_group = frozenset([part_2])
    interior_region = SRAM
    memory_usage = 200
    cycles = 30
    plan_2 = cs.Plan(
        tensor_configs=tensor_configs,
        open_configs=open_configs,
        output_config=tensor_config_5,
        part_group=part_group,
        interior_region=interior_region,
        memory_usage=memory_usage,
        cycles=cycles,
    )
    merged_plan = plan_1.merge(plan_2)
    # The merge unions the tensor configs ...
    assert merged_plan.tensor_configs == {
        tensor_1: tensor_config_1,
        tensor_2: tensor_config_2,
        tensor_3: tensor_config_3,
        tensor_4: tensor_config_4,
        tensor_5: tensor_config_5,
    }
    # ... closes the shared open config (tensor_2), keeping only tensor_3 open ...
    assert merged_plan.open_configs == frozenset([tensor_config_3])
    assert merged_plan.output_config == tensor_config_5
    assert merged_plan.part_group == frozenset([part_1, part_2])
    assert merged_plan.interior_region == interior_region
    # ... and accumulates memory usage and cycle counts.
    assert merged_plan.memory_usage == plan_1.memory_usage + plan_2.memory_usage
    assert merged_plan.cycles == plan_1.cycles + plan_2.cycles
if __name__ == "__main__":
    # `import tvm.contrib.ethosu.cascader as cs` binds only `cs`, so the name
    # `tvm` is undefined here; import it to avoid a NameError when run directly.
    import tvm.testing

    tvm.testing.main()
| 7,703 | 30.444898 | 91 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_identity_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.identity import match_ethosu_identity, identity_compute
from .infra import make_matrices
def test_ethosu_identity_matcher():
    """Check that the identity-op cascader matcher yields an EthosuPart with a
    single identity propagator (identity transform, zero offset)."""
    ofm_channels = 21
    ifm_shape = (1, 12, 15, ofm_channels)
    ifm = te.placeholder(ifm_shape, dtype="int8")
    lut = te.placeholder((), dtype="uint8")
    out = identity_compute(
        ifm=ifm,
        lut=lut,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        activation="NONE",
    )
    # Identity op: the propagator is the (rank+1) identity matrix with no offset.
    length = len(ifm.shape)
    ifm_transform = np.identity(length + 1).tolist()
    ifm_offset = np.zeros(length, dtype="int64").tolist()
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_identity(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    assert len(part.propagators) == 1
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[0].offset == ifm_offset
if __name__ == "__main__":
    # This file only does `from tvm import te` / `import ... as cs`, so `tvm`
    # itself is unbound; import it here to avoid a NameError when run directly.
    import tvm.testing

    tvm.testing.main()
| 1,928 | 31.694915 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_tensor_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.contrib.ethosu.cascader import (
StripeConfig,
Tensor,
MemoryRegion,
TensorConfig,
TensorConfigState,
BufferMode,
)
import pytest
def test_tensor_config(DRAM, SRAM):
    """Check that TensorConfig exposes all constructor arguments unchanged and
    is hashable. ``DRAM``/``SRAM`` are MemoryRegion fixtures."""
    stripe_config = StripeConfig(
        shape=[1, 2, 3],
        extent=[2, 3, 4],
        strides=[3, 4, 5],
        order=[4, 5, 6],
        stripes=[5, 6, 7],
        offset=[6, 7, 8],
    )
    tensor = Tensor(
        shape=[10, 10, 10],
        dtype="int8",
    )
    home_region = DRAM
    state = TensorConfigState.BOUNDARY
    buffer_mode = BufferMode.ROLLING
    # Tensor is copied from its DRAM home into SRAM.
    copy_tensor = True
    copy_region = SRAM
    tensor_config = TensorConfig(
        tensor=tensor,
        home_region=home_region,
        state=state,
        buffer_mode=buffer_mode,
        stripe_configs=[stripe_config],
        copy_tensor=copy_tensor,
        copy_region=copy_region,
    )
    # Every constructor argument must round-trip through its property.
    assert tensor_config.tensor == tensor
    assert tensor_config.home_region == home_region
    assert tensor_config.state == state
    assert tensor_config.buffer_mode == buffer_mode
    assert tensor_config.stripe_configs == [stripe_config]
    assert tensor_config.copy_tensor == copy_tensor
    assert tensor_config.copy_region == copy_region
    assert hash(tensor_config) != 0
def test_get_rolling_buffer(DRAM):
    """Check the buffer size computed for a ROLLING buffer mode config.

    The expected value (2016) is the precomputed rolling-buffer footprint for
    this stripe config, int32 tensor and 0.5 compression ratio.
    """
    stripe_config = StripeConfig(
        shape=[9, 4, 7],
        extent=[9, 16, 21],
        strides=[3, 5, 7],
        order=[1, 3, 2],
        stripes=[1, 3, 3],
        offset=[0, 0, 0],
    )
    tensor = Tensor(shape=[9, 16, 21], dtype="int32", compression_ratio=0.5)
    tensor_config = TensorConfig(
        tensor=tensor,
        home_region=DRAM,
        state=TensorConfigState.BOUNDARY,
        buffer_mode=BufferMode.ROLLING,
        stripe_configs=[stripe_config],
    )
    assert tensor_config.get_buffer_size() == 2016
def test_get_recompute_buffer(DRAM):
    """Check the buffer size computed for a RECOMPUTE buffer mode config.

    The expected value (280) is the precomputed single-stripe footprint for
    this stripe config, int32 tensor and 0.5 compression ratio.
    """
    stripe_config = StripeConfig(
        shape=[4, 5, 7],
        extent=[6, 7, 14],
        strides=[2, 3, 7],
        order=[1, 3, 2],
        stripes=[2, 2, 2],
        offset=[0, 0, 0],
    )
    tensor = Tensor(shape=[6, 7, 14], dtype="int32", compression_ratio=0.5)
    tensor_config = TensorConfig(
        tensor=tensor,
        home_region=DRAM,
        state=TensorConfigState.BOUNDARY,
        buffer_mode=BufferMode.RECOMPUTE,
        stripe_configs=[stripe_config],
    )
    assert tensor_config.get_buffer_size() == 280
if __name__ == "__main__":
    # This file never imports `tvm` itself (only names from
    # tvm.contrib.ethosu.cascader), so `tvm.testing.main()` previously raised
    # a NameError when the file was run directly.
    import tvm.testing

    tvm.testing.main()
| 3,250 | 28.288288 | 76 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/infra.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
ethosu_enabled = True
try:
import ethosu.vela
except ImportError:
ethosu_enabled = False
import tvm
from tvm import relay
import tvm.contrib.ethosu.cascader as cs
import numpy as np
def make_options(
    cascade_region: cs.MemoryRegion,
    max_proposals: int = 1,
    stripe_factors: int = 1,
    max_plan_size: int = 1,
    max_open_plans: int = 8,
    max_closed_plans: int = 32,
    always_copy_size: int = 1024,
    disable_pareto_plans: bool = False,
    disable_pareto_proposals: bool = False,
    enable_striping: bool = True,
):
    """Build a CascaderOptions with test-friendly defaults.

    Thin keyword-forwarding wrapper around ``cs.CascaderOptions`` so tests
    only need to specify the options they care about.
    """
    return cs.CascaderOptions(
        cascade_region=cascade_region,
        max_proposals=max_proposals,
        stripe_factors=stripe_factors,
        max_plan_size=max_plan_size,
        max_open_plans=max_open_plans,
        max_closed_plans=max_closed_plans,
        always_copy_size=always_copy_size,
        disable_pareto_plans=disable_pareto_plans,
        disable_pareto_proposals=disable_pareto_proposals,
        enable_striping=enable_striping,
    )
def make_simple_home_map(graph, var_region, const_region):
home_map = {}
for tensor in graph.tensor_order:
if tensor.is_constant:
home_map[tensor] = [const_region]
else:
home_map[tensor] = [var_region]
return home_map
if ethosu_enabled:
from tvm.relay.backend.contrib.ethosu.tir.compiler import extract_constants, lower_to_te
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
    def create_te_graph(func):
        """Lower a Relay function to a TE graph, returning (te_graph, consts).

        Constants are extracted from the function first so they can be handled
        separately by the caller.
        """
        func, consts = extract_constants(func)
        mod = tvm.IRModule.from_expr(func)
        # Re-run type inference after constant extraction before lowering.
        func = relay.transform.InferType()(mod)["main"]
        te_graph = lower_to_te(func)
        return te_graph, consts
    def make_matrices(
        op_type,
        kernel,
        stride,
        padding,
        ifm_layout,
        ofm_layout,
        dilation=(1, 1),
        ifm_channels=1,
        ofm_channels=1,
    ):
        """Build the reference propagator matrices/offsets for an NPU operator.

        Supports "ethosu_conv2d", "ethosu_depthwise_conv2d" and
        "ethosu_pooling" and returns ``(ifm_matrix, ifm_offset, weight_matrix,
        weight_offset, scale_bias_matrix, scale_bias_offset)``, composed with
        the NHWC<->NHCWB16 layout transforms where requested.
        """
        kernel_h, kernel_w = kernel
        stride_h, stride_w = stride
        dilation_h, dilation_w = dilation
        # Effective kernel extent once dilation is applied.
        dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
        dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
        nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
        if op_type == "ethosu_conv2d":
            # Conv2d reads all IFM channels per output element.
            ifm_matrix = [
                [1, 0, 0, 0, 0],
                [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
                [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
                [0, 0, 0, 0, ifm_channels],
                [0, 0, 0, 0, 1],
            ]
            weight_matrix = [
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, kernel_h],
                [0, 0, 0, 0, kernel_w],
                [0, 0, 0, 0, ifm_channels],
                [0, 0, 0, 0, 1],
            ]
        elif op_type == "ethosu_depthwise_conv2d":
            # Depthwise: channels map one-to-one between IFM and OFM.
            ifm_matrix = [
                [1, 0, 0, 0, 0],
                [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
                [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ]
            weight_matrix = [
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, kernel_h],
                [0, 0, 0, 0, kernel_w],
                [0, 0, 0, 0, 1],
                [0, 0, 0, 0, 1],
            ]
        elif op_type == "ethosu_pooling":
            # Pooling has no weights; the weight matrix is all zeros.
            ifm_matrix = [
                [1, 0, 0, 0, 0],
                [0, stride_h, 0, 0, (dilated_kernel_h - stride_h)],
                [0, 0, stride_w, 0, (dilated_kernel_w - stride_w)],
                [0, 0, 0, 1, 0],
                [0, 0, 0, 0, 1],
            ]
            weight_matrix = [
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
                [0, 0, 0, 0, 0],
            ]
        scale_bias_matrix = [
            [0, 0, 0, 1, 0],
            [0, 0, 0, 0, 10],
            [0, 0, 0, 0, 1],
        ]
        # Compose with the layout transform matrices for NHCWB16 operands.
        if ofm_layout == "NHCWB16":
            ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
            weight_matrix = np.matmul(weight_matrix, nhcwb16_to_nhwc).tolist()
            scale_bias_matrix = np.matmul(scale_bias_matrix, nhcwb16_to_nhwc).tolist()
        if ifm_layout == "NHCWB16":
            ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
        # Padding shifts the IFM read window by the top/left pad amounts.
        ifm_offset = (
            [0, -padding[0], -padding[1], 0]
            if ifm_layout == "NHWC"
            else [0, -padding[0], 0, -padding[1], 0]
        )
        weight_offset = [0, 0, 0, 0]
        scale_bias_offset = [0, 0]
        return (
            ifm_matrix,
            ifm_offset,
            weight_matrix,
            weight_offset,
            scale_bias_matrix,
            scale_bias_offset,
        )
| 5,633 | 32.535714 | 92 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_part.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
from tvm.contrib.ethosu.cascader.graph import BufferMode
from tvm.contrib.ethosu.cascader.parts import EthosuPart
def test_ethosu_part():
te_subgraph = cs.TESubgraph([], None)
output_quantum = [1, 2, 2, 8]
propagator = cs.Propagator(
[[1, 0, 0, 0, 2], [0, 1, 0, 0, 2], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1]],
[0, 0, 0, 0],
)
stripe_config = cs.StripeConfig(
[1, 4, 4, 16], [1, 64, 72, 96], [1, 4, 4, 16], [1, 2, 3, 4], [1, 16, 13, 6], [0, 0, 0, 0]
)
subkernels = 3
valid_block_configs = [cs.BlockConfig([1, 2, 4, 16], [1, 2, 4, 16], 15000, 7500)]
part = EthosuPart(
te_subgraph,
[propagator],
output_quantum,
subkernels,
valid_block_configs,
1,
)
input_tensor = cs.Tensor(shape=[1, 66, 74, 16], dtype="int8")
part.set_input(0, input_tensor)
output_tensor = cs.Tensor(shape=[1, 66, 74, 16], dtype="int8")
part.set_output(output_tensor)
assert part.get_stripe_align_hint() == output_quantum
# Check that the performance model runs, don't verify output
part.get_performance_info(stripe_config, BufferMode.ROLLING)
part.get_performance_info(stripe_config, BufferMode.RECOMPUTE)
if __name__ == "__main__":
tvm.testing.main()
| 2,164 | 34.491803 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_propagator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from math import isclose
from tvm.contrib.ethosu.cascader import StripeConfig, Propagator
def test_propagator():
transform = [
[1, 0, 0, 0],
[0, 1 / 2, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1],
]
offset = [-1, 1, 2]
propagator = Propagator(
transform=transform,
offset=offset,
)
assert list(propagator.offset) == offset
for i, row in enumerate(transform):
for j, value in enumerate(row):
assert isclose(propagator.transform[i][j], value)
@pytest.mark.parametrize(
    ["propagator", "input_stripe_config", "output_stripe_config"],
    [
        # Case 1: rank-changing transform — one axis is scaled by 1/16 and a
        # trailing axis fixed at 16 is appended (NHCWB16-style split of a
        # channel axis into blocks of 16 — note the 0 stride / single stripe on
        # the new fixed axis).
        (
            Propagator(
                transform=[
                    [1, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 0, 1 / 16, 0],
                    [0, 0, 1, 0, 0],
                    [0, 0, 0, 0, 16],
                    [0, 0, 0, 0, 1],
                ],
                offset=[0, 0, 0, 0, 0],
            ),
            StripeConfig(
                shape=[1, 12, 14, 36],
                extent=[1, 24, 18, 72],
                strides=[1, 12, 14, 36],
                order=[1, 2, 3, 4],
                stripes=[1, 2, 2, 2],
                offset=[0, 0, 0, 0],
            ),
            StripeConfig(
                shape=[1, 12, 3, 14, 16],
                extent=[1, 24, 5, 18, 16],
                strides=[1, 12, 2.25, 14, 0],
                order=[1, 2, 4, 3, 0],
                stripes=[1, 2, 2, 2, 1],
                offset=[0, 0, 0, 0, 0],
            ),
        ),
        # Case 2: uniform 0.5x scaling of both axes; shapes/extents are halved
        # (rounded up) and strides become fractional.
        (
            Propagator(
                transform=[
                    [0.5, 0, 0],
                    [0, 0.5, 0],
                    [0, 0, 1],
                ],
                offset=[0, 0],
            ),
            StripeConfig(
                shape=[3, 5],
                extent=[27, 50],
                strides=[3, 5],
                order=[1, 2],
                stripes=[9, 10],
                offset=[0, 0],
            ),
            StripeConfig(
                shape=[2, 3],
                extent=[14, 25],
                strides=[1.5, 2.5],
                order=[1, 2],
                stripes=[9, 10],
                offset=[0, 0],
            ),
        ),
        # Case 3: affine transform with constant terms (last column) and a
        # negative offset; the third output axis is constant (shape 8, one
        # stripe, stride 0) and the propagator's offset appears verbatim in
        # the result.
        (
            Propagator(
                transform=[
                    [2, 0, 0, 4],
                    [0, 1, 0, 2],
                    [0, 0, 0, 8],
                    [0, 0, 0, 1],
                ],
                offset=[-2, -1, 0],
            ),
            StripeConfig(
                shape=[4, 6, 32],
                extent=[48, 60, 64],
                strides=[4, 6, 32],
                order=[1, 2, 3],
                stripes=[12, 10, 2],
                offset=[0, 0, 0],
            ),
            StripeConfig(
                shape=[12, 8, 8],
                extent=[100, 62, 8],
                strides=[8, 6, 0],
                order=[1, 2, 0],
                stripes=[12, 10, 1],
                offset=[-2, -1, 0],
            ),
        ),
    ],
)
def test_propagate(propagator, input_stripe_config, output_stripe_config):
    """Propagating a stripe config through a Propagator must yield the expected config."""
    result_stripe_config = propagator.propagate(input_stripe_config)
    assert result_stripe_config == output_stripe_config
if __name__ == "__main__":
    # Only names *from* tvm are imported at module level, so the bare name
    # ``tvm`` is unbound here and ``tvm.testing.main()`` would raise a
    # NameError when the file is executed as a script. Import it locally.
    import tvm.testing

    tvm.testing.main()
| 4,150 | 29.29927 | 74 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_conv2d_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.convolution import match_ethosu_conv2d, conv2d_compute
from .infra import make_matrices
@pytest.mark.parametrize("kernel", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("dilation", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_channels", [8, 57])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_conv2d_matcher(
    kernel, stride, dilation, padding, ifm_channels, ifm_layout, ofm_layout
):
    """Check that ``match_ethosu_conv2d`` recognises a conv2d TE compute and
    builds an EthosuPart whose three propagators (IFM, weights, scale/bias)
    match the reference matrices produced by ``make_matrices``."""
    if ifm_layout == "NHWC":
        ifm_shape = (1, 12, 15, ifm_channels)
    else:
        # NHCWB16 layout: the channel axis is split into ceil(C / 16) blocks of 16.
        ifm_shape = (1, 12, 1 + ((ifm_channels - 1) // 16), 15, 16)
    ofm_channels = 8
    kernel_h, kernel_w = kernel
    # Build placeholder inputs for the conv2d compute.
    ifm = te.placeholder(ifm_shape, dtype="int8")
    weight = te.placeholder((ofm_channels, kernel_h, kernel_w, ifm_channels), dtype="int8")
    scale_bias = te.placeholder((ofm_channels, 10), dtype="uint8")
    lut = te.placeholder((), dtype="uint8")
    out = conv2d_compute(
        ifm=ifm,
        weight=weight,
        scale_bias=scale_bias,
        lut=lut,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        weight_zero_point=0,
        strides=stride,
        padding=padding,
        dilation=dilation,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        upscale="NONE",
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    # Reference transform matrices and offsets computed independently of the matcher.
    (
        ifm_transform,
        ifm_offset,
        weight_transform,
        weight_offset,
        scale_bias_transform,
        scale_bias_offset,
    ) = make_matrices(
        "ethosu_conv2d",
        kernel,
        stride,
        padding,
        ifm_layout,
        ofm_layout,
        dilation,
        ifm_channels,
        ofm_channels,
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_conv2d(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    # One propagator per input: IFM, weights, scale/bias.
    assert len(part.propagators) == 3
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[0].offset == ifm_offset
    assert part.propagators[1].transform == weight_transform
    assert part.propagators[1].offset == weight_offset
    assert part.propagators[2].transform == scale_bias_transform
    assert part.propagators[2].offset == scale_bias_offset
@pytest.mark.parametrize(
    "ifm_layout, ofm_layout, ifm_channels, expected_cycles",
    [
        ("NHWC", "NHWC", 24, 2304),
        ("NHCWB16", "NHWC", 12, 2352),
        ("NHWC", "NHCWB16", 38, 7056),
        ("NHCWB16", "NHCWB16", 55, 4608),
    ],
)
def test_ethosu_conv2d_block_config_from_matcher(
    ifm_layout, ofm_layout, ifm_channels, expected_cycles
):
    """Check that the block config chosen by a matched conv2d part has the
    expected compute cycle count for various layout/channel combinations."""
    ofm_channels = 10
    ifm_height = 123
    ifm_width = 155
    # NHCWB16 splits the channel axis into ceil(C / 16) blocks of 16.
    ifm_shape = (
        (1, ifm_height, ifm_width, ifm_channels)
        if ifm_layout == "NHWC"
        else (1, ifm_height, 1 + ((ifm_channels - 1) // 16), ifm_width, 16)
    )
    weight_shape = (ofm_channels, 3, 3, ifm_channels)
    scale_bias_shape = (ofm_channels, 10)
    ifm = te.placeholder(ifm_shape, dtype="int8")
    weight = te.placeholder(weight_shape, dtype="int8")
    scale_bias = te.placeholder(scale_bias_shape, dtype="uint8")
    lut = te.placeholder((), dtype="uint8")
    out = conv2d_compute(
        ifm=ifm,
        weight=weight,
        scale_bias=scale_bias,
        lut=lut,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        weight_zero_point=0,
        strides=(1, 1),
        padding=(0, 0, 0, 0),
        dilation=(1, 1),
        activation="NONE",
        clip_min=0,
        clip_max=0,
        upscale="NONE",
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_conv2d(out, device_config)
    ofm_shape = [int(i) for i in part.subgraph.output_tensor.shape]
    # Add inputs and outputs to the part
    input_tensor = cs.Tensor(ifm_shape, "int8")
    part.set_input(0, input_tensor)
    weight_tensor = cs.Tensor(weight_shape, "int8")
    part.set_input(1, weight_tensor)
    scale_bias_tensor = cs.Tensor(scale_bias_shape, "int8")
    part.set_input(2, scale_bias_tensor)
    output_tensor = cs.Tensor(ofm_shape, "int8")
    part.set_output(output_tensor)
    # Create a stripe of a size of the output tensor
    order = [1, 2, 3, 4] if ofm_layout == "NHWC" else [1, 2, 4, 3, 0]
    stripes = [1] * len(order)
    offset = [0] * len(order)
    stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
    block = part.get_block_config(stripe_config)
    # Since we dont know the values of the variables we passed to the get_valid_block_configs in
    # the matcher, best we can do is to verify the compute cycle count since the channels have a
    # significant effect on it
    assert block.compute_cycles == expected_cycles
if __name__ == "__main__":
    # This module imports ``te`` and ``cs`` but never binds the bare name
    # ``tvm``, so calling ``tvm.testing.main()`` directly would raise a
    # NameError when run as a script. Import it locally first.
    import tvm.testing

    tvm.testing.main()
| 6,107 | 32.377049 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_stripe_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
from tvm.contrib.ethosu.cascader.stripe_config import StripeConfig, count_stripes
def test_stripe_config():
    """StripeConfig must store every constructor argument and hash stably."""
    params = {
        "shape": [1, 2, 3],
        "extent": [2, 3, 4],
        "strides": [3, 4, 5],
        "order": [4, 5, 6],
        "stripes": [5, 6, 7],
        "offset": [6, 7, 8],
    }
    # Known-good hash value; guards against accidental changes to hashing.
    expected_hash = 3107995860559090954
    config = StripeConfig(**params)
    for attr, expected in params.items():
        assert getattr(config, attr) == expected
    assert hash(config) == expected_hash
@pytest.mark.parametrize(
    "mismatch", [None, "shape", "extent", "strides", "order", "stripes", "offset"]
)
def test_stripe_config_equal(mismatch):
    """Equality requires all fields to match: perturbing any single field
    must make two otherwise-identical StripeConfigs compare unequal."""
    kwargs = {
        "shape": [1, 2, 3],
        "extent": [2, 3, 4],
        "strides": [3, 4, 5],
        "order": [4, 5, 6],
        "stripes": [5, 6, 7],
        "offset": [6, 7, 8],
    }
    reference = StripeConfig(**kwargs)
    if mismatch:
        # Overwrite exactly one field and expect inequality.
        kwargs[mismatch] = [1, 1, 1]
        assert reference != StripeConfig(**kwargs)
    else:
        assert reference == StripeConfig(**kwargs)
@pytest.mark.parametrize(
    ["stripe_config", "expected_stripe_counts"],
    [
        # Perfect tiling: 9x9x9 extent covered exactly by 3x3x3 stripes.
        (
            StripeConfig(
                shape=[3, 3, 3],
                extent=[9, 9, 9],
                strides=[3, 3, 3],
                order=[1, 2, 3],
                stripes=[3, 3, 3],
                offset=[0, 0, 0],
            ),
            {
                (3, 3, 3): 27,
            },
        ),
        # Strides smaller than the extent remainder: stripes at the far edge
        # are clipped to the 10x10 extent, giving mixed stripe sizes.
        (
            StripeConfig(
                shape=[3, 3],
                extent=[10, 10],
                strides=[2, 2],
                order=[1, 2],
                stripes=[5, 5],
                offset=[0, 0],
            ),
            {
                (3, 3): 16,
                (2, 3): 4,
                (3, 2): 4,
                (2, 2): 1,
            },
        ),
        # Zero stride / single stripe on the last axis: that axis is covered
        # in full by every stripe.
        (
            StripeConfig(
                shape=[3, 3, 9],
                extent=[9, 9, 9],
                strides=[3, 3, 0],
                order=[1, 2, 3],
                stripes=[3, 3, 1],
                offset=[0, 0, 0],
            ),
            {
                (3, 3, 9): 9,
            },
        ),
        # Extent not a multiple of the stripe shape: trailing stripes are
        # clipped from 5 to 3 in each dimension.
        (
            StripeConfig(
                shape=[5, 5],
                extent=[8, 8],
                strides=[5, 5],
                order=[1, 2],
                stripes=[2, 2],
                offset=[0, 0],
            ),
            {
                (5, 5): 1,
                (3, 5): 1,
                (5, 3): 1,
                (3, 3): 1,
            },
        ),
        # Negative offsets: leading stripes are clipped at the low edge too.
        (
            StripeConfig(
                shape=[5, 5],
                extent=[8, 8],
                strides=[5, 5],
                order=[1, 2],
                stripes=[2, 2],
                offset=[-1, -2],
            ),
            {
                (4, 3): 2,
                (4, 5): 2,
            },
        ),
        # Larger mixed case: clipping at both edges with many stripe sizes.
        (
            StripeConfig(
                shape=[13, 7],
                extent=[128, 73],
                strides=[13, 7],
                order=[1, 2],
                stripes=[11, 12],
                offset=[-10, -5],
            ),
            {
                (3, 1): 1,
                (3, 2): 1,
                (8, 7): 10,
                (8, 2): 1,
                (13, 7): 90,
                (13, 1): 9,
                (8, 1): 1,
                (3, 7): 10,
                (13, 2): 9,
            },
        ),
    ],
)
def test_count_stripes(stripe_config, expected_stripe_counts):
    """count_stripes must report how many stripes of each clipped size occur."""
    assert count_stripes(stripe_config) == expected_stripe_counts
@pytest.mark.parametrize(
    ["stripe_config", "expected_stripe_counts"],
    [
        # Overlapping 4x4 stripes with stride 2: in sliding-window mode only
        # the non-overlapping (new) portion of each stripe is counted after
        # the first full stripe.
        (
            StripeConfig(
                shape=[4, 4],
                extent=[16, 16],
                strides=[2, 2],
                order=[1, 2],
                stripes=[7, 7],
                offset=[0, 0],
            ),
            {
                (4, 4): 1,
                (2, 4): 6,
                (4, 2): 6,
                (2, 2): 36,
            },
        ),
        # Negative offset combined with overlap: leading stripes are clipped
        # by the extent boundary as well as by the sliding window.
        (
            StripeConfig(
                shape=[4, 4],
                extent=[8, 8],
                strides=[2, 2],
                order=[1, 2],
                stripes=[6, 3],
                offset=[-5, 0],
            ),
            {
                (1, 4): 2,
                (2, 4): 3,
                (2, 2): 6,
                (1, 2): 4,
            },
        ),
    ],
)
def test_count_stripes_sliding_window(stripe_config, expected_stripe_counts):
    """With enable_sliding_window, overlapping regions are only counted once."""
    assert count_stripes(stripe_config, enable_sliding_window=True) == expected_stripe_counts
if __name__ == "__main__":
    # The module never binds the bare name ``tvm`` (only names imported from
    # it), so import the testing entry point locally to avoid a NameError
    # when executed as a script.
    import tvm.testing

    tvm.testing.main()
| 5,835 | 26.018519 | 93 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_pareto.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.tir import IntImm
from tvm.contrib.ethosu.cascader.pareto import (
_get_pareto_frontier,
_thin_vector,
_pareto_cull_plans,
)
from tvm.contrib.ethosu.cascader import (
Plan,
StripeConfig,
TensorConfig,
TensorConfigState,
BufferMode,
Tensor,
)
import pytest
import numpy as np
def _ref_get_pareto_frontier(costs):
is_efficient = np.ones(costs.shape[0], dtype=bool)
for i, c in enumerate(costs):
if is_efficient[i]:
is_efficient[is_efficient] = np.any(
costs[is_efficient] < c, axis=1
) # Keep any point with a lower cost
is_efficient[i] = True # And keep self
return is_efficient
def _ref_thin_vector(vec, max_size):
if max_size < 1:
return []
if len(vec) <= max_size or len(vec) == 0:
return vec
if max_size == 1:
return [vec[0]]
samples = np.linspace(0, len(vec), max_size - 1, endpoint=False).astype(int)
samples = np.append(samples, len(vec) - 1)
return vec[samples]
def _ref_pareto_cull_plans(plans, points):
    """Reference implementation of Pareto plan culling.

    Sorts plans by memory usage, keeps only those on the Pareto frontier of
    (memory_usage, cycles), then thins the survivors down to ``points``.
    """
    if len(plans) <= points:
        return plans
    ordered = np.array(sorted(plans, key=lambda p: p.memory_usage))
    cost_rows = [np.array([p.memory_usage, p.cycles]) for p in ordered]
    mask = _ref_get_pareto_frontier(np.array(cost_rows))
    frontier = ordered[mask]
    # Only thin if the frontier still exceeds the requested number of points.
    if len(frontier) <= points:
        return frontier
    return _ref_thin_vector(np.array(frontier), points)
@pytest.mark.parametrize("num_costs", [1, 10, 30, 100, 300, 1000])
def test_get_pareto_frontier(num_costs):
    """The bound implementation must agree with the numpy reference on
    randomly generated 2-D cost tables of various sizes."""
    cost_low = 1
    cost_high = 100
    dims = 2
    costs = [
        list(np.random.randint(cost_low, cost_high, size=(dims,))) for _ in range(num_costs)
    ]
    reference = list(_ref_get_pareto_frontier(np.array(costs)))
    assert _get_pareto_frontier(costs) == reference
@pytest.mark.parametrize("vec_length", [0, 1, 10, 25, 100])
@pytest.mark.parametrize("max_size", [0, 1, 2, 5, 11, 51])
def test_thin_vector(vec_length, max_size):
    """The bound vector-thinning implementation must agree with the numpy
    reference across a grid of vector lengths and target sizes."""
    # Build a vector of IntImm objects, which is what the C++ side expects.
    vector = [IntImm("int32", idx) for idx in range(vec_length)]
    reference = list(_ref_thin_vector(np.array(vector), max_size))
    assert _thin_vector(vector, max_size) == reference
@pytest.mark.parametrize("num_plans", [0, 1, 10, 25, 100])
@pytest.mark.parametrize("max_plans", [0, 1, 2, 5, 11, 51])
def test_pareto_cull_plans(num_plans, max_plans, SRAM):
    """Check the bound plan-culling implementation against the numpy
    reference on randomly generated (memory_usage, cycles) plans.

    ``SRAM`` is a fixture providing a memory region used as both the home
    and interior region of the dummy plans.
    """
    memory_usage_low = 1
    memory_usage_high = 1000
    cycles_low = 100
    cycles_high = 10000
    def _make_plan(memory_usage, cycles):
        # Minimal valid Plan: a 1-element tensor config with the given costs.
        output_config = TensorConfig(
            tensor=Tensor([1], "int8"),
            home_region=SRAM,
            state=TensorConfigState.BOUNDARY,
            buffer_mode=BufferMode.RECOMPUTE,
            stripe_configs=[StripeConfig([1], [1], [1], [1], [1], [0])],
        )
        return Plan(
            tensor_configs={},
            open_configs=[],
            output_config=output_config,
            part_group=[],
            interior_region=SRAM,
            memory_usage=memory_usage,
            cycles=cycles,
        )
    def _make_plans(num):
        # Random costs within the configured ranges.
        plans = []
        for _ in range(num):
            memory_usage = np.random.randint(memory_usage_low, memory_usage_high)
            cycles = np.random.randint(cycles_low, cycles_high)
            plan = _make_plan(memory_usage, cycles)
            plans.append(plan)
        return plans
    plans = _make_plans(num_plans)
    reference = list(_ref_pareto_cull_plans(plans, max_plans))
    result = _pareto_cull_plans(plans, max_plans, False)
    assert result == reference
if __name__ == "__main__":
    # The module never binds the bare name ``tvm`` (only names imported from
    # it), so import the testing entry point locally to avoid a NameError
    # when executed as a script.
    import tvm.testing

    tvm.testing.main()
| 4,733 | 30.56 | 81 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test infrastructure for the NPU cascader"""
| 832 | 45.277778 | 62 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_proposal_generator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm.contrib.ethosu.cascader.proposal_generator import generate_proposals
from .infra import make_simple_home_map, make_options, ethosu_enabled
# The whole suite is gated on the Ethos-U codegen being available; otherwise
# the fixtures these tests depend on cannot be constructed.
if ethosu_enabled:
    def test_generate_proposals(FLASH, SRAM, TwoConv2DGraph):
        """Proposals for a two-conv2d graph should stay within the expected
        SRAM envelope and cover both parts."""
        graph = TwoConv2DGraph
        # Expected SRAM usage bounds for any valid proposal on this graph.
        min_sram = 3700
        max_sram = 11700
        input_configs = 1
        parts = 2
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=32,
            stripe_factors=4,
            max_plan_size=10,
        )
        proposals = generate_proposals(graph, home_map, options)
        for proposal in proposals:
            assert 0 < len(proposal.plans) <= parts
            assert len(proposal.input_tensor_configs) == input_configs
            assert len(proposal.part_group) == parts
            assert min_sram < proposal.memory_usage < max_sram
            assert proposal.cycles > 0
    def test_generate_proposals_binary(FLASH, SRAM, BinaryGraph):
        """Proposals for a graph with a binary (two-input) operation should
        carry two input tensor configs."""
        graph = BinaryGraph
        input_configs = 2
        parts = 3
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=32,
            stripe_factors=4,
            max_plan_size=10,
        )
        proposals = generate_proposals(graph, home_map, options)
        for proposal in proposals:
            assert 0 < len(proposal.plans) <= parts
            assert len(proposal.input_tensor_configs) == input_configs
            assert len(proposal.part_group) == parts
            assert proposal.cycles > 0
    def test_generate_proposals_mobilenetv1_start(FLASH, SRAM, MobileNetv1StartGraph):
        """Proposals for the head of MobileNetV1 (8 parts)."""
        graph = MobileNetv1StartGraph
        min_sram = 200000
        max_sram = 1300000
        input_configs = 1
        parts = 8
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=32,
            stripe_factors=5,
            max_plan_size=10,
        )
        proposals = generate_proposals(graph, home_map, options)
        for proposal in proposals:
            assert 0 < len(proposal.plans) <= parts
            assert len(proposal.input_tensor_configs) == input_configs
            assert len(proposal.part_group) == parts
            assert min_sram < proposal.memory_usage < max_sram
            assert proposal.cycles > 0
    def test_generate_proposals_mobilenetv1(FLASH, SRAM, MobileNetv1Graph):
        """Proposals for the full MobileNetV1 graph (27 parts)."""
        graph = MobileNetv1Graph
        min_sram = 200000
        max_sram = 1300000
        input_configs = 1
        parts = 27
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=32,
            stripe_factors=5,
            max_plan_size=10,
        )
        proposals = generate_proposals(graph, home_map, options)
        for proposal in proposals:
            assert 0 < len(proposal.plans) <= parts
            assert len(proposal.input_tensor_configs) == input_configs
            assert len(proposal.part_group) == parts
            assert min_sram < proposal.memory_usage < max_sram
            assert proposal.cycles > 0
    def test_generate_proposals_mobilenetv2diamond(FLASH, SRAM, MobileNetv2DiamondGraph):
        """Proposals for a MobileNetV2 residual ('diamond') subgraph."""
        graph = MobileNetv2DiamondGraph
        min_sram = 370000
        max_sram = 990000
        input_configs = 1
        parts = 5
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=64,
            stripe_factors=5,
            max_plan_size=10,
        )
        proposals = generate_proposals(graph, home_map, options)
        for proposal in proposals:
            assert 0 < len(proposal.plans) <= parts
            assert len(proposal.input_tensor_configs) == input_configs
            assert len(proposal.part_group) == parts
            assert min_sram < proposal.memory_usage < max_sram
            assert proposal.cycles > 0
    def test_generate_proposals_mobilenetv1_disable_striping(FLASH, SRAM, MobileNetv1Graph):
        """With striping disabled there must be exactly one proposal and every
        output stripe must span the full tensor (shape == strides)."""
        graph = MobileNetv1Graph
        home_map = make_simple_home_map(graph, SRAM, FLASH)
        options = make_options(
            cascade_region=SRAM,
            max_proposals=32,
            stripe_factors=5,
            max_plan_size=10,
            enable_striping=False,
        )
        proposals = generate_proposals(graph, home_map, options)
        assert len(proposals) == 1
        proposal = proposals[0]
        for plan in proposal.plans:
            for stripe_config in plan.output_config.stripe_configs:
                for shape_dim, stride_dim in list(zip(stripe_config.shape, stripe_config.strides)):
                    # The striding and shape sizes in each dimension should be the same
                    # if striping is disabled
                    assert int(shape_dim) == int(stride_dim)
if __name__ == "__main__":
    # The module never binds the bare name ``tvm`` (only names imported from
    # it), so import the testing entry point locally to avoid a NameError
    # when executed as a script.
    import tvm.testing

    tvm.testing.main()
| 5,886 | 35.565217 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
def test_tensor():
    """A Tensor must record its constructor arguments; size follows from the
    shape (1 * 2 * 3 int8 elements -> 6 bytes)."""
    tensor = cs.Tensor([1, 2, 3], "uint8", True, 0.5)
    assert tensor.shape == [1, 2, 3]
    assert tensor.dtype == "uint8"
    assert tensor.is_constant
    assert tensor.compression_ratio == 0.5
    assert tensor.size == 6
def test_inline_part():
    """An InlinePart should have zero cost (no compute/read/write) and
    propagate stripe configs through its transform matrix.

    The single propagator here swaps the two spatial axes, so the expected
    input stripe config is the output config with both axes exchanged.
    """
    subgraph = cs.TESubgraph([], None)
    part = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[0, 1, 0], [1, 0, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    output_stripe_config = cs.StripeConfig([2, 4], [8, 8], [2, 4], [1, 2], [4, 2], [0, 0])
    # Axis-swapped counterpart of the output config.
    input_stripe_config = cs.StripeConfig([4, 2], [8, 8], [4, 2], [2, 1], [2, 4], [0, 0])
    # No tensors have been attached yet.
    assert part.input_tensors == [None]
    assert part.output_tensor == None
    assert len(part.propagators) == 1
    assert part.in_line == True
    assert part.get_stripe_align_hint() == [1, 1]
    # An inline part is free: no compute cycles and no memory traffic.
    performance_info = part.get_performance_info(output_stripe_config, cs.BufferMode.RECOMPUTE)
    assert performance_info.compute_cycles == 0
    assert performance_info.read_bytes == [0]
    assert performance_info.write_bytes == 0
    input_stripe_configs = part.calculate_input_stripe_configs(output_stripe_config)
    assert len(input_stripe_configs) == 1
    assert input_stripe_configs[0] == input_stripe_config
def test_small_graph():
    """Build a two-part graph by hand and check producer/consumer wiring and
    the CascaderGraph's part ordering.

    Topology: (tensor_1, tensor_2) -> part_a -> tensor_3 -> part_b -> tensor_4.
    """
    subgraph = cs.TESubgraph([], None)
    part_a = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
            cs.Propagator(
                [[0, 1, 0], [1, 0, 0], [0, 0, 1]],
                [-1, -1],
            ),
        ],
    )
    part_b = cs.InlinePart(
        subgraph,
        [
            cs.Propagator(
                [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                [0, 0],
            ),
        ],
    )
    tensor_1 = cs.Tensor([10, 10], "uint8")
    tensor_2 = cs.Tensor([9, 9], "uint8")
    tensor_3 = cs.Tensor([10, 10], "uint8")
    tensor_4 = cs.Tensor([10, 10], "uint8")
    # Wire up part_a: two inputs, one output. Tensor-side links are kept
    # manually in sync with the part-side links.
    part_a.set_input(0, tensor_1)
    part_a.set_input(1, tensor_2)
    part_a.set_output(tensor_3)
    tensor_1.add_consumer(part_a)
    tensor_2.add_consumer(part_a)
    tensor_3.add_producer(part_a)
    # Wire up part_b: consumes part_a's output.
    part_b.set_input(0, tensor_3)
    part_b.set_output(tensor_4)
    tensor_3.add_consumer(part_b)
    tensor_4.add_producer(part_b)
    assert part_a.input_tensors == [tensor_1, tensor_2]
    assert part_a.output_tensor == tensor_3
    assert part_b.input_tensors == [tensor_3]
    assert part_b.output_tensor == tensor_4
    assert tensor_1.producers == []
    assert tensor_1.consumers == [part_a]
    assert tensor_2.producers == []
    assert tensor_2.consumers == [part_a]
    assert tensor_3.producers == [part_a]
    assert tensor_3.consumers == [part_b]
    assert tensor_4.producers == [part_b]
    assert tensor_4.consumers == []
    graph = cs.CascaderGraph([tensor_1, tensor_2], [tensor_4])
    assert graph.input_tensors == [tensor_1, tensor_2]
    assert graph.output_tensors == [tensor_4]
    # Parts are ordered from output towards input, so part_b comes first.
    assert graph.part_order == [part_b, part_a]
    for i, part in enumerate(graph.part_order):
        assert graph.get_part_id(part) == i
def test_create_cascader_graph(TwoConv2DWithSliceTE):
    """Lower a conv2d -> slice -> conv2d TE graph into a cascader graph and
    check the resulting part/tensor structure, walking from output to input."""
    _, te_graph, const_dict = TwoConv2DWithSliceTE
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    graph = cs.create_cascader_graph(te_graph, const_dict, device_config)
    # Final output: NHCWB16-laid-out tensor produced by the second conv2d.
    output_tensor = graph.output_tensors[0]
    assert output_tensor.shape == [1, 6, 1, 6, 16]
    assert len(output_tensor.producers) == 1
    assert not output_tensor.is_constant
    # Second conv2d: inputs are IFM, weights (constant) and scale/bias (constant).
    conv2_part = output_tensor.producers[0]
    assert isinstance(conv2_part, cs.EthosuPart)
    assert len(conv2_part.input_tensors) == 3
    assert conv2_part.input_tensors[0].shape == [1, 6, 6, 64]
    assert len(conv2_part.input_tensors[0].producers) == 1
    assert not conv2_part.input_tensors[0].is_constant
    assert conv2_part.input_tensors[1].shape == [16, 3, 3, 64]
    assert len(conv2_part.input_tensors[1].producers) == 0
    assert conv2_part.input_tensors[1].is_constant
    assert conv2_part.input_tensors[2].shape == [16, 10]
    assert len(conv2_part.input_tensors[2].producers) == 0
    assert conv2_part.input_tensors[2].is_constant
    # The slice between the convolutions becomes an InlinePart.
    slice_part = conv2_part.input_tensors[0].producers[0]
    assert isinstance(slice_part, cs.InlinePart)
    assert len(slice_part.input_tensors) == 1
    assert slice_part.input_tensors[0].shape == [1, 12, 12, 64]
    assert len(slice_part.input_tensors[0].producers) == 1
    assert not slice_part.input_tensors[0].is_constant
    # First conv2d: consumes the graph input plus its constant weights/bias.
    conv1_part = slice_part.input_tensors[0].producers[0]
    assert isinstance(conv1_part, cs.EthosuPart)
    assert len(conv1_part.input_tensors) == 3
    assert conv1_part.input_tensors[0].shape == [1, 12, 12, 8]
    assert len(conv1_part.input_tensors[0].producers) == 0
    assert not conv1_part.input_tensors[0].is_constant
    assert conv1_part.input_tensors[1].shape == [64, 1, 1, 8]
    assert len(conv1_part.input_tensors[1].producers) == 0
    assert conv1_part.input_tensors[1].is_constant
    assert conv1_part.input_tensors[2].shape == [64, 10]
    assert len(conv1_part.input_tensors[2].producers) == 0
    assert conv1_part.input_tensors[2].is_constant
def test_create_diamond_graph(MobileNetv2DiamondTE):
    """Lower a MobileNetV2 residual ('diamond') TE graph and check the final
    add part: it must have two non-constant, producer-backed inputs."""
    _, te_graph, const_dict = MobileNetv2DiamondTE
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    graph = cs.create_cascader_graph(te_graph, const_dict, device_config)
    output_tensor = graph.output_tensors[0]
    assert output_tensor.shape == [1, 56, 56, 24]
    assert len(output_tensor.producers) == 1
    assert not output_tensor.is_constant
    add1_part = output_tensor.producers[0]
    assert isinstance(add1_part, cs.EthosuPart)
    assert len(add1_part.input_tensors) == 2
    assert graph.get_part_id(add1_part) == 0
    # Both branches of the diamond feed the add with equally shaped,
    # producer-backed, non-constant tensors.
    assert add1_part.input_tensors[0].shape == [1, 56, 56, 24]
    assert len(add1_part.input_tensors[0].producers) == 1
    assert not add1_part.input_tensors[0].is_constant
    assert add1_part.input_tensors[1].shape == [1, 56, 56, 24]
    # Fixed copy-paste bug: the following two assertions originally
    # re-checked input_tensors[0] instead of input_tensors[1].
    assert len(add1_part.input_tensors[1].producers) == 1
    assert not add1_part.input_tensors[1].is_constant
if __name__ == "__main__":
    # ``import tvm.contrib.ethosu.cascader as cs`` binds only ``cs``, not
    # ``tvm``, so import the testing entry point locally to avoid a
    # NameError when executed as a script.
    import tvm.testing

    tvm.testing.main()
| 7,308 | 34.653659 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_scheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position, invalid-name
import pytest
pytest.importorskip("ethosu.vela")
import tvm.contrib.ethosu.cascader as cs
from . import infra
def test_cascade(SRAM, FLASH, TwoConv2DWithSliceTE, TwoConv2DTE, MobileNetv1StartTE, MobileNetv1TE):
    """Smoke-test cs.cascade over several pre-built TE graphs.

    Passing simply means the cascader runs to completion on each fixture
    without raising.
    """
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    for sch, te_graph, const_dict in (
        TwoConv2DTE,
        TwoConv2DWithSliceTE,
        MobileNetv1StartTE,
        MobileNetv1TE,
    ):
        cascade_options = infra.make_options(
            cascade_region=SRAM,
            max_proposals=64,
            stripe_factors=4,
            max_plan_size=10,
            max_open_plans=8,
            max_closed_plans=32,
            always_copy_size=1024,
            disable_pareto_plans=False,
            disable_pareto_proposals=False,
        )
        cs.cascade(sch, te_graph, const_dict, cascade_options, SRAM, FLASH, [SRAM], device_config)
def test_compute_cycles_annotation(SRAM, FLASH, TwoConv2DTE):
    """After cascading (with striping disabled), specific schedule stages must
    carry a ``compute_cycles_hint`` pragma with the expected cycle counts."""
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    options = infra.make_options(
        cascade_region=SRAM,
        max_proposals=64,
        stripe_factors=4,
        max_plan_size=10,
        max_open_plans=8,
        max_closed_plans=32,
        always_copy_size=1024,
        disable_pareto_plans=False,
        disable_pareto_proposals=False,
        enable_striping=False,
    )
    sch, te_graph, const_dict = TwoConv2DTE
    cs.cascade(sch, te_graph, const_dict, options, SRAM, FLASH, [SRAM], device_config)
    # Stages that should have compute cycle annotations
    # [copy, copy, conv2d, copy, conv2d]
    stages = [6, 8, 9, 18, 19]
    # Expected hints for each operation
    compute_cycles_hints = [4096, 5120, 1440, 2560, 3072]
    for stage, compute_cycles_hint in zip(stages, compute_cycles_hints):
        op = sch.stages[stage]
        op_iter_vars = op.leaf_iter_vars[0]
        op_attrs = op.iter_var_attrs[op_iter_vars]
        # The pragma is attached to the stage's outermost iteration variable.
        assert op_attrs.pragma_keys[0] == "compute_cycles_hint"
        assert op_attrs.pragma_values[0] == compute_cycles_hint
if __name__ == "__main__":
    # ``import tvm.contrib.ethosu.cascader as cs`` binds only ``cs``, not
    # ``tvm``, so import the testing entry point locally to avoid a
    # NameError when executed as a script.
    import tvm.testing

    tvm.testing.main()
| 2,932 | 33.916667 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_block_config.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
import tvm
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
from .infra import make_matrices
@pytest.mark.parametrize(
"test_id, op_type, activation, kernel, stride, dilation, padding, in_shape, out_shape",
[
# Conv2D
(
0,
"ethosu_conv2d",
"NONE",
(34, 19),
(2, 2),
(1, 1),
(0, 0, 0, 0),
(1, 266, 111, 15),
(1, 117, 47, 15),
),
(
1,
"ethosu_conv2d",
"NONE",
(14, 14),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 125, 63, 64),
(1, 112, 50, 128),
),
(
2,
"ethosu_conv2d",
"NONE",
(7, 1),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 4, 12),
(1, 4, 4, 511),
),
(
3,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 16),
),
(
4,
"ethosu_conv2d",
"NONE",
(5, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 96, 16, 276),
(1, 92, 12, 1),
),
(
5,
"ethosu_conv2d",
"NONE",
(3, 3),
(1, 1),
(2, 2),
(0, 0, 0, 0),
(1, 62, 94, 32),
(1, 58, 90, 16),
),
# Depthwise Conv2D
(
6,
"ethosu_depthwise_conv2d",
"NONE",
(3, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 77, 23, 18),
(1, 75, 19, 18),
),
(
7,
"ethosu_depthwise_conv2d",
"NONE",
(3, 3),
(2, 2),
(1, 1),
(1, 1, 1, 1),
(1, 25, 10, 276),
(1, 13, 5, 276),
),
# Pooling
(
8,
"ethosu_pooling",
"NONE",
(13, 5),
(1, 1),
(1, 1),
(0, 0, 0, 0),
(1, 13, 5, 276),
(1, 1, 1, 276),
),
(
9,
"ethosu_pooling",
"NONE",
(7, 3),
(2, 1),
(1, 1),
(0, 0, 0, 0),
(1, 317, 14, 21),
(1, 156, 12, 21),
),
],
)
@pytest.mark.parametrize(
"layouts",
[
("NHWC", "NHWC"),
("NHCWB16", "NHCWB16"),
("NHWC", "NHCWB16"),
("NHCWB16", "NHWC"),
],
)
@pytest.mark.parametrize(
"acc_config, expected_block_configs",
[
(
"ethos-u55-32",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 96), (1, 4, 6, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 4), (1, 5, 1, 12, 4), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 4, 1, 12, 16)),
((1, 8, 5, 16), (1, 6, 1, 5, 16)),
# Pooling
((1, 1, 1, 128), (1, 1, 4, 1, 16)),
((1, 9, 6, 16), (1, 8, 1, 4, 16)),
],
),
(
"ethos-u55-64",
[
# Conv2D
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
((1, 4, 4, 96), (1, 4, 6, 4, 16)),
((1, 8, 4, 16), (1, 8, 1, 4, 16)),
((1, 10, 6, 8), (1, 8, 1, 4, 16)),
((1, 6, 5, 16), (1, 6, 1, 5, 16)),
# Depthwise Conv2D
((1, 6, 10, 16), (1, 4, 1, 12, 16)),
((1, 8, 5, 16), (1, 6, 1, 5, 16)),
# Pooling
((1, 1, 1, 128), (1, 1, 4, 1, 16)),
((1, 9, 6, 16), (1, 8, 1, 4, 16)),
],
),
(
"ethos-u55-128",
[
# Conv2D
((1, 7, 6, 16), (1, 7, 1, 6, 16)),
((1, 5, 8, 16), (1, 5, 1, 8, 16)),
((1, 4, 4, 128), (1, 4, 8, 4, 16)),
((1, 16, 4, 16), (1, 16, 1, 4, 16)),
((1, 8, 12, 8), (1, 10, 1, 6, 16)),
((1, 10, 6, 16), (1, 10, 1, 6, 16), (1, 6, 1, 6, 16)),
# Depthwise Conv2D
((1, 7, 10, 16), (1, 7, 1, 10, 16), (1, 6, 1, 10, 16)),
((1, 10, 6, 16), (1, 10, 1, 6, 16), (1, 6, 1, 6, 16)),
# Pooling
# ((1, 1, 2, 16), (1, 1, 1, 2, 16)),
((1, 1, 2, 128), (1, 1, 4, 2, 16)),
((1, 10, 6, 16), (1, 9, 1, 6, 16)),
],
),
(
"ethos-u55-256",
[
# Conv2D
((1, 14, 8, 16), (1, 14, 1, 8, 16)),
((1, 16, 8, 16), (1, 16, 1, 8, 16)),
((1, 4, 4, 128), (1, 4, 8, 4, 16)),
((1, 32, 4, 16), (1, 10, 12, 16), (1, 32, 1, 4, 16), (1, 10, 1, 12, 16)),
((1, 20, 12, 8), (1, 10, 1, 12, 16)),
((1, 12, 10, 16), (1, 12, 1, 10, 16)),
# Depthwise Conv2D
((1, 8, 20, 16), (1, 6, 1, 20, 16), (1, 6, 2, 20, 16)),
((1, 14, 6, 16), (1, 12, 1, 6, 16)),
# Pooling
# ((1, 2, 2, 16), (1, 2, 1, 2, 16)),
((1, 2, 2, 128), (1, 2, 6, 2, 16)),
((1, 10, 12, 16), (1, 10, 1, 12, 16)),
],
),
],
)
def test_best_block_config(
    test_id,
    op_type,
    activation,
    kernel,
    stride,
    dilation,
    padding,
    in_shape,
    out_shape,
    layouts,
    acc_config,
    expected_block_configs,
):
    """Check that EthosuPart selects one of the expected block configs.

    Builds IFM/weight propagators for the parametrised operator, wraps them
    in an ``EthosuPart`` and asserts that the block config chosen for a
    full-tensor stripe is among the accelerator-variant-specific expected
    shapes for this test case.
    """
    ofm_channels = out_shape[3]
    ifm_channels = in_shape[3]
    nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
    ifm_matrix, ifm_offset, weight_matrix, weight_offset, _, _ = make_matrices(
        op_type,
        kernel,
        stride,
        padding,
        layouts[0],
        layouts[1],
        dilation,
        ifm_channels,
        ofm_channels,
    )
    # Convert NHWC shapes to the 5-D NHCWB16 brick format where requested.
    if layouts[0] == "NHCWB16":
        in_shape = [
            int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, in_shape + (1,)).tolist()[:-1]
        ]
    if layouts[1] == "NHCWB16":
        out_shape = [
            int(math.ceil(n)) for n in np.matmul(nhwc_to_nhcwb16, out_shape + (1,)).tolist()[:-1]
        ]
    propagator = cs.Propagator(ifm_matrix, ifm_offset)
    weight_propagator = cs.Propagator(weight_matrix, weight_offset)
    subkernels = ((kernel[0] + 7) // 8) * ((kernel[1] + 7) // 8)
    op_attrs = {
        "op": op_type,
        "activation": activation,
        "stride_h": stride[0],
        "stride_w": stride[1],
        "dilation_h": dilation[0],
        "dilation_w": dilation[1],
    }
    device_config = cs.EthosuDeviceConfig(acc_config)
    block_configs = device_config.get_valid_block_configs(
        propagator,
        op_attrs,
        out_shape,
        ofm_channels,
        ifm_channels,
        layouts[1],
        layouts[0],
        "int8",
        "int8",
        kernel[0],
        kernel[1],
    )
    output_quantum = [1, 1, 2, 8]
    if layouts[1] == "NHCWB16":
        output_quantum = [1, 1, 1, 2, 8]
    # Create EthosUPart
    te_subgraph = cs.TESubgraph([], None)
    part = cs.EthosuPart(
        te_subgraph,
        [propagator, weight_propagator],
        output_quantum,
        subkernels,
        block_configs,
        1,
    )
    # Add tensors
    input_tensor = cs.Tensor(in_shape, "int8")
    part.set_input(0, input_tensor)
    if op_type == "ethosu_conv2d":
        weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], ifm_channels], "int8")
        part.set_input(1, weight_tensor)
    elif op_type == "ethosu_depthwise_conv2d":
        weight_tensor = cs.Tensor([ofm_channels, kernel[0], kernel[1], 1], "int8")
        part.set_input(1, weight_tensor)
    output_tensor = cs.Tensor(out_shape, "int8")
    part.set_output(output_tensor)
    # BUGFIX: the order vector must match the dimensionality of out_shape —
    # 5 elements for the 5-D NHCWB16 brick layout, 4 for NHWC. The original
    # condition was inverted relative to the sibling matcher tests and to
    # the stripes/offset vectors built from output_quantum below.
    order = [1, 2, 4, 3, 0] if layouts[1] == "NHCWB16" else [1, 2, 3, 4]
    stripes = [1] * len(output_quantum)
    offset = [0] * len(output_quantum)
    stripe_config = cs.StripeConfig(out_shape, out_shape, out_shape, order, stripes, offset)
    block = part.get_block_config(stripe_config)
    block_shape = tuple(int(a) for a in block.output_shape)
    assert block_shape in expected_block_configs[test_id]
@pytest.mark.parametrize(
"ofm_layout, block_config_str, expected_block_shape",
[
("NHWC", "4x4x8", [1, 4, 4, 8]),
("NHCWB16", "4x4x8", [1, 4, 1, 4, 16]),
("NHCWB16", "4x4x24", [1, 4, 2, 4, 16]),
],
)
def test_force_block_config_kernelwise(ofm_layout, block_config_str, expected_block_shape):
    """A forced block config must be the only config returned for kernel-based ops."""
    op_type = "ethosu_pooling"
    activation = "NONE"
    kernel = (2, 2)
    stride = (2, 2)
    padding = (0, 0)
    dilation = (1, 1)
    ifm_channels = 32
    out_shape = (1, 8, 10, 16)
    ofm_channels = out_shape[3]

    ifm_matrix, ifm_offset, _, _, _, _ = make_matrices(
        op_type, kernel, stride, padding, "NHWC", ofm_layout, dilation, ifm_channels
    )
    ifm_propagator = cs.Propagator(ifm_matrix, ifm_offset)

    attributes = {
        "op": op_type,
        "activation": activation,
        "stride_h": stride[0],
        "stride_w": stride[1],
        "dilation_h": dilation[0],
        "dilation_w": dilation[1],
    }
    compiler_options = {
        "enable_cascader": True,
        "dev_force_block_config": block_config_str,
    }
    # dev_force_block_config overrides the block-config search entirely.
    with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": compiler_options}):
        device_config = cs.EthosuDeviceConfig("ethos-u55-128")
        valid_configs = device_config.get_valid_block_configs(
            ifm_propagator,
            attributes,
            out_shape,
            ofm_channels,
            ifm_channels,
            ofm_layout,
            "NHWC",
            "int8",
            "int8",
            kernel[0],
            kernel[1],
        )

    assert len(valid_configs) == 1
    assert valid_configs[0].output_shape == expected_block_shape
@pytest.mark.parametrize(
"ofm_layout, block_config_str, expected_block_shape",
[
("NHWC", "4x4x8", [1, 4, 4, 8]),
("NHCWB16", "4x4x8", [1, 4, 1, 4, 16]),
("NHCWB16", "4x4x24", [1, 4, 2, 4, 16]),
],
)
def test_force_block_config_elementwise(ofm_layout, block_config_str, expected_block_shape):
    """A forced block config must be the only config returned for elementwise ops.

    ``dev_force_block_config`` bypasses the block-config search, so exactly
    one config with the requested shape must come back.
    """
    op_type = "ethosu_elementwise_unary"
    op_str = "ABS"
    activation = "NONE"
    ofm_shape = (1, 8, 10, 16)
    # Identity propagation: the output maps 1:1 onto the input.
    ifm_matrix = [
        [1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 0],
        [0, 0, 0, 1, 0],
        [0, 0, 0, 0, 1],
    ]
    ifm_offset = [0, 0, 0, 0]
    propagator = cs.Propagator(ifm_matrix, ifm_offset)
    op_attrs = {
        "op": op_type,
        "operator_type": op_str,
        "activation": activation,
        "clip_min": 0,
        "clip_max": 0,
        "rounding_mode": "TFL",
    }
    config = {
        "enable_cascader": True,
        "dev_force_block_config": block_config_str,
    }
    with tvm.transform.PassContext(config={"relay.ext.ethos-u.options": config}):
        device_config = cs.EthosuDeviceConfig("ethos-u55-128")
        block_configs = device_config.get_elementwise_block_config(
            propagator,
            None,
            op_attrs,
            ofm_shape,
            ofm_layout,
            # BUGFIX: the input layout string was misspelt "NWHC"; the valid
            # layout strings used throughout these tests are "NHWC"/"NHCWB16".
            "NHWC",
            None,
            "int8",
            "int8",
        )
    assert len(block_configs) == 1
    assert block_configs[0].output_shape == expected_block_shape
# Allow running this test file directly, e.g. `python <this file>`.
if __name__ == "__main__":
    tvm.testing.main()
| 12,930 | 27.049892 | 97 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_unary_elementwise_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import math
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.unary_elementwise import (
match_ethosu_unary_elementwise,
unary_elementwise_compute,
)
from tvm.relay.backend.contrib.ethosu.te.common import get_layout_transform_matrices
def _make_matrices(ifm_layout, ofm_layout, ofm_channels):
    """Build the expected IFM propagation matrix for a unary elementwise op.

    The base transform is a 5x5 identity (the output maps 1:1 onto the
    input); it is composed with the layout-conversion matrices whenever the
    input or output tensor uses the NHCWB16 brick layout.
    """
    nhwc_to_nhcwb16, nhcwb16_to_nhwc = get_layout_transform_matrices(ofm_channels)
    # 5x5 identity: one row per output dimension plus the affine column.
    transform = [[1 if row == col else 0 for col in range(5)] for row in range(5)]
    if ofm_layout == "NHCWB16":
        transform = np.matmul(transform, nhcwb16_to_nhwc).tolist()
    if ifm_layout == "NHCWB16":
        transform = np.matmul(nhwc_to_nhcwb16, transform).tolist()
    return transform
@pytest.mark.parametrize(
"ofm_shape",
[
[1, 12, 15, 128],
[1, 16, 16, 16],
[1, 1, 1, 1024],
[1, 53, 91, 7],
[1, 182, 12, 72],
],
)
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("op_type", ["ABS", "CLZ"])
def test_ethosu_unary_elementwise_matcher(ofm_shape, ifm_layout, ofm_layout, op_type):
    """Check the cascader matcher for unary elementwise ops.

    Builds a TE unary elementwise compute, matches it into an EthosuPart and
    verifies that the IFM propagator reproduces the expected layout-transform
    matrix and propagates a full-tensor stripe back to the full input shape.
    """
    ifm_shape = ofm_shape.copy()
    ofm_channels = ofm_shape[3]
    nhwc_to_nhcwb16, _ = get_layout_transform_matrices(ofm_channels)
    # Convert shapes to the 5-D NHCWB16 brick format where requested; the
    # trailing 1 is the affine component of the transform matrix.
    if ifm_layout == "NHCWB16":
        ifm_shape = [
            int(math.ceil(n))
            for n in np.matmul(
                nhwc_to_nhcwb16,
                ifm_shape
                + [
                    1,
                ],
            ).tolist()[:-1]
        ]
    if ofm_layout == "NHCWB16":
        ofm_shape = [
            int(math.ceil(n))
            for n in np.matmul(
                nhwc_to_nhcwb16,
                ofm_shape
                + [
                    1,
                ],
            ).tolist()[:-1]
        ]
        order = [1, 2, 4, 3, 0]
    else:
        order = [1, 2, 3, 4]
    ifm = te.placeholder(ifm_shape, dtype="int8")
    lut = te.placeholder((), dtype="uint8")
    out = unary_elementwise_compute(
        ifm=ifm,
        lut=lut,
        operator_type=op_type,
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        ofm_channels=ofm_channels,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        rounding_mode="TFL",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    ifm_propagator = out.op.attrs["ifm_propagator"]
    offset = [0] * len(ofm_shape)
    stripes = [0] * len(ofm_shape)
    output_stripe_config = cs.StripeConfig(ofm_shape, ofm_shape, ofm_shape, order, stripes, offset)
    # Expected transform computed independently by the test helper.
    ifm_transform = _make_matrices(ifm_layout, ofm_layout, ofm_channels)
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_unary_elementwise(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    assert len(part.propagators) == 1
    assert part.propagators[0].transform == ifm_transform
    propagated_ifm = ifm_propagator.propagate(output_stripe_config).shape
    # The layout transforms that have the exact number of output channels in them
    # will lose no information about the number of channels
    assert ifm_shape == propagated_ifm
if __name__ == "__main__":
    # BUGFIX: this file never binds the name `tvm` at module scope
    # (`from tvm import te` and `import tvm.contrib... as cs` bind only
    # `te`/`cs`), so `tvm.testing.main()` raised NameError when the file was
    # run directly. Import locally before invoking the runner.
    import tvm.testing

    tvm.testing.main()
| 4,225 | 30.303704 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/cascader/test_ethosu_pooling_matcher.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
from tvm import te
import tvm.contrib.ethosu.cascader as cs
from tvm.relay.backend.contrib.ethosu.te.pooling import match_ethosu_pooling, pooling_compute
from .infra import make_matrices
@pytest.mark.parametrize("pool_shape", [(3, 3), (2, 1), (3, 5)])
@pytest.mark.parametrize("stride", [(1, 1), (2, 1), (3, 2)])
@pytest.mark.parametrize("padding", [(0, 0, 0, 0), (3, 2, 3, 2), (2, 1, 0, 1)])
@pytest.mark.parametrize("ifm_layout", ["NHWC", "NHCWB16"])
@pytest.mark.parametrize("ofm_layout", ["NHWC", "NHCWB16"])
def test_ethosu_pooling_matcher(pool_shape, stride, padding, ifm_layout, ofm_layout):
    """Check the cascader matcher for pooling ops.

    Builds a TE MAX pooling compute, matches it into an EthosuPart and
    verifies that the matched IFM propagator carries the transform/offset
    produced independently by the make_matrices test helper.
    """
    ofm_channels = 21
    if ifm_layout == "NHWC":
        ifm_shape = (1, 12, 15, ofm_channels)
    else:
        # NHCWB16 brick layout: channels are split into ceil(c/16) bricks of 16.
        ifm_shape = (1, 12, 1 + ((ofm_channels - 1) // 16), 15, 16)
    ifm = te.placeholder(ifm_shape, dtype="int8")
    lut = te.placeholder((), dtype="uint8")
    out = pooling_compute(
        ifm=ifm,
        lut=lut,
        pooling_type="MAX",
        ifm_scale=1,
        ifm_zero_point=0,
        ofm_scale=1,
        ofm_zero_point=0,
        pool_shape=pool_shape,
        ofm_channels=ofm_channels,
        ofm_dtype="int8",
        strides=stride,
        padding=padding,
        activation="NONE",
        clip_min=0,
        clip_max=0,
        rounding_mode="TFL",
        upscale="NONE",
        ifm_layout=ifm_layout,
        ofm_layout=ofm_layout,
    )
    # Expected transform/offset computed independently of the matcher.
    (ifm_transform, ifm_offset, _, _, _, _) = make_matrices(
        "ethosu_pooling",
        pool_shape,
        stride,
        padding,
        ifm_layout,
        ofm_layout,
        ofm_channels=ofm_channels,
    )
    device_config = cs.EthosuDeviceConfig("ethos-u55-256")
    part = match_ethosu_pooling(out, device_config)
    assert isinstance(part, cs.EthosuPart)
    assert len(part.propagators) == 1
    assert part.propagators[0].transform == ifm_transform
    assert part.propagators[0].offset == ifm_offset
if __name__ == "__main__":
    # BUGFIX: this file never binds the name `tvm` at module scope
    # (`from tvm import te` and `import tvm.contrib... as cs` bind only
    # `te`/`cs`), so `tvm.testing.main()` raised NameError when the file was
    # run directly. Import locally before invoking the runner.
    import tvm.testing

    tvm.testing.main()
| 2,815 | 32.52381 | 93 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_target.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Union
import pytest
import tvm
from tests.python.contrib.test_uma.test_uma_vanilla_accelerator import VanillaAcceleratorBackend
from tvm.relay.backend.contrib.uma import uma_available
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
@pytest.mark.parametrize(
"target_name,target_attrs,target_args",
[
("my_hwa", {}, {}),
(
"my_hwa2",
{
"local_memory_size": 128 * 1024,
"variant": "version1",
},
{"local_memory_size": 256 * 1024, "variant": "version2"},
),
],
)
def test_uma_target(target_name, target_attrs, target_args):
    """Register a UMA target and verify default and overridden attributes."""
    register_target = tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget")
    register_target(target_name, target_attrs)

    # The freshly registered target must expose the default attribute values.
    default_target = tvm.target.Target(target_name)
    assert str(default_target.kind) == target_name
    for key, value in target_attrs.items():
        assert default_target.attrs[key] == value

    # Command-line style overrides must take precedence over the defaults.
    cli_args = " ".join(f"--{k}={v}" for k, v in target_args.items())
    overridden_target = tvm.target.Target(f"{target_name} {cli_args}")
    for key, value in target_args.items():
        assert overridden_target.attrs[key] == value
@pytest.mark.parametrize(
"attr_name, target_attr",
[
("float_attr", 3.14),
("none_attr", None),
("model", "my_model"),
],
)
def test_invalid_attr_option(attr_name: str, target_attr: Union[str, int, bool, float, None]):
    """Unsupported attribute names/types must be rejected at registration time."""
    register_target = tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget")

    if target_attr is None:
        # None cannot be caught as TVMError, as it causes a SIGKILL, therefore it must be prevented to be
        # entered into relay.backend.contrib.uma.RegisterTarget at Python level.
        with pytest.raises(ValueError, match=r"Target attribute None is not supported."):
            backend = VanillaAcceleratorBackend()
            backend._target_attrs = {attr_name: target_attr}
            backend.register()
        return

    target_name = f"{attr_name}_{target_attr}"
    attrs = {attr_name: target_attr}
    if "model" in attr_name:
        # "model" collides with an attribute every target already has.
        with pytest.raises(tvm.TVMError, match=r"Attribute is already registered: .*"):
            register_target(target_name, attrs)
    else:
        with pytest.raises(TypeError, match=r"Only String, Integer, or Bool are supported. .*"):
            register_target(target_name, attrs)
@pytest.mark.parametrize(
"target_name",
[
"llvm",
"c",
],
)
def test_target_duplication(target_name: str):
    """Registering a UMA target under an existing target name must fail."""
    with pytest.raises(tvm.TVMError, match=r"TVM UMA Error: Target is already registered: .*"):
        register_target = tvm.get_global_func("relay.backend.contrib.uma.RegisterTarget")
        register_target(target_name, {})
# Allow running this test file directly, e.g. `python <this file>`.
if __name__ == "__main__":
    tvm.testing.main()
| 3,849 | 35.666667 | 105 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_uma_pipeline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
import os
import tensorflow as tf
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
from tvm.relay import transform, testing
from tvm.testing.aot import (
AOTTestModel,
AOTTestRunner,
generate_ref_data,
compile_and_run,
create_relay_module_and_inputs_from_tflite_file,
)
import tvm
from test_uma_vanilla_accelerator import VanillaAcceleratorBackend
from tvm import relay
import numpy as np
from collections import OrderedDict
from tvm.relay.backend.contrib.uma.api.utils import uma_available
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
@pytest.mark.parametrize(
"interface_api,use_unpacked_api,test_runner,groups,weight_shape",
[("c", True, AOT_DEFAULT_RUNNER, 1, 32)],
)
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
    """Compile and run a single-conv2d subgraph through the UMA backend."""
    relay_mod, input_data, expected_outputs, test_runner = create_conv2d(
        groups, test_runner, weight_shape
    )

    backend = VanillaAcceleratorBackend()
    backend.register()
    partitioned_mod = backend.partition(relay_mod)

    accelerator_target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    test_model = AOTTestModel(module=partitioned_mod, inputs=input_data, outputs=expected_outputs)
    compile_and_run(
        test_model,
        test_runner,
        interface_api,
        use_unpacked_api,
        target=accelerator_target,
    )
def create_conv2d(groups=1, test_runner=AOT_DEFAULT_RUNNER, weight_shape=32):
    """Build a single-conv2d Relay module plus reference data.

    Returns (module, inputs, reference_outputs, runner), where the runner is
    a copy of ``test_runner`` with USMP enabled.
    """
    dtype = "float32"
    input_shape = (1, 32, 14, 14)
    kernel_shape = (32, weight_shape, 3, 3)

    runner = AOTTestRunner(
        makefile=test_runner.makefile,
        prologue=test_runner.prologue,
        epilogue=test_runner.epilogue,
        includes=test_runner.includes,
        parameters=test_runner.parameters,
        pass_config={"tir.usmp.enable": True},
    )

    data = relay.var("data", shape=input_shape, dtype=dtype)
    weight = relay.var("weight", shape=kernel_shape, dtype=dtype)
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), groups=groups)

    mod = tvm.IRModule()
    mod["main"] = relay.Function([data, weight], conv)
    mod = transform.InferType()(mod)

    input_data = np.random.uniform(0, 1, input_shape).astype(dtype)
    weight_data = np.random.uniform(0, 1, kernel_shape).astype(dtype)
    inputs = OrderedDict([("data", input_data), ("weight", weight_data)])
    reference_outputs = generate_ref_data(mod, inputs)
    return mod, inputs, reference_outputs, runner
def _generate_runtime_data(input_shapes: dict, output_shapes: dict) -> [OrderedDict, OrderedDict]:
    """Create random float32 input/output buffers for a 1-in/1-out model.

    The returned output dict is always keyed "output" — the name assigned by
    relay.build in executor_codegen_metadata.outputs.
    """
    assert len(input_shapes) == 1
    assert len(output_shapes) == 1

    ((input_name, input_shape),) = input_shapes.items()
    ((_, output_shape),) = output_shapes.items()

    input_buffer = np.random.uniform(0, 1, input_shape).astype("float32")
    output_buffer = np.random.uniform(0, 1, output_shape).astype("float32")
    inputs = OrderedDict([(input_name, input_buffer)])
    outputs = OrderedDict([("output", output_buffer)])
    return inputs, outputs
def test_mobilenet():
    """End-to-end AOT test of MobileNet partitioned for the vanilla accelerator."""
    backend = VanillaAcceleratorBackend()
    backend.register()

    mod, params = testing.mobilenet.get_workload(batch_size=1)

    # Reference data must come from the unpartitioned module.
    input_shape = [int(dim) for dim in mod["main"].checked_type.arg_types[0].shape]
    inputs = {"data": np.random.uniform(size=input_shape).astype("float32")}
    expected_outputs = generate_ref_data(mod, inputs, params)

    partitioned_mod = backend.partition(mod)
    test_model = AOTTestModel(
        module=partitioned_mod, inputs=inputs, outputs=expected_outputs, params=params
    )

    accelerator_target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    host_target = tvm.target.Target("c")
    compile_and_run(
        test_model,
        AOT_DEFAULT_RUNNER,
        "c",
        True,
        workspace_byte_alignment=1,
        debug_calculated_workspaces=False,
        target=[host_target, accelerator_target],
    )
def test_tflite_model():
    """
    End-to-end test of TF-Lite file using UMA
    """
    pytest.importorskip("tflite")
    import tempfile

    # BUGFIX: use a unique temporary directory instead of the fixed,
    # world-writable path /tmp/model.tflite — a predictable path races
    # between concurrent runs and clashes between users on shared machines.
    with tempfile.TemporaryDirectory() as tmp_dir:
        tflite_file = os.path.join(tmp_dir, "model.tflite")
        generate_tflite_file(tflite_file)

        interpreter = tf.lite.Interpreter(model_path=tflite_file)
        tf_model_details = interpreter.get_input_details()
        mod, _, params = create_relay_module_and_inputs_from_tflite_file(
            tflite_file, bind_params_by_name=False
        )

        uma_backend = VanillaAcceleratorBackend()
        uma_backend.register()
        target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
        target_c = tvm.target.Target("c")

        # Generation of test input and output
        data_shape = [int(x) for x in mod["main"].params[0].type_annotation.shape]
        data = np.random.uniform(size=data_shape).astype("float32")
        input_list = {str(tf_model_details[0]["name"]): data}
        output_list = generate_ref_data(mod, input_list, params)

        # UMA partitioning (needs to be done after generate_ref_data)
        mod = uma_backend.partition(mod)
        aot_test_model = AOTTestModel(
            module=mod, inputs=input_list, outputs=output_list, params=params
        )

        test_runner = AOTTestRunner(
            pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": "greedy_by_size"}
        )

        compile_and_run(
            aot_test_model,
            test_runner,
            interface_api="c",
            use_unpacked_api=True,
            workspace_byte_alignment=1,
            debug_calculated_workspaces=False,
            target=[target_c, target],
        )
def generate_tflite_file(tflite_filename):
    """Train a tiny MNIST Keras model for one epoch and export it as TFLite.

    The flatbuffer is written to ``tflite_filename``.
    """
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Normalise pixel values to [0, 1] and add an explicit channel dimension.
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train, x_test = x_train.reshape(-1, 28, 28, 1), x_test.reshape(-1, 28, 28, 1)
    tf_model = tf.keras.models.Sequential(
        [
            tf.keras.Input(shape=(28, 28, 1)),
            tf.keras.layers.Conv2D(4, (3, 3), padding="same", activation="relu"),
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10),
        ]
    )
    # Run one forward pass so the model is built before the loss is evaluated.
    output = tf_model(x_train[:1])
    output = output.numpy()
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    loss(y_train[:1], output).numpy()
    tf_model.compile(metrics=["accuracy"], optimizer="adam", loss=loss)
    tf_model.fit(x_train, y_train, epochs=1)
    # Convert the trained Keras model to a TFLite flatbuffer and write it out.
    tflite_converter = tf.lite.TFLiteConverter.from_keras_model(tf_model)
    tflite_model = tflite_converter.convert()
    with open(tflite_filename, "wb") as f:
        f.write(tflite_model)
# Allow running this test file directly, e.g. `python <this file>`.
if __name__ == "__main__":
    tvm.testing.main()
| 7,831 | 34.762557 | 100 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_uma_vanilla_accelerator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""UMA testcase for the vanilla_accelerator accelerator"""
import pytest
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
from tvm.relay.backend.contrib.uma.backend import UMABackend
from apps.uma._template.passes import (
MyAiHwConv2dPass as VanillaAcceleratorConv2dPass,
)
from apps.uma._template.codegen import gen_includes
from apps.uma._template.patterns import conv2d_pattern, dense_pattern
from tvm.relay.backend.contrib.uma import uma_available
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
class VanillaAcceleratorBackend(UMABackend):
    """UMA backend for the VanillaAccelerator accelerator."""

    def __init__(self):
        """Register patterns, the TIR lowering pass and the C codegen."""
        super().__init__()

        #######################################################################
        # Relay to Relay function registration
        #######################################################################
        # Offloadable operator patterns for this accelerator.
        self._register_pattern("conv2d", conv2d_pattern())
        self._register_pattern("dense", dense_pattern())

        #######################################################################
        # Relay to TIR function registration
        #######################################################################
        # Lower offloaded conv2d operators in the earliest TIR phase.
        self._register_tir_pass(PassPhase.TIR_PHASE_0, VanillaAcceleratorConv2dPass())

        #######################################################################
        # TIR to runtime function registration
        #######################################################################
        # C source codegen; gen_includes supplies accelerator-specific headers.
        self._register_codegen(fmt="c", includes=gen_includes)

    @property
    def target_name(self):
        # Name under which this backend's TVM target is registered.
        return "vanilla_accelerator"
| 2,467 | 41.551724 | 86 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_uma_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import tvm
from tvm import topi, IRModule
import numpy as np
from tvm.contrib import utils, clang
import tvm.testing
from tvm import te
from typing import Union
def _create_schedule(
    placeholder: list,
    c_code: Union[str, io.TextIOWrapper] = "",
    use_external_conv2d_impl: bool = True,
):
    """Create a TIR schedule from TE tensors, optionally binding an external
    C conv2d kernel (compiled to LLVM IR) onto the conv2d block.

    placeholder: TE tensors forming the compute (passed to create_prim_func).
    c_code: C source for the external kernel, either as a string or an open
        text file; must be non-empty exactly when use_external_conv2d_impl.
    use_external_conv2d_impl: if True, annotate the outermost conv2d loop
        with `pragma_import_llvm` so the external kernel gets linked in.
    """
    # How to do the same with TE
    # Add pragma TE
    # s = te.create_schedule(result.op)
    # axis = result.op.axis
    # s[result].pragma(axis[0], "import_llvm", c_to_llvm())
    # with tvm.transform.PassContext(config={"tir.add_lower_pass": [(1, my_ai_hw_conv2d_pass)]}):
    #     mod = tvm.lower(s, [ifmap, weights, result], simple_mode=True)
    #
    # llvm_mod = tvm.build(mod, [ifmap, weights, result], target=target, name="test_external_conv2d")
    # llvm_mod(ifmap_data, weight_data, result_data)
    # Accept either an already-read string or an open text-file handle.
    if isinstance(c_code, io.TextIOWrapper):
        c_code_str = c_code.read()
    elif isinstance(c_code, str):
        c_code_str = c_code
    else:
        raise TypeError()
    # C source must be supplied if and only if the external impl is requested.
    assert (
        use_external_conv2d_impl
        and c_code_str != ""
        or not use_external_conv2d_impl
        and c_code_str == ""
    )

    def _c_to_llvm(c_code: str) -> str:
        # Compile the C kernel to LLVM IR with clang so TIR can import it.
        temp = utils.tempdir()
        ll_path = temp.relpath("conv2d.ll")
        ll_code = clang.create_llvm([c_code], output=ll_path)
        return ll_code

    func_tir = te.create_prim_func(placeholder)
    ir_module_from_te = IRModule({"main": func_tir})
    sch_tir = tvm.tir.Schedule(ir_module_from_te)
    if use_external_conv2d_impl:
        # Attach the LLVM IR of the external kernel to the conv2d loop nest.
        conv2d_b = sch_tir.get_block("conv2d_nchw")
        conv2d_l = sch_tir.get_loops(conv2d_b)
        sch_tir.annotate(conv2d_l[0], "pragma_import_llvm", _c_to_llvm(c_code_str))
    return sch_tir
def _generate_io_arrays(shapes: dict, dev):
    """Allocate random IFM/weight arrays and a zeroed result array on `dev`.

    `shapes` must contain the keys n, w, h, ci, kw, kh and co.
    """
    n = shapes["n"]
    w = shapes["w"]
    h = shapes["h"]
    ci = shapes["ci"]
    kw = shapes["kw"]
    kh = shapes["kh"]
    co = shapes["co"]

    ifmap = tvm.nd.array(np.random.uniform(size=(n, ci, w, h)).astype("float32"), dev)
    weights = tvm.nd.array(np.random.uniform(size=(co, ci, kh, kw)).astype("float32"), dev)
    result = tvm.nd.array(np.zeros((n, co, w, h)).astype("float32"), dev)
    return ifmap, weights, result
| 3,078 | 33.988636 | 101 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_partition.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
import tvm.relay as relay
from tvm.relay.backend.contrib.uma import uma_available
from tvm.relay.backend.contrib.uma.api import UMAPartitioner
from tvm.relay.op.contrib.register import get_pattern_table
from tvm.relay.testing import mlp, resnet
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
def test_partition_table():
    """Registering a UMAPartitioner must publish its pattern table."""
    target_name = "test_partition"
    partitioner = UMAPartitioner(target_name)
    # Before registration no pattern table may exist for the target ...
    assert get_pattern_table(target_name) is None
    partitioner.register()
    # ... afterwards it must be retrievable.
    assert get_pattern_table(target_name) is not None
@pytest.mark.parametrize(
"workload,backend,merge",
[
("resnet", "dnnl", False),
("resnet", "dnnl", True),
("mlp", "dnnl", False),
("mlp", "dnnl", True),
("resnet", "cutlass", False),
("resnet", "cutlass", True),
("mlp", "cutlass", False),
("mlp", "cutlass", True),
],
)
def test_existing_pattern_tables(workload, backend, merge):
    """Tests that uma partitioner creates the same partitions than default BYOC partitioning"""
    pattern_table = get_pattern_table(backend)
    partitioner = UMAPartitioner(backend, merge)
    for pattern_entry in pattern_table:
        partitioner.add_pattern(*pattern_entry)

    workload_builders = {"resnet": resnet.get_net, "mlp": mlp.get_net}
    assert workload in workload_builders, f"don't know how to find workload for {workload}"
    net = workload_builders[workload](1, 10)

    mod = tvm.ir.IRModule()
    mod["main"] = net

    partitioner.register()
    uma_partitioned_mod = partitioner.partition(mod)

    def _partition_with_default_byoc(module):
        """partitions using default BYOC flow"""
        passes = [
            relay.transform.MergeComposite(pattern_table),
            relay.transform.AnnotateTarget(backend),
        ]
        if merge:
            passes.append(relay.transform.MergeCompilerRegions())
        passes.append(relay.transform.PartitionGraph())
        return tvm.transform.Sequential(passes)(module)

    byoc_partitioned_mod = _partition_with_default_byoc(mod)
    # UMA must produce the same number of partitioned functions as plain BYOC.
    assert len(uma_partitioned_mod.functions) == len(byoc_partitioned_mod.functions)
if __name__ == "__main__":
    # Allow running this test file directly; tvm.testing.main wraps pytest.
    tvm.testing.main()
| 3,024 | 30.842105 | 95 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_uma_lowering_with_umalower.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import pathlib
import tvm
from tests.python.contrib.test_uma.test_uma_utils import _create_schedule, _generate_io_arrays
from tvm import topi
from apps.uma._template.passes import MyAiHwConv2dPass
import tvm.testing
from tvm import te
from tvm.relay.backend.contrib.uma.api.lower import UMALower
from tvm.relay.backend.contrib.uma.api.utils import PassPhase
from tvm.relay.backend.contrib.uma import uma_available
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
def _conv2d_te_definition(shapes: dict) -> list:
    """Create conv2d TE placeholders and the conv2d result for a shape dict.

    `shapes` must contain the keys n, w, h, ci, kw, kh, co.
    Returns [ifmap, weights, result].
    """
    n = shapes["n"]
    w = shapes["w"]
    h = shapes["h"]
    ci = shapes["ci"]
    kw = shapes["kw"]
    kh = shapes["kh"]
    co = shapes["co"]
    ifmap = te.placeholder((n, ci, w, h), dtype="float32", name="ifmap")
    weights = te.placeholder((co, ci, kw, kh), dtype="float32", name="weights")
    # Half-kernel padding keeps the spatial size for odd kernels; unit stride/dilation.
    result = topi.nn.conv2d_nchw(ifmap, weights, stride=1, padding=[kw // 2, kh // 2], dilation=1)
    return [ifmap, weights, result]
def _pepare_conv2d_schedule(shapes, use_external_conv2d_impl=True):
    """Create conv2d placeholders and a TIR schedule using the template C kernel.

    NOTE: the name keeps its historical typo ("pepare") because callers use it.
    """
    tensors = _conv2d_te_definition(shapes)
    # Walk five directories up from this file to reach the repository's "apps" folder.
    repo_apps = (
        pathlib.Path(str(__file__)).parent.parent.parent.parent.parent.joinpath("apps").absolute()
    )
    template_cc = repo_apps / "uma" / "_template" / "conv2dnchw.cc"
    with template_cc.open() as impl_file:
        schedule = _create_schedule(
            tensors, impl_file, use_external_conv2d_impl=use_external_conv2d_impl
        )
    return tensors, schedule
def _run_external_conv2d(dut_io_arrays, conv2d_shapes, target):
    """Lower conv2d through UMA with the external C kernel pass, build and run it."""
    tensors, schedule = _pepare_conv2d_schedule(conv2d_shapes)
    lowerer = UMALower("lower_test")
    # Inject the conv2d-replacement pass at TIR phase 0.
    lowerer._tir_passes.append((PassPhase.TIR_PHASE_0, MyAiHwConv2dPass()))
    with tvm.transform.PassContext():
        lowered_tir = lowerer._lower_stir_to_nstir(schedule.mod["main"])
    ifmap_data, weight_data, result_data = dut_io_arrays
    built = tvm.build(lowered_tir, tensors, target=target, name="test_external_conv2d")
    built(ifmap_data, weight_data, result_data)
def _run_reference_conv2d(reference_io_arrays, conv2d_shapes, target):
    """Build the unmodified TVM conv2d schedule and run it on the reference buffers."""
    tensors, schedule = _pepare_conv2d_schedule(conv2d_shapes)
    built = tvm.build(schedule.mod, tensors, target=target, name="test_reference_conv2d")
    ifmap_buf, weights_buf, result_buf = reference_io_arrays
    built(ifmap_buf, weights_buf, result_buf)
def _prepare_io_arrays(conv2d_shapes, dev):
    """Return (dut_arrays, reference_arrays): shared inputs, independent outputs."""
    dut_arrays = _generate_io_arrays(conv2d_shapes, dev)
    # A second generation only supplies an independent output buffer.
    _, _, reference_out = _generate_io_arrays(conv2d_shapes, dev)
    ifmap_buf, weights_buf = dut_arrays[0], dut_arrays[1]
    return dut_arrays, [ifmap_buf, weights_buf, reference_out]
@pytest.mark.parametrize(
    "n, w, h, ci, kw, kh, co",
    [
        (1, 224, 224, 3, 3, 3, 4),
        (1, 224, 224, 3, 5, 5, 4),
        (1, 224, 224, 3, 7, 7, 4),
        (1, 224, 320, 3, 7, 7, 4),
    ],
)
def test_lower_with_uma(n, w, h, ci, kw, kh, co):
    """Compare conv2d lowered via UMA's external kernel against TVM's reference.

    Fix: removed a verbatim duplicate of the (1, 224, 224, 3, 7, 7, 4) entry
    from the parameter list; it ran the identical test case twice.
    """
    target = tvm.target.Target(target="llvm", host="llvm")
    dev = tvm.device(target.kind.name, 0)
    conv2d_shapes = dict(n=n, w=w, h=h, ci=ci, kw=kw, kh=kh, co=co)
    dut_io_arrays, reference_io_arrays = _prepare_io_arrays(conv2d_shapes, dev)
    _run_external_conv2d(dut_io_arrays, conv2d_shapes, target)
    _run_reference_conv2d(reference_io_arrays, conv2d_shapes, target)
    # Compare DUT results against the reference implementation.
    dut_results = dut_io_arrays[2].numpy()
    ref_results = reference_io_arrays[2].numpy()
    tvm.testing.assert_allclose(dut_results, ref_results, rtol=1e-5)
if __name__ == "__main__":
    # Allow running this test file directly; tvm.testing.main wraps pytest.
    tvm.testing.main()
| 4,478 | 35.713115 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_group_conv2d_NCHWc_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import pytest
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]i[x]o
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(
kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn // 4, kh, kw, 4)
)
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1, 6))
return kernel
def verify_group_conv2d_NCHWc_int8(
    batch,
    in_channel,
    groups,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    dtype="int32",
):
    """Build, run and check grouped int8 conv2d_NCHWc against the python reference.

    Inputs are uint8, weights int8; accumulation dtype is `dtype`. Only runs on
    the skylake-avx512 llvm target (see the device loop at the bottom).
    `add_bias`/`add_relu` are currently unused in the body.
    """
    assert dilation == 1, "conv2d_NCHWc does not support dilation for now."
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, groups, in_size, num_filter, kernel, stride, padding)
    )
    in_height = in_width = in_size
    # for testing functionality,
    # we choose arbitrary block size that can divide the channel,
    # regardless of the performance.
    oc_block = 1
    for bn in range(16, 0, -1):
        if num_filter % bn == 0:
            oc_block = bn
            break
    ic_block = 8
    # Silence autotvm fallback warnings for the duration of the test.
    autotvm.GLOBAL_SCOPE.silent = True
    A = te.placeholder(
        (batch, in_channel // ic_block, in_height, in_width, ic_block), name="A", dtype="uint8"
    )
    # 7D kernel layout: the trailing (ic_block // 4, oc_block, 4) axes pack
    # groups of 4 input channels for the int8 instruction.
    W = te.placeholder(
        (
            num_filter // oc_block,
            in_channel // ic_block // groups,
            kernel,
            kernel,
            ic_block // 4,
            oc_block,
            4,
        ),
        name="W",
        dtype="int8",
    )
    @memoize("topi.tests.test_topi_conv2d_NCHWc_int8.verify_conv2d_NCHWc_int8")
    def get_ref_data():
        # Reference data in plain NCHW/OIHW, then repacked to the blocked layouts.
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype("uint8")
        w_np = np.random.uniform(size=(num_filter, in_channel // groups, kernel, kernel)).astype(
            "int8"
        )
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding, groups)
        return (
            _transform_data(a_np, ic_block),
            _transform_kernel(w_np, ic_block, oc_block),
            _transform_data(c_np, oc_block),
        )
    a_np, w_np, c_np = get_ref_data()
    def check_device(device):
        # Build and run on one target, comparing against the numpy reference.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(dev):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            C = topi.x86.conv2d_NCHWc(
                A,
                W,
                (stride, stride),
                (padding, padding),
                (dilation, dilation),
                "NCHW%dc" % ic_block,
                "NCHW%dc" % oc_block,
                dtype,
            )
            s = topi.x86.schedule_conv2d_NCHWc([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func = tvm.build(
            s,
            [A, W, C],
            device,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation),
        )
        # print(tvm.lower(s, [A, W, C], simple_mode=True))
        func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
    # for device in ["llvm"]:
    for device in ["llvm -mcpu=skylake-avx512"]:
        with autotvm.tophub.context(device):  # load tophub pre-tuned parameters
            check_device(device)
    autotvm.GLOBAL_SCOPE.silent = False
@tvm.testing.uses_gpu
@pytest.mark.skip
def test_conv2d_NCHWc():
    """Grouped int8 NCHWc conv2d on a ResNet50-style workload (skipped by default)."""
    # ResNet50 workloads
    verify_group_conv2d_NCHWc_int8(1, 256, 32, 224, 64, 7, 2, 3)
if __name__ == "__main__":
    # This test requires Skylake or newer Intel machines to generate the correct
    # instruction. It calls the topi operator directly, which requires the 7D
    # kernel layout tested here; older Intel generations need a 6D kernel, so
    # the direct invocation is disabled.
    # test_conv2d_NCHWc()
    pass
| 5,445 | 30.847953 | 97 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_basic.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
from tvm.topi import utils
def test_util():
    """Sanity-check the const-extraction helpers in topi.utils."""
    hundred = tvm.tir.const(100, "int32")
    assert utils.get_const_int(hundred) == 100
    assert utils.get_const_tuple((hundred, hundred)) == (100, 100)
def test_ewise():
    """Each unary topi op must preserve shape and lower to its tir intrinsic."""
    rows = te.var("m")
    cols = te.var("l")
    A = te.placeholder((rows, cols), name="A")

    cases = [
        (topi.exp, "exp"),
        (topi.erf, "erf"),
        (topi.tanh, "tanh"),
        (topi.sigmoid, "sigmoid"),
        (topi.log, "log"),
        (topi.sqrt, "sqrt"),
        (topi.rsqrt, "rsqrt"),
        (topi.sin, "sin"),
        (topi.cos, "cos"),
        (topi.tan, "tan"),
        (topi.atan, "atan"),
    ]
    for op, intrin_name in cases:
        out = op(A)
        assert tuple(out.shape) == tuple(A.shape)
        # The compute body should be a call to the matching tir intrinsic.
        assert out.op.body[0].op.name == "tir." + intrin_name
if __name__ == "__main__":
    # Run both tests when this file is executed as a script.
    test_util()
    test_ewise()
| 1,702 | 29.963636 | 62 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv1d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import itertools
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
# Per-target (compute, schedule) implementations for conv1d in NCW layout.
_conv1d_ncw_implement = {
    "generic": (topi.nn.conv1d_ncw, topi.generic.schedule_conv1d_ncw),
    "cpu": (topi.nn.conv1d_ncw, topi.x86.schedule_conv1d_ncw),
    "gpu": (topi.cuda.conv1d_ncw, topi.cuda.schedule_conv1d_ncw),
}
# Per-target (compute, schedule) implementations for conv1d in NWC layout.
_conv1d_nwc_implement = {
    "generic": (topi.nn.conv1d_nwc, topi.generic.schedule_conv1d_nwc),
    "cpu": (topi.nn.conv1d_nwc, topi.x86.schedule_conv1d_nwc),
    "gpu": (topi.cuda.conv1d_nwc, topi.cuda.schedule_conv1d_nwc),
}
# Grouped conv1d implementations, keyed first by layout, then by target kind.
_group_conv1d_implementations = {
    "NCW": {
        "generic": (topi.nn.group_conv1d_ncw, topi.generic.schedule_group_conv1d_ncw),
        "cpu": (topi.nn.group_conv1d_ncw, topi.x86.schedule_group_conv1d_ncw),
        "gpu": (topi.cuda.group_conv1d_ncw, topi.cuda.schedule_group_conv1d_ncw),
    },
    "NWC": {
        "generic": (topi.nn.group_conv1d_nwc, topi.generic.schedule_group_conv1d_nwc),
        "cpu": (topi.nn.group_conv1d_nwc, topi.x86.schedule_group_conv1d_nwc),
        "gpu": (topi.cuda.group_conv1d_nwc, topi.cuda.schedule_group_conv1d_nwc),
    },
}
def verify_conv1d(
    batch,
    in_channels,
    in_width,
    filters,
    kernel_size=3,
    stride=1,
    dilation=1,
    padding="VALID",
    layout="NCW",
):
    """Build, run and check conv1d against the NCW python reference.

    Inputs are random float32. The NWC case is handled by transposing the data
    to NCW, running the same reference, and transposing the result back.
    Runs on every target enabled via tvm.testing.enabled_targets().
    """
    if layout == "NCW":
        in_shape = [batch, in_channels, in_width]
        kernel_shape = [filters, in_channels, kernel_size]
    else:
        in_shape = [batch, in_width, in_channels]
        kernel_shape = [kernel_size, in_channels, filters]
    dtype = "float32"
    A = te.placeholder(in_shape, name="A", dtype=dtype)
    W = te.placeholder(kernel_shape, name="W", dtype=dtype)
    def get_ref_data(layout):
        # The python reference implements NCW only; transpose NWC data first.
        a_np = np.random.uniform(size=in_shape).astype(dtype)
        w_np = np.random.uniform(size=kernel_shape).astype(dtype)
        if layout == "NWC":
            np_in = np.transpose(a_np, [0, 2, 1])
            np_w = np.transpose(w_np, [2, 1, 0])
        else:
            np_in = a_np
            np_w = w_np
        b_np = tvm.topi.testing.conv1d_ncw_python(np_in, np_w, stride, padding, dilation)
        if layout == "NWC":
            b_np = np.transpose(b_np, [0, 2, 1])
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data(layout)
    def check_target(target, dev):
        # Pick the layout-specific (compute, schedule) pair for this target.
        if layout == "NCW":
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_ncw_implement)
        else:
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_nwc_implement)
        with tvm.target.Target(target):
            B = fcompute(A, W, stride, padding, dilation, "float32")
            s = fschedule([B])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
        func = tvm.build(s, [A, W, B], target)
        func(a, w, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv1d():
    """Exercise conv1d over representative workloads in both NCW and NWC layouts."""
    # (batch, in_channels, in_width, filters, kernel_size, stride, dilation, padding)
    workloads = [
        (1, 1, 8, 1, 3, 1, 1, "VALID"),  # most basic test case
        (1, 1, 8, 1, 3, 1, 1, "SAME"),  # with padding
        (1, 16, 32, 16, 3, 1, 1, "SAME"),  # realistic dimensions
        (1, 16, 32, 16, 3, 2, 1, "SAME"),  # with stride
        (1, 16, 32, 16, 3, 1, 2, "SAME"),  # with dilation
        (8, 16, 32, 16, 3, 1, 1, "SAME"),  # large batch size
        (1, 16, 32, 16, 3, 1, 1, "SAME"),  # other kernel sizes
        (1, 16, 32, 16, 2, 1, 1, "SAME"),
        (1, 16, 32, 16, 1, 1, 1, "SAME"),
        (1, 17, 12, 21, 3, 1, 1, "SAME"),  # non-power-of-two shapes
        (1, 5, 27, 18, 3, 1, 1, "VALID"),
    ]
    for layout in ["NCW", "NWC"]:
        for args in workloads:
            verify_conv1d(*args, layout)
# Parameters consumed by test_group_conv1d through tvm.testing fixtures.
layout = tvm.testing.parameter("NCW", "NWC")
padding = tvm.testing.parameter("SAME", "VALID")
dtype = tvm.testing.parameter("float32")
# batch, in_channels, in_width, filters, kernel_size, stride, dilation, groups
# Fix: two verbatim duplicate workloads were removed from this list; padding is
# already a separate parameter, so the duplicates only re-ran identical cases.
shape = tvm.testing.parameter(
    [1, 4, 8, 4, 3, 1, 1, 4],
    [1, 16, 32, 16, 3, 1, 1, 4],
    [1, 16, 32, 16, 3, 2, 1, 4],
    [1, 16, 32, 16, 3, 1, 2, 4],
    [8, 16, 32, 16, 3, 1, 1, 4],
    [1, 16, 32, 16, 2, 1, 1, 4],
    [1, 16, 32, 16, 1, 1, 1, 4],
    [1, 21, 12, 21, 3, 1, 1, 3],
    [1, 20, 27, 20, 3, 1, 1, 5],
)
def test_group_conv1d(shape, layout, padding, target, dev, dtype):
    """Compare grouped conv1d against the python reference in both layouts.

    Fix: removed a leftover ``print(tvm.lower(...))`` debug statement that
    dumped the lowered IR for every parametrized case.
    """
    batch, in_channels, in_width, filters, kernel_size, stride, dilation, groups = shape
    if layout == "NCW":
        in_shape = [batch, in_channels, in_width]
        kernel_shape = [filters, in_channels // groups, kernel_size]
    else:
        in_shape = [batch, in_width, in_channels]
        kernel_shape = [kernel_size, in_channels // groups, filters]
    # reference data (the python reference implements NCW only)
    a_np = np.random.uniform(size=in_shape).astype(dtype)
    w_np = np.random.uniform(size=kernel_shape).astype(dtype)
    if layout == "NWC":
        np_in = np.transpose(a_np, [0, 2, 1])
        np_w = np.transpose(w_np, [2, 1, 0])
    else:
        np_in = a_np
        np_w = w_np
    b_np = tvm.topi.testing.group_conv1d_ncw_python(np_in, np_w, stride, padding, dilation, groups)
    if layout == "NWC":
        b_np = np.transpose(b_np, [0, 2, 1])
    A = te.placeholder(in_shape, name="A", dtype=dtype)
    W = te.placeholder(kernel_shape, name="W", dtype=dtype)
    fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv1d_implementations[layout])
    with tvm.target.Target(target):
        B = fcompute(A, W, stride, padding, dilation, groups, "float32")
        s = fschedule([B])
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
    func = tvm.build(s, [A, W, B], target)
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
    # NOTE: only test_conv1d runs here; test_group_conv1d needs pytest fixtures.
    test_conv1d()
| 7,200 | 35.005 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_prng.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.relay
import tvm.testing
import tvm.topi
import numpy as np
import scipy.stats
def threefry_split(target, dev, gen):
    """Build and run topi threefry_split on `gen`; return (left, right) numpy keys."""
    key_in = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
    out_left, out_right = tvm.topi.random.threefry_split(key_in)
    sched = tvm.topi.generic.schedule_extern([out_left, out_right])
    func = tvm.build(sched, [key_in, out_left, out_right])
    left_buf = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
    right_buf = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
    func(tvm.nd.array(gen), left_buf, right_buf)
    return left_buf.numpy(), right_buf.numpy()
def threefry_generate(target, dev, gen, size):
    """Build and run topi threefry_generate; return (next_key, uint64 samples)."""
    key_in = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
    next_key, random_bits = tvm.topi.random.threefry_generate(key_in, size)
    sched = tvm.topi.generic.schedule_extern([next_key, random_bits])
    func = tvm.build(sched, [key_in, next_key, random_bits])
    key_out_buf = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"))
    rand_buf = tvm.nd.array(np.zeros(size, dtype="uint64"))
    func(tvm.nd.array(gen), key_out_buf, rand_buf)
    return key_out_buf.numpy(), rand_buf.numpy()
def uniform(target, dev, gen, low, high, size, dtype):
    """Build and run topi uniform sampling; return (next_key, samples).

    Fix: return ``rands.numpy()`` instead of the deprecated ``asnumpy()``
    alias, matching the other helpers in this file.
    """
    gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
    low_placeholder = tvm.te.placeholder(low.shape, name="low", dtype=dtype)
    high_placeholder = tvm.te.placeholder(high.shape, name="high", dtype=dtype)
    left_placeholder, right_placeholder = tvm.topi.random.uniform(
        gen_placeholder, low_placeholder, high_placeholder, size, dtype
    )
    s = tvm.topi.generic.schedule_extern([left_placeholder, right_placeholder])
    f = tvm.build(
        s,
        [gen_placeholder, low_placeholder, high_placeholder, left_placeholder, right_placeholder],
        target=target,
    )
    out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
    rands = tvm.nd.array(np.zeros(size, dtype=dtype), device=dev)
    f(
        tvm.nd.array(gen, device=dev),
        tvm.nd.array(low, device=dev),
        tvm.nd.array(high, device=dev),
        out_gen,
        rands,
    )
    return out_gen.numpy(), rands.numpy()
def multinomial(target, dev, gen, probs, num_samples):
    """Build and run topi multinomial sampling; return (next_key, indices).

    Fixes: use ``indices.numpy()`` instead of the deprecated ``asnumpy()``
    alias, and place the input arrays on ``dev`` as the ``uniform`` helper does.
    """
    gen_placeholder = tvm.te.placeholder(gen.shape, name="gen", dtype="uint64")
    probs_placeholder = tvm.te.placeholder(probs.shape, name="probs", dtype="float32")
    new_gen_placeholder, indices_placeholder = tvm.topi.random.multinomial(
        gen_placeholder, probs_placeholder, num_samples
    )
    s = tvm.topi.generic.schedule_extern([new_gen_placeholder, indices_placeholder])
    f = tvm.build(
        s,
        [gen_placeholder, probs_placeholder, new_gen_placeholder, indices_placeholder],
        target=target,
    )
    out_gen = tvm.nd.array(np.zeros(gen.shape, dtype="uint64"), device=dev)
    indices = tvm.nd.array(np.zeros((*probs.shape[:-1], num_samples), dtype="int32"), device=dev)
    f(tvm.nd.array(gen, device=dev), tvm.nd.array(probs, device=dev), out_gen, indices)
    return out_gen.numpy(), indices.numpy()
@tvm.testing.parametrize_targets("llvm")
def test_threefry_split(target, dev):
    """Splitting a threefry key must yield two distinct, reproducible keys."""
    # test that results of split do not equal each other or the input
    gen = tvm.relay.random.threefry_key(0).data.numpy()
    a, b = threefry_split(target, dev, gen)
    assert (a != b).any() and (
        a != gen
    ).any(), "Splitting a gen should result in different output gens"
    # unittest some split inputs (exact expected key words)
    assert (a == np.array([0, 0, 0, 0, 0, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
    assert (b == np.array([0, 0, 0, 0, 1 << 63, 0, 0, 0, 1 << 62, 0], dtype="uint64")).all()
    # test enough splits to go over path length
    for i in range(129):
        a, b = threefry_split(target, dev, b)
    assert (a[0:4] == b[0:4]).all(), "State part of split should be the same"
    assert (b[0:4] != np.zeros(4, dtype="uint64")).any()
    # check that split then generate does not generate the same for both sides
    a, a_rands = threefry_generate(target, dev, a, (100,))
    b, b_rands = threefry_generate(target, dev, b, (100,))
    assert (
        a_rands != b_rands
    ).all(), "Numbers generated from different initial states should be different"
    # check repeatability: same key must give the same stream and same split
    _, rands1 = threefry_generate(target, dev, a, (100,))
    _, rands2 = threefry_generate(target, dev, a, (100,))
    assert (
        rands1 == rands2
    ).all(), "Numbers generated from the same initial state should be the same"
    a1, b1 = threefry_split(target, dev, a)
    a2, b2 = threefry_split(target, dev, a)
    assert (a1 == a2).all() and (
        b1 == b2
    ).all(), "Split called on the same input should return the same result"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_generate(target, dev):
    """threefry_generate must honor the requested shape, advance the key, and
    handle counter overflow by changing the path/state."""
    gen = tvm.relay.random.threefry_key(0).data.numpy()
    # check that we can generate some data
    a, rands = threefry_generate(target, dev, gen, (2048,))
    assert (
        rands.shape[0] == 2048 and len(rands.shape) == 1
    ), "Output shape should match requested shape"
    # check that gen out does not equal input
    assert (a != gen).any(), "Output generator should be different from input generator"
    # check that we can generate data whose total number of elements is not a multiple of 4.
    a, rands = threefry_generate(target, dev, gen, (7,))
    assert (
        rands.shape[0] == 7 and len(rands.shape) == 1
    ), "Output shape should match requested shape"
    # test enough generates to go over generate limit
    gen = np.array(
        [0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 1 << 63, 0], dtype="uint64"
    )  # make counter large
    a, rands = threefry_generate(target, dev, gen, (2048,))
    assert gen[4] != a[4], "Overflow of counter should trigger path change"
    assert a[7] == 2048, "Overflow of counter should still update counter"
    # check generate with path at length limit
    gen = np.array([0, 0, 0, 0, 0, 0, 0, 2**64 - 2, 0, 0], dtype="uint64")  # make counter large
    a, rands = threefry_generate(target, dev, gen, (2048,))
    assert (
        gen[0:4] != a[0:4]
    ).any(), "Overflowing counter with no space left in path should change state"
@tvm.testing.parametrize_targets("llvm")
def test_threefry_wrapping(target, dev):
    """The target must wrap unsigned integer arithmetic for threefry to be valid.

    Fix: corrected the "suppport" typo in the assertion message.
    """
    assert tvm.topi.random.threefry_test_wrapping(
        target, dev
    ), f"{target} does not support wrapping unsigned integer arithmetic"
@tvm.testing.parametrize_targets("llvm")
def test_uniform(target, dev):
    """uniform() must advance the key and keep samples inside [low, high]."""
    key = tvm.relay.random.threefry_key(0).data.numpy()
    shape = (1024, 1024)
    for dtype in ["float32", "float64"]:
        lo = np.array(5.0, dtype=dtype)
        hi = np.array(10.0, dtype=dtype)
        next_key, samples = uniform(target, dev, key, lo, hi, shape, dtype)
        # The generator state must change after sampling.
        assert (key != next_key).any()
        # Mean of U(5, 10) is 7.5; a 2^20-sample mean should land close to it.
        assert abs(np.mean(samples) - 7.5) < 1e-1
        assert np.min(samples) >= 5.0
        assert np.max(samples) <= 10.0
@tvm.testing.parametrize_targets("llvm")
def test_multinomial(target, dev):
    """multinomial() must advance the key, return in-range indices, and (for
    large sample counts) match the expected category frequencies."""
    def _verify_multinomial(size, num_samples, test_statistics=False):
        """Run multinomial on random logits of shape `size`, drawing
        `num_samples` per row; optionally chi-square test the frequencies."""
        gen = tvm.relay.random.threefry_key(np.random.randint(0, 1e5)).data.numpy()
        probs = np.random.randint(low=-50, high=1000, size=size).astype("float32")
        new_gen, indices = multinomial(target, dev, gen, probs, num_samples)
        assert (gen != new_gen).any()
        assert np.min(indices) >= 0
        assert np.max(indices) < probs.shape[-1]
        # Note, only use test_statistics with sample size > 10,000.
        if test_statistics:
            # Clipped and normalized probabilities * number of samples
            # represents expected frequency of each category.
            # First upcast to float64 to remove numerical error.
            probs = probs.astype("float64")
            probs = np.reshape(probs, [-1, probs.shape[-1]])
            probs = np.maximum(probs, 0)
            probs = probs / np.expand_dims(np.sum(probs, axis=-1), axis=-1)
            # Multiply by number of samples and add epsilon to get non-zero expected samples per index.
            expected_frequency = probs * num_samples + np.finfo(float).eps
            # Do a small adjustment to make sure each row of expected_frequencies sums to exactly num_samples.
            expected_frequency = (
                np.expand_dims((num_samples / np.sum(expected_frequency, axis=-1)), axis=-1)
                * expected_frequency
            )
            # Reduce shape to a 2D matrix.
            indices = np.reshape(indices, [-1, indices.shape[-1]])
            # Split indendent rows of indices.
            index_list = [np.squeeze(x, 0) for x in np.split(indices, indices.shape[0], axis=0)]
            # Count frequency of selected indices in each row.
            observed_freqs = [np.bincount(samples, minlength=size[-1]) for samples in index_list]
            # Stack observed frequencies back into a matrix.
            observed_freqs = np.stack(observed_freqs, axis=0)
            # Test how closely observed samples match expectations.
            _, p_value = scipy.stats.chisquare(observed_freqs, expected_frequency, axis=-1)
            # If sampled correctly, p_value should be greater than 1e-6 almost all the time.
            assert np.all(p_value > 1e-6)
    # Test simple 1-D case.
    _verify_multinomial([3], 2)
    # Test 2-D case.
    _verify_multinomial([2, 10], 1)
    # Test 3-D case.
    _verify_multinomial([2, 3, 10], 4)
    # Test large sample size statistics.
    _verify_multinomial([3, 10], 10000, test_statistics=True)
if __name__ == "__main__":
    # Run every PRNG test on the llvm target / CPU device when invoked directly.
    test_threefry_split(tvm.target.Target("llvm"), tvm.device("cpu"))
    test_threefry_generate(tvm.target.Target("llvm"), tvm.device("cpu"))
    test_threefry_wrapping(tvm.target.Target("llvm"), tvm.device("cpu"))
    test_uniform(tvm.target.Target("llvm"), tvm.device("cpu"))
    test_multinomial(tvm.target.Target("llvm"), tvm.device("cpu"))
| 10,817 | 43.887967 | 110 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_dense_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Test code for dense tensorcore operator"""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm import te
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
# Per-target (compute, schedule) implementations; verify_dense runs "cuda" only.
_dense_implement = {"gpu": [(topi.cuda.dense_tensorcore, topi.cuda.schedule_dense_tensorcore)]}
def convert_int32_into_int4(a_int32):
    """Pack a 2-D array of int4-range values into int32 words, 8 values per word.

    Element j of each group of 8 lands in nibble (7 - j), so the first element
    occupies the most significant nibble. Only the low nibble of each input
    value is kept. The last axis must be divisible by 8.

    Parameters
    ----------
    a_int32 : numpy.ndarray
        2-D int32 array of values to pack.

    Returns
    -------
    numpy.ndarray
        int32 array of shape (rows, cols // 8) holding the packed nibbles.
    """
    rows, cols = a_int32.shape
    assert cols % 8 == 0
    packed = np.zeros(shape=(rows, cols // 8), dtype=np.int32)
    for r in range(rows):
        for w in range(cols // 8):
            word = np.int32(0)
            for j in range(min(8, cols - w * 8)):
                # Keep the low nibble and shift it into slot (7 - j).
                word = word | ((a_int32[r, w * 8 + j] & 0xF) << ((7 - j) * 4))
            packed[r, w] = word
    return packed
def convert_int32_into_int4_bias(a_int32):
    """Pack a 1-D array of int4-range values into int32 words, 8 values per word.

    Same nibble layout as `convert_int32_into_int4`, applied to a 1-D bias
    vector; the length must be divisible by 8.

    Parameters
    ----------
    a_int32 : numpy.ndarray
        1-D int32 array of values to pack.

    Returns
    -------
    numpy.ndarray
        int32 array of shape (length // 8) holding the packed nibbles.
    """
    (length,) = a_int32.shape
    assert length % 8 == 0
    packed = np.zeros(shape=(length // 8), dtype=np.int32)
    for w in range(length // 8):
        word = np.int32(0)
        for j in range(min(8, length - w * 8)):
            # Keep the low nibble and shift it into slot (7 - j).
            word = word | ((a_int32[w * 8 + j] & 0xF) << ((7 - j) * 4))
        packed[w] = word
    return packed
def verify_dense(batch, in_dim, out_dim, dtype, use_bias=True):
    """Dense tensorcore verify function.

    Builds dense (+ optional bias, + relu) for the given dtype, runs it on
    CUDA, and compares against the python reference. int4 inputs are packed
    into int32 words before execution; int8/int4 accumulate in int32,
    float16 in float32.
    """
    A = te.placeholder((batch, in_dim), name="A", dtype=dtype)
    B = te.placeholder((out_dim, in_dim), name="B", dtype=dtype)
    C = te.placeholder((out_dim,), name="C", dtype=dtype)
    assert dtype in ["int4", "int8", "float16"]
    out_dtype = "float32"
    if dtype in ["int8", "int4"]:
        out_dtype = "int32"
    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_dense_tensorcore")
    def get_ref_data():
        # Random inputs per dtype; reference result via tvm.topi.testing.dense.
        if dtype == "int4":
            a_np = np.random.randint(low=-8, high=7, size=(batch, in_dim))
            b_np = np.random.randint(low=-8, high=7, size=(out_dim, in_dim))
            c_np = np.random.randint(low=-8, high=7, size=(out_dim,))
        elif dtype == "int8":
            a_np = np.random.randint(low=-128, high=127, size=(batch, in_dim)).astype(dtype)
            b_np = np.random.randint(low=-128, high=127, size=(out_dim, in_dim)).astype(dtype)
            c_np = np.random.randint(low=-128, high=127, size=(out_dim,)).astype(dtype)
        else:
            a_np = np.random.uniform(size=(batch, in_dim)).astype(dtype)
            b_np = np.random.uniform(size=(out_dim, in_dim)).astype(dtype)
            c_np = np.random.uniform(size=(out_dim,)).astype(dtype)
        d_np = tvm.topi.testing.dense(a_np, b_np, c_np, use_bias, True, out_dtype)
        return (a_np, b_np, c_np, d_np)
    # get the test data
    a_np, b_np, c_np, d_np = get_ref_data()
    if dtype == "int4":
        # Pack int4 operands 8-per-int32 for the tensorcore kernel.
        a_np = convert_int32_into_int4(a_np)
        b_np = convert_int32_into_int4(b_np)
        c_np = convert_int32_into_int4_bias(c_np)
    def check_device(device):
        # Build and run dense(+relu) on one device, compare to the reference.
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        for fcompute, fschedule in tvm.topi.testing.dispatch(device, _dense_implement):
            with tvm.target.Target(device):
                D = fcompute(A, B, C if use_bias else None, out_dtype)
                D = topi.nn.relu(D)
                s = fschedule([D])
            a = tvm.nd.array(a_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(c_np, dev)
            d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
            f = tvm.build(s, [A, B, C, D], device, name="dense")
            f(a, b, c, d)
            tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-3)
    check_device("cuda")
@tvm.testing.requires_tensorcore
def test_dense_tensorcore():
    """Test cases for tensorcore dense in float16, int8 and int4.

    Fix: the (8, 16, 32) workload was hard-coded to "float16" yet sat inside
    the dtype loop, so it executed the identical test twice; it is hoisted
    out to run exactly once.
    """
    verify_dense(8, 16, 32, "float16", use_bias=True)
    for dtype in ["float16", "int8"]:
        verify_dense(16, 32, 16, dtype, use_bias=True)
        verify_dense(256, 1024, 1024, dtype, use_bias=True)
        verify_dense(1000, 1024, 1024, dtype, use_bias=False)
        verify_dense(256, 2048, 1000, dtype, use_bias=False)
    # TODO: need fix int4 use_bias=True, wyc-ruiker
    verify_dense(16, 32, 16, "int4", use_bias=False)
    verify_dense(256, 1024, 1024, "int4", use_bias=False)
    verify_dense(1000, 1024, 1024, "int4", use_bias=False)
    verify_dense(256, 2048, 1000, "int4", use_bias=False)
# Allow running this file directly as a script (outside of pytest).
if __name__ == "__main__":
    test_dense_tensorcore()
| 5,413 | 36.337931 | 95 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv3d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for 3d convolution with winograd."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
# Maps a target kind to the (compute, schedule) pair used by
# verify_conv3d_ncdhw; only the GPU winograd conv3d implementation is listed.
_conv3d_ncdhw_implement = {
    "gpu": (topi.cuda.conv3d_ncdhw_winograd, topi.cuda.schedule_conv3d_ncdhw_winograd),
}
def verify_conv3d_ncdhw(
    batch,
    in_channel,
    in_size,
    num_filter,
    depth_kernel,
    space_kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
):
    """Build winograd conv3d (NCDHW) for one workload and compare it with the
    python reference implementation.

    The input is a cube of edge ``in_size``; the kernel is
    ``depth_kernel x space_kernel x space_kernel``. Optional bias-add and relu
    are fused after the convolution.
    """
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
        padding, (depth_kernel, space_kernel, space_kernel)
    )
    padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, space_kernel, stride, padding_sum, dilation)
    )
    in_depth = in_height = in_width = in_size
    A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
    W = te.placeholder((num_filter, in_channel, depth_kernel, space_kernel, space_kernel), name="W")
    bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype
    # NOTE(review): the memoize key string does not encode the workload
    # parameters; presumably the decorator also keys on the call arguments --
    # confirm that distinct workloads do not collide in the on-disk cache.
    @memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
    def get_ref_data():
        """Random inputs plus the dilated-kernel python reference output."""
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
        c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()
    def check_device(device):
        """Compile for `device` and check the numerical result."""
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv3d_ncdhw_implement)
        with tvm.target.Target(device):
            C = fcompute(
                A, W, (stride, stride, stride), padding, (dilation, dilation, dilation), 1, dtype
            )
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # The argument list differs depending on whether bias is fused in.
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    space_kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    space_kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )
            func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
    for device in ["cuda"]:
        with autotvm.tophub.context(device):  # load tophub pre-tuned parameters
            check_device(device)
@tvm.testing.requires_gpu
def test_conv3d_ncdhw():
    """Run verify_conv3d_ncdhw over a table of representative workloads."""
    # Each row: (batch, in_channel, in_size, num_filter, depth_kernel,
    # space_kernel, stride, padding, extra keyword arguments).
    workloads = [
        # 3DCNN workloads, with and without depth transformation
        (1, 61, 20, 120, 3, 3, 1, 0, {}),
        (1, 61, 20, 120, 1, 3, 1, 0, {}),
        (1, 61, 20, 120, 5, 3, 1, 0, {}),
        (1, 61, 20, 120, 5, 5, 1, 2, {}),
        (1, 61, 20, 120, 1, 5, 1, 2, {}),
        (1, 61, 20, 120, 7, 7, 1, 3, {}),
        (1, 128, 12, 256, 3, 3, 1, 1, {}),
        (1, 64, 12, 128, 3, 3, 1, 1, {}),
        # bias, relu
        (1, 64, 12, 128, 3, 3, 1, 1, {"add_relu": True}),
        (1, 64, 12, 128, 3, 3, 1, 1, {"add_relu": True, "add_bias": True}),
        (1, 64, 12, 128, 1, 3, 1, 1, {"add_relu": True, "add_bias": True}),
        # dilation = 2
        (1, 16, 12, 16, 3, 3, 1, "VALID", {"dilation": 2}),
        (1, 16, 12, 16, 1, 3, 1, "VALID", {"dilation": 2}),
        # batch size > 1
        (4, 32, 12, 64, 3, 3, 1, 1, {}),
        (4, 32, 12, 64, 1, 3, 1, 1, {}),
        # weird workloads
        (2, 2, 2, 2, 3, 3, 1, 2, {}),
        (3, 3, 3, 3, 3, 3, 1, 3, {}),
    ]
    for batch, in_ch, size, filters, dk, sk, stride, pad, extra in workloads:
        verify_conv3d_ncdhw(batch, in_ch, size, filters, dk, sk, stride, pad, **extra)
# Allow running this file directly as a script (outside of pytest).
if __name__ == "__main__":
    test_conv3d_ncdhw()
| 6,220 | 33.370166 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_instance_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for instance_norm."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
# Dispatch table for instance_norm scheduling; only the generic injective
# schedule is registered, which is why the test restricts itself to llvm.
_instance_norm_schedule = {
    "generic": topi.generic.schedule_injective,
}
# only test on llvm because schedule is missing
@tvm.testing.parametrize_targets("llvm")
@pytest.mark.parametrize("shape,axis", [([4, 16], (1,)), ([4, 16, 16], (1, 2))])
def test_instance_norm(
    target, dev, shape, axis, episilon=1e-5, dtype="float32", rtol=1e-5, atol=1e-5
):
    """Compare topi.nn.instance_norm against the numpy reference.

    Note: ``episilon`` keeps its historical spelling since it is part of
    the signature.
    """
    # gamma/beta are shaped by the normalized axes.
    scale_shape = [shape[dim] for dim in axis]
    # Symbolic inputs and the instance-norm computation.
    data = te.placeholder(shape, dtype=dtype, name="data")
    gamma = te.placeholder(scale_shape, dtype=dtype, name="gamma")
    beta = te.placeholder(scale_shape, dtype=dtype, name="beta")
    out = topi.nn.instance_norm(data, gamma, beta, axis, episilon)
    # Random test data and the numpy reference result.
    data_np = np.random.uniform(size=shape).astype(dtype)
    gamma_np = np.random.uniform(size=scale_shape).astype(dtype)
    beta_np = np.random.uniform(size=scale_shape).astype(dtype)
    expected = tvm.topi.testing.instance_norm_python(data_np, gamma_np, beta_np, axis, episilon)
    with tvm.target.Target(target):
        schedule_fn = tvm.topi.testing.dispatch(target, _instance_norm_schedule)
        sched = schedule_fn([out])
    arrays = [tvm.nd.array(arr, dev) for arr in (data_np, gamma_np, beta_np)]
    result = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=dtype), dev)
    func = tvm.build(sched, [data, gamma, beta, out], target)
    func(*arrays, result)
    tvm.testing.assert_allclose(result.numpy(), expected, rtol=rtol, atol=atol)
# Delegate to the tvm test harness when run directly.
if __name__ == "__main__":
    tvm.testing.main()
| 2,505 | 37.553846 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_batch_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for the batch_norm operator."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
_DEVICE = "llvm"
_BATCH_NORM_IMPLEMENT = {
"generic": (topi.nn.batch_norm, topi.generic.schedule_batch_norm),
"cpu": (topi.nn.batch_norm, topi.x86.schedule_batch_norm),
}
@pytest.mark.parametrize(
    "shape, axis, epsilon, center, scale, training, momentum",
    [
        ((1,), 0, 0.1, True, True, False, 0.1),
        ((2, 3), 0, 0.1, True, True, False, 0.1),
        ((1, 2, 4), 0, 0.1, True, True, False, 0.1),
        ((1, 2, 3, 4), 0, 0.001, False, False, False, 0.1),
        ((2, 3, 4, 1), 1, 0.01, False, True, False, 0.1),
        ((3, 4, 1, 2), 2, 0.1, True, False, True, 0.1),
        ((4, 1, 2, 3), 3, 1.0, True, True, True, 0.2),
        ((1, 2, 4, 4, 5), 0, 0.1, True, True, True, 0.3),
    ],
)
def test_batch_norm(shape, axis, epsilon, center, scale, training, momentum):
    """Compare topi batch_norm (output plus running stats) with the numpy reference."""
    # Random input and per-channel parameters along `axis`.
    x_np = np.random.random(shape).astype("float32")
    gamma_np = np.random.random(shape[axis]).astype("float32")
    beta_np = np.random.random(shape[axis]).astype("float32")
    moving_mean_np = np.random.random(shape[axis]).astype("float32")
    moving_var_np = np.random.random(shape[axis]).astype("float32")
    # Numpy reference: normalized output and updated running statistics.
    out_x_np, out_moving_mean_np, out_moving_var_np = tvm.topi.testing.batch_norm(
        x_np,
        gamma_np,
        beta_np,
        moving_mean_np,
        moving_var_np,
        axis,
        epsilon,
        center,
        scale,
        training,
        momentum,
    )
    # Symbolic inputs mirroring the numpy arrays.
    x_te = te.placeholder(shape, name="x", dtype="float32")
    gamma_te = te.placeholder((shape[axis],), name="gamma", dtype="float32")
    beta_te = te.placeholder((shape[axis],), name="beta", dtype="float32")
    moving_mean_te = te.placeholder((shape[axis],), name="moving_mean", dtype="float32")
    moving_var_te = te.placeholder((shape[axis],), name="moving_var", dtype="float32")
    with tvm.target.Target(_DEVICE):
        fcompute, fschedule = tvm.topi.testing.dispatch(_DEVICE, _BATCH_NORM_IMPLEMENT)
        out_x, out_moving_mean, out_moving_var = fcompute(
            x_te,
            gamma_te,
            beta_te,
            moving_mean_te,
            moving_var_te,
            axis,
            epsilon,
            center,
            scale,
            training,
            momentum,
        )
        s = fschedule([out_x, out_moving_mean, out_moving_var])
        # Device buffers: inputs copied in, outputs zero-initialized.
        dev = tvm.device(_DEVICE, 0)
        x_tvm = tvm.nd.array(x_np, dev)
        gamma_tvm = tvm.nd.array(gamma_np, dev)
        beta_tvm = tvm.nd.array(beta_np, dev)
        moving_mean_tvm = tvm.nd.array(moving_mean_np, dev)
        moving_var_tvm = tvm.nd.array(moving_var_np, dev)
        out_x_tvm = tvm.nd.array(np.zeros(shape, dtype=out_x.dtype), dev)
        out_moving_mean_tvm = tvm.nd.array(
            np.zeros((shape[axis],), dtype=out_moving_mean.dtype), dev
        )
        out_moving_var_tvm = tvm.nd.array(np.zeros((shape[axis],), dtype=out_moving_var.dtype), dev)
        f = tvm.build(
            s,
            [
                x_te,
                gamma_te,
                beta_te,
                moving_mean_te,
                moving_var_te,
                out_x,
                out_moving_mean,
                out_moving_var,
            ],
            _DEVICE,
        )
        f(
            x_tvm,
            gamma_tvm,
            beta_tvm,
            moving_mean_tvm,
            moving_var_tvm,
            out_x_tvm,
            out_moving_mean_tvm,
            out_moving_var_tvm,
        )
        tvm.testing.assert_allclose(out_x_tvm.numpy(), out_x_np, rtol=1e-3)
        tvm.testing.assert_allclose(out_moving_mean_tvm.numpy(), out_moving_mean_np, rtol=1e-3)
        tvm.testing.assert_allclose(out_moving_var_tvm.numpy(), out_moving_var_np, rtol=1e-3)
# Delegate to the tvm test harness when run directly.
if __name__ == "__main__":
    tvm.testing.main()
| 4,705 | 33.350365 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_broadcast.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for broadcasting operators."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_broadcast_to_ele(in_shape, out_shape, fbcast):
    """Check `fbcast` against np.broadcast_to on every enabled target."""
    # Declare the computation once; all targets share it.
    A = te.placeholder(shape=in_shape, name="A")
    B = fbcast(A, out_shape)

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_broadcast_schedule(target)(B)
        kernel = tvm.build(s, [A, B], target, name="broadcast_to")
        input_np = np.random.uniform(size=in_shape).astype(A.dtype)
        expected = np.broadcast_to(input_np, out_shape)
        input_nd = tvm.nd.array(input_np, dev)
        output_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), dev)
        kernel(input_nd, output_nd)
        tvm.testing.assert_allclose(output_nd.numpy(), expected)

    for target, _dev in tvm.testing.enabled_targets():
        check_target(target)
    # sdaccel is not reported by enabled_targets(); try it explicitly.
    check_target("sdaccel")
def verify_broadcast_binary_ele(
    lhs_shape,
    rhs_shape,
    ftopi,
    fnumpy,
    lhs_min=-100,
    lhs_max=100,
    rhs_min=-100,
    rhs_max=100,
    dtype="float32",
):
    """Check a broadcast binary topi op against its numpy counterpart.

    A shape of ``None`` makes that operand a scalar ``te.var`` instead of a
    tensor placeholder; the min/max arguments bound the random test data.
    """
    # Build the logic and compile the function
    A = (
        te.var("A", dtype=dtype)
        if lhs_shape is None
        else te.placeholder(shape=lhs_shape, name="A", dtype=dtype)
    )
    B = (
        te.var("B", dtype=dtype)
        if rhs_shape is None
        else te.placeholder(shape=rhs_shape, name="B", dtype=dtype)
    )
    C = ftopi(A, B)
    # Two scalar operands fold into a plain expression; nothing to execute.
    if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
        assert isinstance(C, tvm.tir.PrimExpr)
        return
    def gen_operand(shape, low, high, dev):
        """Random operand: a host-side python scalar, or a device tensor."""
        if shape is None:
            npy = float(np.random.uniform(low=low, high=high))
            if dtype.startswith("int"):
                npy = int(npy)
            nd = npy
        else:
            npy = np.random.uniform(low=low, high=high, size=shape).astype(dtype)
            nd = tvm.nd.array(npy, dev)
        return npy, nd
    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_broadcast_schedule(target)(C)
        # The topi op's __name__ is baked into the kernel name.
        foo = tvm.build(s, [A, B, C], target, name="broadcast_binary" + "_" + ftopi.__name__)
        lhs_npy, lhs_nd = gen_operand(lhs_shape, lhs_min, lhs_max, dev)
        rhs_npy, rhs_nd = gen_operand(rhs_shape, rhs_min, rhs_max, dev)
        out_npy = fnumpy(lhs_npy, rhs_npy)
        out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
        foo(lhs_nd, rhs_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target)
    # sdaccel is not reported by enabled_targets(); try it explicitly.
    check_target("sdaccel")
@tvm.testing.uses_gpu
def test_broadcast_to():
    """broadcast_to over vector, scalar and high-rank shapes."""
    cases = [
        ((1,), (10,)),
        ((), (10,)),
        ((1, 1, 5, 4), (3, 4, 4, 4, 5, 4)),
        ((1, 128, 1, 32), (64, 128, 64, 32)),
    ]
    for in_shape, out_shape in cases:
        verify_broadcast_to_ele(in_shape, out_shape, topi.broadcast_to)
@tvm.testing.uses_gpu
def test_add():
    """Broadcast add: scalar shapes and rank-mismatched tensor shapes."""
    for lhs, rhs in [((), ()), ((5, 2, 3), (2, 1))]:
        verify_broadcast_binary_ele(lhs, rhs, topi.add, np.add)
@tvm.testing.uses_gpu
def test_subtract():
    """Broadcast subtract, including scalar (None) operands."""
    for lhs, rhs in [((5, 2, 3), ()), ((5, 2, 3), None), (None, None), ((1, 32), (64, 32))]:
        verify_broadcast_binary_ele(lhs, rhs, topi.subtract, np.subtract)
@tvm.testing.uses_gpu
def test_multiply():
    # Single rank-expanding broadcast case (rank-3 lhs vs rank-4 rhs).
    verify_broadcast_binary_ele((5, 64, 128), (2, 5, 64, 1), topi.multiply, np.multiply)
@tvm.testing.uses_gpu
def test_divide():
    """Broadcast divide; rhs_min keeps the divisor away from zero."""
    for lhs, rhs in [(None, (10,)), ((), None), ((2, 3, 1, 32), (64, 32))]:
        verify_broadcast_binary_ele(lhs, rhs, topi.divide, np.divide, rhs_min=0.0001)
@tvm.testing.uses_gpu
def test_floor_divide():
    """Broadcast floor-division against a floor(a / b) reference."""

    def _canonical_floor_div(a, b):
        # Reference semantics: true quotient rounded toward negative infinity.
        return np.floor(a / b)

    for lhs, rhs in [(None, (10,)), ((), None), ((2, 3, 64, 32), (64, 32))]:
        verify_broadcast_binary_ele(
            lhs, rhs, topi.floor_divide, _canonical_floor_div, rhs_min=0.0001
        )
@tvm.testing.uses_gpu
def test_maximum_minmum():
    """Broadcast maximum and minimum (the name keeps its historical 'minmum' typo)."""
    verify_broadcast_binary_ele((32,), (64, 32), topi.maximum, np.maximum)
    verify_broadcast_binary_ele((1, 2, 2, 1, 32), (64, 32), topi.minimum, np.minimum)
@tvm.testing.uses_gpu
def test_power():
    # lhs_min=0.001 keeps bases positive -- presumably to avoid complex
    # results from fractional powers of negative numbers.
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.power, np.power, lhs_min=0.001, rhs_min=0.001, rhs_max=2
    )
@tvm.testing.uses_gpu
def test_mod():
    # int32 mod with non-negative operands (rhs_min=1 avoids division by zero).
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.mod, np.mod, lhs_min=0.001, rhs_min=1, dtype="int32"
    )
@tvm.testing.uses_gpu
def test_floor_mod():
    """Broadcast floor-mod against a - floor(a / b) * b, for int and float dtypes."""

    def _canonical_floor_mod(a, b):
        return a - np.floor(a / b) * b

    for lhs, rhs, dtype in [((1, 2, 2), (2,), "int32"), ((3, 4, 5), (3, 4, 5), "float32")]:
        verify_broadcast_binary_ele(
            lhs, rhs, topi.floor_mod, _canonical_floor_mod, lhs_min=0.001, rhs_min=1, dtype=dtype
        )
@tvm.testing.uses_gpu
def test_cmp():
    """Broadcast comparisons, results explicitly cast to int8.

    The wrappers keep distinct names because verify_broadcast_binary_ele
    embeds ``ftopi.__name__`` in the generated kernel name.
    """

    def greater(a, b):
        return topi.greater(a, b).astype("int8")

    def less(a, b):
        return topi.less(a, b).astype("int8")

    def equal(a, b):
        return topi.equal(a, b).astype("int8")

    def not_equal(a, b):
        return topi.not_equal(a, b).astype("int8")

    def greater_equal(a, b):
        return topi.greater_equal(a, b).astype("int8")

    def less_equal(a, b):
        return topi.less_equal(a, b).astype("int8")

    verify_broadcast_binary_ele((1, 2, 2), (2,), greater, np.greater)
    verify_broadcast_binary_ele((2, 1, 2), (2, 3, 1), less, np.less)
    # Narrow int32 ranges so (in)equality actually occurs in random data.
    narrow = dict(lhs_min=-2, lhs_max=2, rhs_min=-2, rhs_max=2, dtype="int32")
    wide = dict(lhs_min=-3, lhs_max=3, rhs_min=-3, rhs_max=3, dtype="int32")
    verify_broadcast_binary_ele((2, 1, 2), (2, 3, 1), equal, np.equal, **narrow)
    verify_broadcast_binary_ele((2, 1, 2), (2, 3, 1), not_equal, np.not_equal, **narrow)
    verify_broadcast_binary_ele((7, 1, 5), (7, 3, 1), greater_equal, np.greater_equal, **wide)
    verify_broadcast_binary_ele((7, 1, 5), (7, 3, 1), less_equal, np.less_equal, **wide)
@tvm.testing.uses_gpu
def test_shift():
    """Broadcast right/left shift on int32; shift amounts bounded to [0, 32]."""
    # explicit specify the output type
    verify_broadcast_binary_ele(
        (2, 1, 2), None, topi.right_shift, np.right_shift, dtype="int32", rhs_min=0, rhs_max=32
    )
    # The original contained this left_shift case twice, byte-for-byte
    # (copy-paste duplicate); a single invocation gives identical coverage.
    verify_broadcast_binary_ele(
        (1, 2, 2), (2,), topi.left_shift, np.left_shift, dtype="int32", rhs_min=0, rhs_max=32
    )
@tvm.testing.uses_gpu
def test_logical_single_ele():
    """Unary logical op (logical_not) on bool arrays."""

    def test_apply(
        func,
        name,
        f_numpy,
        indata,
        dtype="bool",
    ):
        # Build the logic and compile the function
        A = te.placeholder(shape=indata.shape, name="A", dtype=dtype)
        B = func(A)
        # A scalar expression has nothing to schedule or execute.
        if isinstance(A, tvm.tir.PrimExpr):
            assert isinstance(B, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_broadcast_schedule(target)(B)
            foo = tvm.build(s, [A, B], target, name=name)
            data_npy = indata.astype(A.dtype)
            data_nd = tvm.nd.array(data_npy, dev)
            out_npy = f_numpy(indata)
            out_nd = tvm.nd.array(np.empty(data_npy.shape).astype(B.dtype), dev)
            foo(data_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), out_npy)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    test_apply(topi.logical_not, "logical_not", np.logical_not, np.array([True, False, 0, 1]))
    test_apply(topi.logical_not, "logical_not", np.logical_not, np.array(np.arange(5) < 3))
@tvm.testing.uses_gpu
def test_bitwise_not():
    """Elementwise bitwise_not on int32 tensors, scalar-shape and rank-3."""

    def run_case(func, name, f_numpy, shape, dtype="int32"):
        # Declare the op once; shared by all targets.
        in_tensor = te.placeholder(shape=shape, name="A", dtype=dtype)
        out_tensor = func(in_tensor)
        # If the input degenerates to a plain expression, only check that the
        # op returns an expression as well; there is nothing to execute.
        if isinstance(in_tensor, tvm.tir.PrimExpr):
            assert isinstance(out_tensor, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                sched = tvm.topi.testing.get_broadcast_schedule(target)(out_tensor)
            kernel = tvm.build(sched, [in_tensor, out_tensor], target, name=name)
            in_np = np.random.uniform(size=shape).astype(in_tensor.dtype)
            expected = f_numpy(in_np)
            in_nd = tvm.nd.array(in_np, dev)
            out_nd = tvm.nd.array(np.empty(in_np.shape).astype(out_tensor.dtype), dev)
            kernel(in_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), expected)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    for shape in ((), (2, 1, 2)):
        run_case(topi.bitwise_not, "bitwise_not", np.bitwise_not, shape)
@tvm.testing.uses_gpu
def test_logical_binary_ele():
    """Binary logical ops (and/or/xor) on bool operands."""

    def test_apply(
        func,
        name,
        f_numpy,
        lhs,
        rhs,
        dtype="bool",
    ):
        # Build the logic and compile the function
        A = te.var("A", dtype=dtype)
        B = te.var("B", dtype=dtype)
        C = func(A, B)
        # NOTE(review): A and B are always te.var here, so this early return
        # always fires and the check_target path below is never executed.
        if isinstance(A, tvm.tir.PrimExpr) and isinstance(B, tvm.tir.PrimExpr):
            assert isinstance(C, tvm.tir.PrimExpr)
            return

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_broadcast_schedule(target)(C)
            foo = tvm.build(s, [A, B, C], target, name=name)
            lhs_nd = tvm.nd.array(lhs, dev)
            rhs_nd = tvm.nd.array(rhs, dev)
            out_npy = f_numpy(lhs, rhs)
            out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(C.dtype), dev)
            foo(lhs_nd, rhs_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), out_npy, rtol=1e-4, atol=1e-4)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    test_apply(topi.logical_and, "logical_and", np.logical_and, True, False)
    test_apply(topi.logical_and, "logical_and", np.logical_and, [True, False], [False, False])
    test_apply(topi.logical_or, "logical_or", np.logical_or, True, False)
    test_apply(topi.logical_or, "logical_or", np.logical_or, [True, False], [False, False])
    test_apply(topi.logical_xor, "logical_xor", np.logical_xor, True, False)
    test_apply(topi.logical_xor, "logical_xor", np.logical_xor, [True, False], [False, False])
@tvm.testing.uses_gpu
def test_bitwise_and():
    """Broadcast bitwise_and on int32: scalar/scalar and tensor/tensor."""
    for shape in (None, (2, 1, 2)):
        verify_broadcast_binary_ele(shape, shape, topi.bitwise_and, np.bitwise_and, dtype="int32")
@tvm.testing.uses_gpu
def test_bitwise_or():
    """Broadcast bitwise_or on int32: scalar/scalar and tensor/tensor."""
    for shape in (None, (2, 1, 2)):
        verify_broadcast_binary_ele(shape, shape, topi.bitwise_or, np.bitwise_or, dtype="int32")
@tvm.testing.uses_gpu
def test_bitwise_xor():
    """Broadcast bitwise_xor on int32: scalar/scalar and tensor/tensor."""
    for shape in (None, (2, 1, 2)):
        verify_broadcast_binary_ele(shape, shape, topi.bitwise_xor, np.bitwise_xor, dtype="int32")
if __name__ == "__main__":
    # Run every test in the same order as the original explicit call list.
    for _test in (
        test_add,
        test_shift,
        test_cmp,
        test_mod,
        test_floor_mod,
        test_subtract,
        test_multiply,
        test_divide,
        test_floor_divide,
        test_maximum_minmum,
        test_power,
        test_broadcast_to,
        test_logical_single_ele,
        test_bitwise_not,
        test_logical_binary_ele,
        test_bitwise_and,
        test_bitwise_or,
        test_bitwise_xor,
    ):
        _test()
| 14,216 | 30.593333 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import sys
import pytest
import numpy as np
import tvm
from tvm import autotvm, te, topi
import tvm.topi.testing
from tvm.contrib import cudnn
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.x86.conv2d_avx_common import _fallback_schedule
import tvm.testing
# Storage dtypes under test; random_seed is fixed so the cached ref_data
# fixture is reproducible across runs.
dtype = tvm.testing.parameter("float16", "float32")
random_seed = tvm.testing.parameter(0)
@tvm.testing.fixture
def input_shape(batch, in_channel, in_size):
    # NCHW layout with square spatial dimensions.
    return (batch, in_channel, in_size, in_size)
@tvm.testing.fixture
def weight_shape(num_filter, in_channel, kernel):
    # OIHW layout with a square kernel.
    return (num_filter, in_channel, kernel, kernel)
@tvm.testing.fixture
def bias_shape(num_filter):
    # One bias value per output channel, broadcastable over H and W.
    return (num_filter, 1, 1)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
    random_seed,
    input_shape,
    weight_shape,
    bias_shape,
    dtype,
    stride,
    padding,
    dilation,
    add_bias,
    apply_relu,
):
    """Reference (input, weight, bias, expected output) arrays for one workload."""
    np.random.seed(random_seed)
    # scipy.signal.convolve2d does not support float16 data types, and
    # the python fallback is too slow for general use. Computing
    # ref_data in float32 will have fewer rounding errors than the TVM
    # float16 compute, but those vary based on schedule anyways.
    conv_dtype = "float32" if dtype == "float16" else dtype
    a_np = np.random.uniform(size=input_shape).astype(dtype)
    w_np = np.random.uniform(size=weight_shape).astype(dtype)
    b_np = np.random.uniform(size=bias_shape).astype(dtype)
    # Dilate the kernel, convolve in conv_dtype, then cast back to dtype.
    dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
    c_np = tvm.topi.testing.conv2d_nchw_python(
        a_np.astype(conv_dtype), dw_np.astype(conv_dtype), stride, padding
    ).astype(dtype)
    if add_bias:
        c_np = c_np + b_np
    if apply_relu:
        c_np = np.maximum(c_np, 0)
    return a_np, w_np, b_np, c_np
class BaseConv2DTests:
    """Shared conv2d NCHW correctness tests; subclasses supply workload parameters."""

    # Defaults that workload subclasses may override.
    add_bias = tvm.testing.parameter(False)
    apply_relu = tvm.testing.parameter(False)
    dilation = tvm.testing.parameter(1)
    batch = tvm.testing.parameter(1)

    def test_conv2d_nchw(
        self,
        target,
        dev,
        batch,
        in_channel,
        in_size,
        num_filter,
        kernel,
        stride,
        padding,
        dtype,
        ref_data,
        dilation,
        add_bias,
        apply_relu,
    ):
        """Compile conv2d for `target` and compare against the ref_data fixture."""
        target = tvm.target.Target(target)
        is_cudnn_target = target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", [])
        # Skip combinations the target cannot execute.
        if target.kind.name == "vulkan" and dtype == "float16":
            if not target.attrs.get("supports_float16", False) or not target.attrs.get(
                "supports_16bit_buffer", False
            ):
                pytest.xfail("Vulkan device does not support float16")
        if (
            target.kind.name == "cuda"
            and dtype == "float16"
            and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
        ):
            pytest.xfail("CUDA float16 intrinsics not available")
        pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
        padding_sum = pad_top + pad_left + pad_bottom + pad_right
        has_asymmetric_padding = (pad_top != pad_bottom) or (pad_left != pad_right)
        if is_cudnn_target and has_asymmetric_padding:
            pytest.xfail("CuDNN does not support asymmetric padding")
        a_np, w_np, b_np, c_np = ref_data
        A = te.placeholder(a_np.shape, name="A", dtype=dtype)
        W = te.placeholder(w_np.shape, name="W", dtype=dtype)
        bias = te.placeholder(b_np.shape, name="bias", dtype=dtype)
        # Comparison tolerance depends on the accumulation dtype.
        if "int" in dtype:
            tol = {"atol": 0, "rtol": 0}
        elif dtype == "float32":
            tol = {"rtol": 1e-4, "atol": 2e-4}
        elif dtype == "float16":
            # A summation in float16 with a single accumulator very
            # quickly runs into large rounding errors. At some point,
            # this tolerance should be schedule-dependent for to avoid
            # false negatives.
            num_values_summed = in_channel * kernel * kernel
            gap_size = np.nextafter(c_np.max(), np.inf, dtype=c_np.dtype) - c_np.max()
            tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
        with autotvm.tophub.context(target):  # load tophub pre-tuned parameters
            if is_cudnn_target:
                fcompute, fschedule = topi.cuda.conv2d_cudnn, topi.cuda.schedule_conv2d_cudnn
            else:
                fcompute, fschedule = tvm.topi.testing.get_conv2d_nchw_implement(target)
            with target:
                if is_cudnn_target:
                    C = fcompute(
                        A, W, (stride, stride), padding, (dilation, dilation), 1, "NCHW", dtype
                    )
                else:
                    C = fcompute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
                if add_bias:
                    C = topi.add(C, bias)
                if apply_relu:
                    C = topi.nn.relu(C)
                s = fschedule([C])
            a = tvm.nd.array(a_np, dev)
            w = tvm.nd.array(w_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
            func = tvm.build(
                s,
                [A, W, bias, C],
                target,
                name="conv2d_{}_{}_{}_{}_{}_{}_{}_{}_{}".format(
                    dtype,
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )
            func(a, w, b, c)
            tvm.testing.assert_allclose(c.numpy(), c_np, **tol)

    @tvm.testing.parametrize_targets("llvm")
    def test_workload_padding(
        self,
        target,
        input_shape,
        weight_shape,
        stride,
        padding,
        dilation,
        dtype,
        ref_data,
    ):
        """Check the x86 fallback schedule's tile_ow against the true output width."""
        a_np, w_np, b_np, c_np = ref_data
        _, _, out_height, out_width = c_np.shape
        A = te.placeholder(input_shape, name="A", dtype=dtype)
        W = te.placeholder(weight_shape, name="W", dtype=dtype)
        with tvm.target.Target(target):
            wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)
            # check if tile_ow candidates are the factors of the right output weight.
            cfg = autotvm.get_config()
            _fallback_schedule(cfg, wkl)
            ow_tile = np.prod(cfg["tile_ow"].size)
            tvm.testing.assert_allclose(ow_tile, out_width)
class TestResNet18Workloads(BaseConv2DTests):
    """Conv2d workloads taken from ResNet-18."""

    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 224, 64, 7, 2, 3),
        (64, 56, 64, 3, 1, 1),
        (64, 56, 64, 1, 1, 0),
        (64, 56, 128, 3, 2, 1),
        (64, 56, 128, 1, 2, 0),
        (128, 28, 128, 3, 1, 1),
        (128, 28, 256, 3, 2, 1),
        (128, 28, 256, 1, 2, 0),
        (256, 14, 256, 3, 1, 1),
        (256, 14, 512, 3, 2, 1),
        (256, 14, 512, 1, 2, 0),
        (512, 7, 512, 3, 1, 1),
    )
class TestInceptionV3Workloads(BaseConv2DTests):
    """Conv2d workloads taken from Inception-V3."""

    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 299, 32, 3, 2, 0),
        (32, 149, 32, 3, 1, 0),
        (32, 147, 64, 3, 1, 1),
        (64, 73, 80, 1, 1, 0),
        (80, 73, 192, 3, 1, 0),
        (192, 35, 64, 1, 1, 0),
        (192, 35, 48, 1, 1, 0),
        (48, 35, 64, 5, 1, 2),
        (64, 35, 96, 3, 1, 1),
        (96, 35, 96, 3, 1, 1),
        (192, 35, 32, 1, 1, 0),
        (256, 35, 64, 1, 1, 0),
        (256, 35, 48, 1, 1, 0),
        (288, 35, 64, 1, 1, 0),
        (288, 35, 48, 1, 1, 0),
        (288, 35, 384, 3, 2, 0),
        (96, 35, 96, 3, 2, 0),
        (768, 17, 192, 1, 1, 0),
        (768, 17, 128, 1, 1, 0),
        (128, 17, 128, 1, 1, 0),
        (128, 17, 192, 7, 1, 3),
        (128, 17, 128, 7, 1, 3),
        (128, 17, 192, 1, 1, 0),
        (768, 17, 160, 1, 1, 0),
        # disable these tests due to some bugs of llvm with nvptx
        # (160, 17, 160, 1, 1, 0),
        (160, 17, 192, 7, 1, 3),
        (160, 17, 160, 7, 1, 3),
        (160, 17, 192, 1, 1, 0),
        (192, 17, 192, 1, 1, 0),
        (192, 17, 192, 7, 1, 3),
        (192, 17, 320, 3, 2, 0),
        (192, 17, 192, 3, 2, 0),
        (1280, 8, 320, 1, 1, 0),
        (1280, 8, 384, 1, 1, 0),
        (384, 8, 384, 1, 1, 0),
        (384, 8, 384, 3, 1, 1),
        (1280, 8, 448, 1, 1, 0),
        (448, 8, 384, 3, 1, 1),
        (1280, 8, 192, 1, 1, 0),
        (2048, 8, 320, 1, 1, 0),
        (2048, 8, 384, 1, 1, 0),
        (2048, 8, 448, 1, 1, 0),
        (2048, 8, 192, 1, 1, 0),
        (1024, 19, 84, 3, 1, 1),
        (2048, 10, 126, 3, 1, 1),
        (512, 5, 126, 3, 1, 1),
        (256, 3, 126, 3, 1, 1),
    )
class TestWeirdWorkloads(BaseConv2DTests):
    """Degenerate conv2d workloads where every dimension equals the same value."""

    # Columns: batch, in_channel, in_size, num_filter, kernel, stride, padding.
    batch, in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (2, 2, 2, 2, 2, 2, 2),
        (3, 3, 3, 3, 3, 3, 3),
        (4, 4, 4, 4, 4, 4, 4),
        (5, 5, 5, 5, 5, 5, 5),
        (6, 6, 6, 6, 6, 6, 6),
        # disable these tests due to some bugs of llvm with nvptx
        # (1, 1, 1, 1, 1, 1, 1),
        # (2, 13, 71, 59, 3, 1, 1),
    )
class TestAsymmetricPadding(BaseConv2DTests):
    """Conv2d workloads mixing tuple paddings with "SAME"/"VALID" strings."""

    dilation = tvm.testing.parameter(1, 2)
    # Columns: in_channel, in_size, num_filter, kernel, stride, padding.
    # Padding may be a 2-tuple, a 4-tuple, or one of the strings "SAME"/"VALID".
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (3, 35, 64, 7, 2, (0, 0, 1, 1)),
        (64, 8, 128, 3, 1, (3, 3, 2, 2)),
        (64, 8, 64, 1, 1, (1, 2, 2, 1)),
        (64, 17, 192, 1, 1, (1, 2)),
        (64, 8, 64, 3, 1, (3, 1)),
        (128, 8, 384, 3, 1, (0, 2)),
        (64, 35, 64, 3, 1, (1, 2)),
        (64, 8, 64, 1, 1, "VALID"),
        (388, 8, 64, 3, 1, "VALID"),
        (64, 10, 48, 3, 1, "VALID"),
        (512, 19, 64, 1, 1, "SAME"),
        (64, 5, 32, 2, 1, "SAME"),
        (64, 8, 64, 3, 1, "SAME"),
        (64, 8, 64, 3, 1, (1, 2, 2, 1)),
        (64, 8, 64, 5, 2, (1, 3)),
        (64, 8, 64, 3, 1, "VALID"),
        (64, 8, 64, 24, 1, "SAME"),
        (32, 35, 64, 7, 2, (0, 0, 2, 2)),
    )
class TestBatchSize(BaseConv2DTests):
    """One fixed workload run at several batch sizes."""

    # Columns: in_channel, in_size, num_filter, kernel, stride, padding.
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (64, 56, 64, 3, 1, 1),
    )
    batch = tvm.testing.parameter(1, 4, 9)
class TestBiasRelu(BaseConv2DTests):
    """Workloads crossed with all four bias/relu epilogue combinations."""

    apply_relu = tvm.testing.parameter(True, False, ids=["relu", "no_relu"])
    add_bias = tvm.testing.parameter(True, False, ids=["bias", "no_bias"])
    # Columns: in_channel, in_size, num_filter, kernel, stride, padding.
    in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
        (64, 56, 64, 3, 1, 1),
        (64, 8, 64, 3, 1, (1, 2, 2, 1)),
        (64, 8, 64, 5, 2, (1, 3)),
        (64, 8, 64, 3, 1, "VALID"),
        (64, 8, 64, 24, 1, "SAME"),
    )
if __name__ == "__main__":
    # Delegate to tvm.testing.main so the usual pytest discovery/flags apply.
    tvm.testing.main()
| 11,837 | 32.346479 | 96 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_loss.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for loss operators."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import tvm.testing
# Parameter sets consumed by test_nll_loss below:
# (prediction_shape, reduction, ignore_index, dtype).
prediction_shape, reduction, ignore_index, dtype = tvm.testing.parameters(
    ((10, 5), "mean", -100, "float32"),
    ((10, 5, 2, 2), "mean", -100, "float32"),
    ((10, 5), "sum", -100, "float32"),
    ((10, 5), "none", -100, "float32"),
    ((10, 5), "mean", 3, "float32"),
    ((10, 5), "mean", -100, "float64"),
    ((5,), "mean", -100, "float32"),
    ((5,), "mean", 3, "float32"),
    ((5,), "none", -100, "float32"),
)
def test_nll_loss(target, dev, prediction_shape, reduction, ignore_index, dtype):
    """Compile topi.nn.nll_loss and compare it with the numpy-based reference."""
    # A 1-D prediction tensor is a single un-batched sample; otherwise the
    # class axis is axis 1 and the target tensor drops that axis.
    if len(prediction_shape) == 1:
        num_classes = prediction_shape[0]
        target_shape = []
    else:
        num_classes = prediction_shape[1]
        target_shape = prediction_shape[:1] + prediction_shape[2:]
    predictions = te.placeholder(shape=prediction_shape, name="predictions", dtype=dtype)
    targets = te.placeholder(shape=target_shape, name="targets", dtype="int32")
    weights = te.placeholder(shape=(num_classes,), name="weights", dtype=dtype)
    nll_loss_result = topi.nn.nll_loss(predictions, targets, weights, reduction, ignore_index)
    with tvm.target.Target(target):
        fschedule = tvm.topi.testing.get_reduce_schedule(target)
        s = fschedule([nll_loss_result])
    fn = tvm.build(s, [predictions, targets, weights, nll_loss_result], target, name="nll_loss")
    # Random inputs plus the reference result computed on the host.
    pred_np = np.random.uniform(size=prediction_shape).astype(dtype)
    tgt_np = np.random.randint(0, num_classes, target_shape).astype("int32")
    wgt_np = np.random.uniform(size=(num_classes,)).astype(dtype)
    expected = tvm.topi.testing.nll_loss(pred_np, tgt_np, wgt_np, reduction, ignore_index)
    args = [
        tvm.nd.array(pred_np, dev),
        tvm.nd.array(tgt_np, dev),
        tvm.nd.array(wgt_np, dev),
        tvm.nd.array(np.empty(expected.shape).astype(nll_loss_result.dtype), dev),
    ]
    fn(*args)
    tvm.testing.assert_allclose(args[-1].numpy(), expected, rtol=1e-4, atol=1e-5)
if __name__ == "__main__":
    # Delegate to tvm.testing.main so the usual pytest discovery/flags apply.
    tvm.testing.main()
| 3,021 | 38.763158 | 96 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, *args):
    """Take numpy arrays as args, convert them to TVM tensors and call `lam`.
    Result of lambda is converted back to numpy array and returned.
    """
    dev = tvm.cpu(0)
    # One placeholder (pl0, pl1, ...) and one device array per input.
    placeholders = [
        te.placeholder(arg.shape, name="pl" + str(idx)) for idx, arg in enumerate(args)
    ]
    inputs_nd = [tvm.nd.array(arg, dev) for arg in args]
    out = lam(*placeholders)
    out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
    sched = te.create_schedule([out.op])
    mod = tvm.build(sched, placeholders + [out], "llvm")
    mod(*inputs_nd, out_nd)
    return out_nd.numpy()
def verify_nn_matmul(sa, sb, transp_a, transp_b):
    """Compare topi.nn.matmul with numpy for one shape/transpose combination."""
    lhs = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
    rhs = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
    expected = np.matmul(
        np.transpose(lhs) if transp_a else lhs,
        np.transpose(rhs) if transp_b else rhs,
    )
    actual = with_tvm(
        lambda A, B: topi.nn.matmul(A, B, transpose_a=transp_a, transpose_b=transp_b),
        lhs,
        rhs,
    )
    tvm.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)
def test_nn_matmul():
    """Exercise topi.nn.matmul over shapes and transpose-flag combinations."""
    verify_nn_matmul((1, 1), (1, 1), False, False)
    verify_nn_matmul((1, 1), (1, 1), True, True)
    verify_nn_matmul((2, 2), (2, 2), False, False)
    verify_nn_matmul((2, 2), (2, 2), True, True)
    verify_nn_matmul((2, 3), (3, 5), False, False)
    verify_nn_matmul((5, 3), (3, 2), False, False)
    verify_nn_matmul((3, 5), (3, 2), True, False)
    verify_nn_matmul((3, 5), (2, 3), True, True)
    # The case ((3, 5), (3, 2), True, False) was listed a second time here;
    # the redundant duplicate call has been removed.
    verify_nn_matmul((5, 3), (2, 3), False, True)
def verify_matmul(sa, sb, transp_a, transp_b):
    """Compare topi.matmul with numpy for one shape/transpose combination."""
    lhs = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
    rhs = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
    expected = np.matmul(
        np.transpose(lhs) if transp_a else lhs,
        np.transpose(rhs) if transp_b else rhs,
    )
    actual = with_tvm(lambda A, B: topi.matmul(A, B, transp_a, transp_b), lhs, rhs)
    tvm.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)
def test_matmul():
    """Exercise topi.matmul over shapes and transpose-flag combinations."""
    verify_matmul((1, 1), (1, 1), False, False)
    verify_matmul((1, 1), (1, 1), True, True)
    verify_matmul((2, 2), (2, 2), False, False)
    verify_matmul((2, 2), (2, 2), True, True)
    verify_matmul((2, 3), (3, 5), False, False)
    verify_matmul((5, 3), (3, 2), False, False)
    verify_matmul((3, 5), (3, 2), True, False)
    verify_matmul((3, 5), (2, 3), True, True)
def verify_tensordot(sa, sb, axes):
    """Compare topi.tensordot against np.tensordot for one shape/axes combo."""
    lhs = np.random.uniform(low=-1.0, high=1.0, size=sa).astype(np.float32)
    rhs = np.random.uniform(low=-1.0, high=1.0, size=sb).astype(np.float32)
    expected = np.tensordot(lhs, rhs, axes)
    actual = with_tvm(lambda A, B: topi.tensordot(A, B, axes), lhs, rhs)
    tvm.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)
def test_tensordot():
    """Exercise topi.tensordot with int axes and tuple axes specifications."""
    # NOTE: (3) is just the int 3, not a 1-tuple; np.random.uniform accepts an
    # int size, so this still produces 1-D length-3 inputs.
    verify_tensordot((3), (3), 0)
    verify_tensordot((2, 3), (3, 5), 1)
    verify_tensordot((2, 2, 3), (2, 3, 5), 2)
    verify_tensordot((2, 2, 3, 4), (2, 3, 4, 5), 3)
    verify_tensordot((3, 2, 2), (2, 3, 5), (1, 0))
    verify_tensordot((3, 2, 2), (2, 3, 5), ((1, 0), (0, 1)))
    verify_tensordot((4, 3, 2, 2), (2, 4, 3, 5), ((1, 2, 0), (2, 0, 1)))
if __name__ == "__main__":
    # Run the three test entry points directly when invoked as a script.
    test_nn_matmul()
    test_matmul()
    test_tensordot()
| 4,226 | 37.427273 | 90 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_dilate.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
def test_dilate():
    """Compare topi.nn.dilate against the python reference for 1-D..6-D inputs."""
    target = "llvm"
    dev = tvm.cpu(0)

    def _check(input_size, strides, dilation_value=None):
        # Forward dilation_value only when the caller supplied one, so the
        # operator/reference defaults are exercised otherwise.
        extra = () if dilation_value is None else (dilation_value,)
        data = te.placeholder(input_size)
        out = topi.nn.dilate(data, strides, *extra)
        sched = te.create_schedule(out.op)
        data_np = np.random.uniform(size=input_size).astype(data.dtype)
        expected = tvm.topi.testing.dilate_python(data_np, strides, *extra)
        data_nd = tvm.nd.array(data_np, device=dev)
        out_shape = topi.utils.get_const_tuple(out.shape)
        out_nd = tvm.nd.array(np.zeros(shape=out_shape).astype(out.dtype), device=dev)
        func = tvm.build(sched, [data, out], target)
        func(data_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), expected, rtol=1e-5)

    cases = [
        ((32,), (2,), None),
        ((32, 32), (2, 2), None),
        ((1, 3, 32, 32), (1, 1, 1, 1), None),
        ((1, 3, 32, 32), (2, 2, 2, 2), None),
        ((1, 32, 32, 3, 3), (1, 1, 1, 1, 1), None),
        ((1, 32, 32, 3, 3), (2, 2, 2, 2, 2), None),
        ((1, 32, 32, 32, 3, 3), (1, 1, 1, 2, 2, 2), None),
        ((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1), None),
        ((1, 32, 32, 32, 3, 3), (2, 2, 2, 1, 1, 1), 1.0),
    ]
    for size, strides, dilation_value in cases:
        _check(size, strides, dilation_value)
if __name__ == "__main__":
    # Run the single test entry point directly when invoked as a script.
    test_dilate()
| 2,455 | 39.262295 | 95 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_deformable_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Dispatch tables mapping a target kind to its (compute, schedule) pair;
# consumed by tvm.topi.testing.dispatch in the verify_* helpers below.
_deformable_conv2d_nchw_implement = {
    "generic": (topi.nn.deformable_conv2d_nchw, topi.generic.schedule_deformable_conv2d_nchw),
    "cuda": (topi.cuda.deformable_conv2d_nchw, topi.cuda.schedule_deformable_conv2d_nchw),
}
_deformable_conv2d_nhwc_implement = {
    "generic": (topi.nn.deformable_conv2d_nhwc, topi.generic.schedule_deformable_conv2d_nhwc),
}
def verify_deformable_conv2d_nchw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    deformable_groups=1,
    groups=1,
):
    """Build deformable_conv2d (NCHW layout) for llvm and cuda and compare the
    result against the python reference implementation."""
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding,
            dilation,
            deformable_groups,
            groups,
        )
    )
    A = te.placeholder((batch, in_channel, in_size, in_size), name="A")
    out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
    # The offset channel count is 2 values per kernel element per deformable group.
    Offset = te.placeholder(
        (batch, deformable_groups * kernel * kernel * 2, out_size, out_size), name="offset"
    )
    W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
    # NOTE(review): bias is declared (and b_np generated below) but never fed
    # to the operator or the reference — it is unused in this check.
    bias = te.placeholder((num_filter, 1, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    offset_shape = get_const_tuple(Offset.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype
    # Memoized so repeated runs with the same workload reuse the reference data.
    @memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        offset_np = np.random.randn(*offset_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        c_np = tvm.topi.testing.deformable_conv2d_nchw_python(
            a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
        )
        return a_np, offset_np, w_np, c_np
    a_np, offset_np, w_np, c_np = get_ref_data()
    def check_device(device):
        # Compile and run on one target, skipping targets that are not enabled.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nchw_implement)
        with tvm.target.Target(device):
            C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        offset = tvm.nd.array(offset_np, dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
        func = tvm.build(s, [A, Offset, W, C], device)
        func(a, offset, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    for device in ["llvm", "cuda"]:
        check_device(device)
def verify_deformable_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    deformable_groups=1,
    groups=1,
):
    """Build deformable_conv2d (NHWC layout) for llvm and compare the result
    against the python reference implementation."""
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding,
            dilation,
            deformable_groups,
            groups,
        )
    )
    A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
    out_size = (in_size - (kernel - 1) * dilation - 1 + 2 * padding) // stride + 1
    # The offset channel count is 2 values per kernel element per deformable group.
    Offset = te.placeholder(
        (batch, out_size, out_size, deformable_groups * kernel * kernel * 2), name="offset"
    )
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    # NOTE(review): bias is declared (and b_np generated below) but never fed
    # to the operator or the reference — it is unused in this check.
    bias = te.placeholder((num_filter,), name="bias")
    a_shape = get_const_tuple(A.shape)
    offset_shape = get_const_tuple(Offset.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype
    # NOTE(review): the memoize key says "..._nchw" even though this is the
    # NHWC variant; the trailing function name keeps the caches distinct.
    @memoize("topi.tests.test_topi_deformable_conv2d_nchw.verify_deformable_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        offset_np = np.random.randn(*offset_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        c_np = tvm.topi.testing.deformable_conv2d_nhwc_python(
            a_np, offset_np, w_np, stride, padding, dilation, deformable_groups, groups
        )
        return a_np, offset_np, w_np, c_np
    a_np, offset_np, w_np, c_np = get_ref_data()
    def check_device(device):
        # Compile and run on one target, skipping targets that are not enabled.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nhwc_implement)
        with tvm.target.Target(device):
            C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        offset = tvm.nd.array(offset_np, dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)
        func = tvm.build(s, [A, Offset, W, C], device)
        func(a, offset, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    for device in ["llvm"]:
        check_device(device)
@tvm.testing.uses_gpu
def test_deformable_conv2d_nchw():
    """NCHW deformable conv2d over 1x1 and dilated 3x3 kernels."""
    verify_deformable_conv2d_nchw(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
    verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
    verify_deformable_conv2d_nchw(1, 16, 7, 16, 3, 1, 2, dilation=2)
def test_deformable_conv2d_nhwc():
    """NHWC deformable conv2d over 1x1 and dilated 3x3 kernels (llvm only)."""
    verify_deformable_conv2d_nhwc(1, 16, 7, 16, 1, 1, 0, deformable_groups=4)
    verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 1, dilation=2, deformable_groups=4)
    verify_deformable_conv2d_nhwc(1, 16, 7, 16, 3, 1, 2, dilation=2)
if __name__ == "__main__":
    # Run both test entry points directly when invoked as a script.
    test_deformable_conv2d_nchw()
    test_deformable_conv2d_nhwc()
| 7,405 | 33.446512 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm.topi.testing import searchsorted_ref
from tvm import te, topi
# Per-target searchsorted compute callables; wrapped by get_implementations().
topi_funcs = {"generic": topi.searchsorted, "cuda": topi.cuda.searchsorted}
def get_implementations():
    """Return the (compute, schedule) dispatch table for searchsorted.

    "cuda" and "vulkan" share the cuda implementation with the extern schedule.
    """
    generic_compute = topi_funcs["generic"]
    cuda_compute = topi_funcs["cuda"]
    # cuda and vulkan dispatch to the exact same pair.
    gpu_entry = (
        lambda x, y, side, out_dtype: cuda_compute(x, y, side, out_dtype),
        topi.cuda.schedule_extern,
    )
    return {
        "generic": (
            lambda x, y, side, out_dtype: generic_compute(x, y, side, out_dtype),
            topi.generic.schedule_extern,
        ),
        "cuda": gpu_entry,
        "vulkan": gpu_entry,
    }
@tvm.testing.parametrize_targets
def test_searchsorted(dev, target):
    """End-to-end check of topi searchsorted against the numpy-based reference."""
    def verify_with_input(sorted_sequence_np, values_np, right):
        # Compile searchsorted for the given concrete inputs and compare the
        # produced indices with searchsorted_ref.
        sorted_sequence = te.placeholder(sorted_sequence_np.shape, dtype="float32")
        values = te.placeholder(values_np.shape, dtype="float32")
        out_dtype = "int32"
        implementations = get_implementations()
        fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
        with tvm.target.Target(target):
            indices = fcompute(sorted_sequence, values, right, out_dtype)
            s = fschedule([indices])
        func = tvm.build(s, [sorted_sequence, values, indices], target=target)
        # NOTE(review): this shadows the injected ``dev`` fixture with a device
        # re-derived from ``target``.
        dev = tvm.device(target, 0)
        a = tvm.nd.array(sorted_sequence_np, dev)
        b = tvm.nd.array(values_np, dev)
        c = tvm.nd.array(np.zeros(values_np.shape, dtype=indices.dtype), dev)
        func(a, b, c)
        ref = searchsorted_ref(sorted_sequence_np, values_np, right, out_dtype)
        np.testing.assert_equal(c.numpy(), ref)
    def verify(sequence_len, num_search, outer_axes, right, sorted_sequence_1d=False):
        # Generate random (sorted along the last axis) sequences and random
        # query values of the requested shapes, then defer to verify_with_input.
        if sorted_sequence_1d:
            sorted_sequence_shape = (sequence_len,)
        else:
            sorted_sequence_shape = outer_axes + (sequence_len,)
        values_shape = outer_axes + (num_search,)
        verify_with_input(
            np.sort(np.random.randn(*sorted_sequence_shape).astype("float32"), axis=-1),
            np.random.randn(*values_shape).astype("float32"),
            right,
        )
    verify(1024, 1000, (10, 5, 3), False)
    verify(999, 2000, (10, 5, 3), True)
    verify(1000, 1000, (), False)
    verify(2001, 100, (500,), True)
    verify(2001, 100, (500,), False, sorted_sequence_1d=True)
    # Check edge cases
    for right in [True, False]:
        sorted_sequence = np.array([1, 2, 3, 4, 5], dtype="float32")
        verify_with_input(sorted_sequence, np.array([6], dtype="float32"), right)
        verify_with_input(sorted_sequence, np.array([0], dtype="float32"), right)
| 3,642 | 37.755319 | 88 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_dft.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for discrete Fourier transform."""
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
# Test-parameter fixtures: test_dft runs over the cross product of these.
inverse = tvm.testing.parameter(False, True)
shape = tvm.testing.parameter((7,), (3, 7), (3, 4, 5))
dtype = tvm.testing.parameter("float16", "float32", "float64")
# pylint: disable=redefined-outer-name, invalid-name
def numpy_reference(inverse, re: np.ndarray, im: np.ndarray):
    """Reference DFT: combine *re*/*im* into a complex array, apply numpy's
    fft (or ifft when *inverse*), and return the (real, imag) parts."""
    transform = np.fft.ifft if inverse else np.fft.fft
    spectrum = transform(re + 1j * im)
    return np.real(spectrum), np.imag(spectrum)
def test_dft(target, dev, inverse, shape, dtype):
    """Test for discrete Fourier transform."""
    # (compute, schedule) pairs per target kind; resolved via dispatch below.
    implementations = {
        "generic": (
            topi.dft,
            topi.generic.schedule_extern,
        ),
        "gpu": (
            topi.cuda.dft,
            topi.cuda.schedule_extern,
        ),
        "nvptx": (
            topi.cuda.dft,
            topi.cuda.schedule_extern,
        ),
    }
    Re = tvm.te.placeholder(shape, dtype=dtype, name="Re")
    Im = tvm.te.placeholder(shape, dtype=dtype, name="Im")
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
        outs = fcompute(Re, Im, inverse)
        s = fschedule(outs)
        f = tvm.build(s, [Re, Im, *outs], target)
    re_np = np.random.normal(size=shape).astype(dtype)
    im_np = np.random.normal(size=shape).astype(dtype)
    re = tvm.nd.array(re_np, device=dev)
    im = tvm.nd.array(im_np, device=dev)
    re_out = tvm.nd.array(np.zeros(shape).astype(dtype), device=dev)
    im_out = tvm.nd.array(np.zeros(shape).astype(dtype), device=dev)
    f(re, im, re_out, im_out)
    re_reference, im_reference = numpy_reference(inverse, re_np, im_np)
    # Looser tolerance for float16, which loses precision in the transform.
    atol = rtol = 1e-3
    if dtype == "float16":
        atol = rtol = 1e-1
    tvm.testing.assert_allclose(re_out.numpy(), re_reference, rtol=rtol, atol=atol)
    tvm.testing.assert_allclose(im_out.numpy(), im_reference, rtol=rtol, atol=atol)
if __name__ == "__main__":
    # Delegate to tvm.testing.main so the usual pytest discovery/flags apply.
    tvm.testing.main()
| 2,884 | 31.41573 | 83 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv3d_ndhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
# Dispatch table mapping a target kind to its (compute, schedule) pair;
# consumed by tvm.topi.testing.dispatch in verify_conv3d_ndhwc below.
_conv3d_ndhwc_implement = {
    "generic": (topi.nn.conv3d_ndhwc, topi.generic.schedule_conv3d_ndhwc),
    "cpu": (topi.x86.conv3d_ndhwc, topi.x86.schedule_conv3d_ndhwc),
    "gpu": (topi.cuda.conv3d_ndhwc, topi.cuda.schedule_conv3d_ndhwc),
}
def verify_conv3d_ndhwc(
    target,
    dev,
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    groups=1,
):
    """Build conv3d (NDHWC layout) on *target* and compare the result against
    the python reference implementation."""
    # in_size/kernel may be a scalar (cubic) or a (depth, height, width) tuple.
    if isinstance(in_size, tuple):
        in_depth, in_height, in_width = in_size
    else:
        in_depth = in_height = in_width = in_size
    if isinstance(kernel, tuple):
        kernel_depth, kernel_height, kernel_width = kernel
    else:
        kernel_depth = kernel_height = kernel_width = kernel
    A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
    W = te.placeholder(
        (kernel_depth, kernel_height, kernel_width, in_channel // groups, num_filter), name="W"
    )
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    dtype = A.dtype
    # Memoized so repeated runs with the same workload reuse the reference data.
    @memoize("topi.tests.test_topi_conv3d_ndhwc.verify_ndhwc.v2")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
        b_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding, groups)
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data()
    fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ndhwc_implement)
    with tvm.target.Target(target):
        B = fcompute(A, W, stride, padding, dilation, groups, dtype)
        s = fschedule([B])
    # NOTE(review): this shadows the ``dev`` parameter with a device re-derived
    # from ``target``.
    dev = tvm.device(target, 0)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], target)
    # NOTE(review): prints the lowered IR for every workload — debug output.
    print(tvm.lower(s, [A, W, B], target))
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_conv3d_ndhwc(target, dev):
    """Run conv3d_ndhwc through a representative set of workloads."""
    verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME")
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME")
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME")
    # (1, 64, 32, 64, 3, 1, "VALID") was listed twice; the redundant duplicate
    # call has been removed.
    verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "VALID")
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "VALID")
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "VALID")
    # dilation = 2
    verify_conv3d_ndhwc(target, dev, 1, 64, 32, 64, 3, 1, "SAME", dilation=2)
    verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 3, 3), (1, 2, 2), "SAME")
    verify_conv3d_ndhwc(target, dev, 1, 1, (20, 256, 256), 32, (1, 6, 6), (1, 2, 2), (0, 2, 2))
    verify_conv3d_ndhwc(target, dev, 1, 4, (20, 256, 256), 8, (1, 5, 5), (1, 2, 2), (0, 2, 2))
    verify_conv3d_ndhwc(target, dev, 1, 16, 32, 16, 3, 1, "SAME", groups=4)
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 32, 5, 2, "SAME", groups=4)
    verify_conv3d_ndhwc(target, dev, 4, 32, 16, 64, 5, 2, "SAME", groups=4)
if __name__ == "__main__":
    # Fix: test_conv3d_ndhwc requires the (target, dev) fixtures, so calling it
    # directly raised TypeError; delegate to tvm.testing.main (as the sibling
    # topi test files do) so pytest supplies the fixtures.
    tvm.testing.main()
| 4,261 | 36.716814 | 95 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for bilinear scale """
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
def verify_resize2d(
    batch,
    in_channel,
    in_height,
    in_width,
    out_height,
    out_width,
    layout="NCHW",
    coord_trans="align_corners",
    method="linear",
):
    """Run topi.image.resize2d on every enabled target and compare the result
    with the python reference implementation."""
    # Build the placeholder, output shape, and random input per layout.
    if layout == "NCHW":
        A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="float32")
        dtype = A.dtype
        out_shape = (batch, in_channel, out_height, out_width)
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
    elif layout == "NHWC":
        A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="float32")
        dtype = A.dtype
        out_shape = (batch, out_height, out_width, in_channel)
        a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    # NOTE(review): [0.0] * 4 presumably selects the whole-image ROI — confirm
    # against the resize2d operator signature.
    B = topi.image.resize2d(
        A,
        [0.0] * 4,
        (out_height, out_width),
        layout=layout,
        coordinate_transformation_mode=coord_trans,
        method=method,
    )
    scale_h = out_height / in_height
    scale_w = out_width / in_width
    b_np = tvm.topi.testing.resize2d_python(a_np, (scale_h, scale_w), layout, method, coord_trans)
    def check_target(target, dev):
        # Compile and compare on one target.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize2d():
    """resize2d over both layouts, several coordinate modes, and both methods."""
    # Scale NCHW
    verify_resize2d(4, 16, 32, 32, 50, 50, "NCHW")
    # Scale NCHW + Align Corners
    verify_resize2d(6, 32, 64, 64, 20, 20, "NCHW")
    # Scale NHWC
    verify_resize2d(4, 16, 32, 32, 50, 50, "NHWC")
    # Scale NHWC + Align Corners
    verify_resize2d(6, 32, 64, 64, 20, 20, "NHWC")
    for layout in ["NCHW", "NHWC"]:
        verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 64, 50, layout, "asymmetric", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 50, 96, layout, "asymmetric", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 96, 96, layout, "asymmetric", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 50, 50, layout, "align_corners", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="nearest_neighbor")
        verify_resize2d(4, 16, 32, 32, 50, 50, layout, "asymmetric", method="linear")
        verify_resize2d(4, 16, 32, 32, 50, 50, layout, "half_pixel", method="linear")
def verify_resize3d(
    batch,
    in_channel,
    in_depth,
    in_height,
    in_width,
    out_depth,
    out_height,
    out_width,
    layout="NCDHW",
    coordinate_transformation_mode="asymmetric",
    method="linear",
):
    """Run topi.image.resize3d on every enabled target and compare the result
    with the python reference implementation."""
    # Build the placeholder, output shape, and random input per layout.
    if layout == "NCDHW":
        A = te.placeholder(
            (batch, in_channel, in_depth, in_height, in_width), name="A", dtype="float32"
        )
        dtype = A.dtype
        out_shape = (batch, in_channel, out_depth, out_height, out_width)
        a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
            dtype
        )
    elif layout == "NDHWC":
        A = te.placeholder(
            (batch, in_depth, in_height, in_width, in_channel), name="A", dtype="float32"
        )
        dtype = A.dtype
        out_shape = (batch, out_depth, out_height, out_width, in_channel)
        a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
            dtype
        )
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    # NOTE(review): [0.0] * 6 presumably selects the whole-volume ROI — confirm
    # against the resize3d operator signature.
    B = topi.image.resize3d(
        A,
        [0.0] * 6,
        (out_depth, out_height, out_width),
        layout=layout,
        coordinate_transformation_mode=coordinate_transformation_mode,
        method=method,
    )
    scale_d = out_depth / in_depth
    scale_h = out_height / in_height
    scale_w = out_width / in_width
    b_np = tvm.topi.testing.resize3d_python(
        a_np, (scale_d, scale_h, scale_w), layout, method, coordinate_transformation_mode
    )
    def check_target(target, dev):
        # Compile and compare on one target.
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize3d():
    """Exercise 3-D resize for every method / coordinate mode / layout combination."""
    coord_modes = ("asymmetric", "align_corners", "half_pixel")
    layouts = ("NCDHW", "NDHWC")
    for interp in ("nearest_neighbor", "linear"):
        for mode in coord_modes:
            for data_layout in layouts:
                verify_resize3d(3, 16, 32, 32, 32, 10, 10, 10, data_layout, mode, interp)
@tvm.testing.uses_gpu
def test_crop_and_resize():
    """Compare topi.image.crop_and_resize with the numpy reference implementation."""

    def verify_crop_and_resize(
        image_shape,
        np_boxes,
        np_box_indices,
        np_crop_size,
        layout="NHWC",
        method="bilinear",
        extrapolation_value=0.0,
    ):
        # Placeholders for the images, the normalized crop boxes and the
        # per-box batch indices; data itself is random float32.
        images = te.placeholder(image_shape, name="images", dtype="float32")
        np_images = np.random.uniform(size=image_shape).astype("float32")
        boxes = te.placeholder(np_boxes.shape, name="boxes", dtype="float32")
        box_ind = te.placeholder(np_box_indices.shape, name="box_ind", dtype="int32")

        # One output crop per entry in np_box_indices.
        batch = len(np_box_indices)
        target_height, target_width = np_crop_size[0], np_crop_size[1]
        if layout == "NHWC":
            channel = image_shape[3]
            out_shape = (batch, target_height, target_width, channel)
        elif layout == "NCHW":
            channel = image_shape[1]
            out_shape = (batch, channel, target_height, target_width)
        else:
            raise NotImplementedError("Layout {} is not supported.".format(layout))

        out = topi.image.crop_and_resize(
            images,
            boxes,
            box_ind,
            np_crop_size,
            layout=layout,
            method=method,
            extrapolation_value=extrapolation_value,
        )
        # Numpy reference used as the golden result below.
        baseline_np = tvm.topi.testing.crop_and_resize_python(
            np_images, np_boxes, np_box_indices, np_crop_size, layout, method, extrapolation_value
        )

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_injective_schedule(target)(out)
            tvm_images = tvm.nd.array(np_images, dev)
            tvm_boxes = tvm.nd.array(np_boxes, dev)
            tvm_indices = tvm.nd.array(np_box_indices, dev)
            tvm_out = tvm.nd.array(np.zeros(out_shape, dtype="float32"), dev)
            f = tvm.build(s, [images, boxes, box_ind, out], target, name="crop_and_resize")
            f(tvm_images, tvm_boxes, tvm_indices, tvm_out)
            tvm.testing.assert_allclose(tvm_out.numpy(), baseline_np, rtol=1e-3, atol=1e-3)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    # Fixtures: boxes are (y1, x1, y2, x2) in normalized image coordinates.
    boxes_1 = np.array([[0.2, 0.3, 0.7, 0.9]], dtype="float32")
    boxes_2 = np.array([[0.2, 0.3, 0.7, 0.9], [0, 0.1, 0.8, 1]], dtype="float32")
    indices_1 = np.array([0], dtype="int32")
    indices_2 = np.array([1, 0], dtype="int32")
    size_1 = (7, 11)
    size_2 = (90, 60)

    verify_crop_and_resize((1, 255, 255, 3), boxes_1, indices_1, size_1, layout="NHWC")
    verify_crop_and_resize(
        (10, 224, 224, 5), boxes_2, indices_2, size_2, extrapolation_value=0.3, layout="NHWC"
    )
    verify_crop_and_resize((1, 100, 100, 3), boxes_1, indices_1, size_1, method="nearest_neighbor")
    verify_crop_and_resize((1, 3, 224, 224), boxes_1, indices_1, size_1, layout="NCHW")
@tvm.testing.uses_gpu
def test_affine_grid():
    """Check topi.image.affine_grid against the numpy reference implementation."""

    def run_case(num_batch, target_shape):
        dtype = "float32"
        # affine_grid takes a (N, 2, 3) batch of affine transform matrices.
        data_shape = (num_batch, 2, 3)
        data = te.placeholder(data_shape, dtype=dtype)
        out = topi.image.affine_grid(data, target_shape)

        @memoize("topi.tests.test_affine_grid.verify_affine_grid")
        def get_ref_data():
            data_np = np.random.uniform(size=data_shape).astype(dtype)
            out_np = tvm.topi.testing.affine_grid_python(data_np, target_shape)
            return data_np, out_np

        data_np, out_np = get_ref_data()

        def compare_on(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                sched = tvm.topi.testing.get_injective_schedule(target)(out)
            tvm_data = tvm.nd.array(data_np, dev)
            tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
            func = tvm.build(sched, [data, out], target)
            func(tvm_data, tvm_out)
            tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)

        for target, dev in tvm.testing.enabled_targets():
            compare_on(target, dev)

    for case in ((1, (16, 32)), (4, (16, 32))):
        run_case(*case)
@tvm.testing.uses_gpu
def test_grid_sample():
    """Compare topi.image.grid_sample with the numpy reference for 2-D and 3-D data."""

    def verify_grid_sample(
        data_shape,
        grid_shape,
        method="bilinear",
        layout="NCHW",
        padding_mode="zeros",
        align_corners=True,
    ):
        dtype = "float32"
        data = te.placeholder(data_shape, dtype=dtype)
        grid = te.placeholder(grid_shape, dtype=dtype)
        out = topi.image.grid_sample(data, grid, method, layout, padding_mode, align_corners)

        @memoize("topi.tests.test_grid_sample.verify_grid_sample")
        def get_ref_data():
            data_np = np.random.uniform(size=data_shape).astype(dtype)
            # allow grid values to be out-of-bound
            grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
            out_np = tvm.topi.testing.grid_sample_python(
                data_np, grid_np, method, layout, padding_mode, align_corners
            )
            return data_np, grid_np, out_np

        data_np, grid_np, out_np = get_ref_data()

        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_injective_schedule(target)(out)
            tvm_data = tvm.nd.array(data_np, dev)
            tvm_grid = tvm.nd.array(grid_np, dev)
            tvm_out = tvm.nd.empty(out_np.shape, dtype, dev)
            f = tvm.build(s, [data, grid, out], target)
            f(tvm_data, tvm_grid, tvm_out)
            tvm.testing.assert_allclose(tvm_out.numpy(), out_np, rtol=1e-5, atol=1e-5)

        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)

    # Sweep every method x padding mode x align_corners combination.
    methods = ["nearest", "bilinear", "bicubic"]
    padding_modes = ["zeros", "border", "reflection"]
    align_corners = [True, False]
    data_2D_shape = (4, 4, 8, 8)
    grid_2D_shape = (4, 2, 16, 16)
    layout_2D = "NCHW"
    # choosing smaller sizes to be testable on weaker GPUs
    data_3D_shape = (4, 4, 4, 4, 4)
    grid_3D_shape = (4, 3, 8, 8, 8)
    layout_3D = "NCDHW"

    for _method in methods:
        for _padding in padding_modes:
            for _align in align_corners:
                verify_grid_sample(
                    data_2D_shape, grid_2D_shape, _method, layout_2D, _padding, _align
                )
                # 3D "bicubic"(tricubic) is not supported in pytorch
                if _method != "bicubic":
                    verify_grid_sample(
                        data_3D_shape, grid_3D_shape, _method, layout_3D, _padding, _align
                    )
if __name__ == "__main__":
    # Collect and run every test in this file through pytest so newly added
    # tests are picked up automatically (matches the convention used by the
    # other topi test files, which call tvm.testing.main()).
    tvm.testing.main()
# ---------------------------------------------------------------------------
# File boundary: tests/python/topi/python/test_topi_conv2d_int8.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.conv2d import _get_workload
from tvm.topi.generic.conv2d import fallback_schedule_cpu_common_int8
from tvm.testing.aot import get_dtype_range
from common import Int8Fallback
import tvm.testing
import pytest
import platform
# (target string, compute function, schedule function) triples used to
# parametrize test_conv2d_NHWC_gemm_int8 across plain LLVM and the AArch64
# interleaved / native quantized GEMM strategies.
devices = [
    (
        "llvm",
        topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
        topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
    ),
    (
        "llvm --device arm_cpu --mtriple aarch64-linux-gnu",
        topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
        topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
    ),
    (
        "llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
        topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
        topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
    ),
    (
        "llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+dotprod",
        topi.arm_cpu.compute_conv2d_NHWC_quantized_native,
        topi.arm_cpu.schedule_conv2d_NHWC_quantized_native,
    ),
    # TODO(giuseros) We need LLVM-11 in order to compile with +i8mm extension
    # (
    #     "llvm --device arm_cpu --mtriple aarch64-linux-gnu -mattr=+v8.2a,+i8mm",
    #     topi.arm_cpu.compute_conv2d_NHWC_quantized_interleaved,
    #     topi.arm_cpu.schedule_conv2d_NHWC_quantized_interleaved,
    # ),
]
@tvm.testing.requires_llvm
@pytest.mark.parametrize("device", devices)
@pytest.mark.parametrize(
    "params",
    [
        # Subset of inception v3 expanded (dilation > 1, batch > 1, 'VALID' padding)
        (1, 3, 299, 32, 3, 2, "SAME", 1, False, False),
        (1, 32, 149, 32, 3, 1, "SAME", 2, False, False),
        (4, 32, 147, 64, 3, 1, "SAME", 1, False, False),
        (1, 64, 73, 80, 1, 1, "SAME", 1, False, False),
        (1, 80, 73, 192, 3, 1, "SAME", 1, False, False),
        (1, 192, 35, 48, 1, 1, "SAME", 1, False, False),
        (1, 192, 35, 64, 1, 1, "VALID", 1, False, False),
        (1, 192, 35, 32, 1, 1, "SAME", 1, False, False),
        (1, 48, 35, 64, 5, 1, "SAME", 1, False, False),
        (1, 96, 35, 96, 3, 1, "SAME", 1, False, False),
        (1, 256, 35, 48, 1, 1, "SAME", 1, False, False),
        (1, 256, 35, 64, 1, 1, "SAME", 1, False, False),
        (1, 288, 35, 64, 1, 1, "SAME", 1, False, False),
        (1, 288, 35, 48, 1, 1, "SAME", 1, False, False),
        (1, 96, 35, 96, 3, 2, "SAME", 1, False, False),
        (1, 128, 17, 192, 7, 1, "SAME", 2, False, False),
        (1, 160, 17, 160, 7, 1, "SAME", 1, False, False),
        (1, 160, 17, 192, 1, 1, "VALID", 1, False, False),
        (1, 192, 17, 192, 1, 1, "SAME", 1, False, False),
        (1, 768, 5, 128, 1, 1, "SAME", 1, False, False),
        (1, 192, 17, 320, 3, 2, "SAME", 1, False, False),
        (1, 192, 17, 192, 3, 2, "SAME", 1, False, False),
        (1, 1280, 8, 192, 1, 1, "SAME", 1, False, False),
        (1, 1280, 8, 384, 1, 1, "SAME", 1, False, False),
        (1, 1280, 8, 320, 1, 1, "SAME", 1, False, False),
        (1, 1280, 8, 448, 1, 1, "SAME", 1, False, False),
        (1, 384, 8, 384, 1, 1, "SAME", 1, False, False),
        (1, 384, 8, 384, 3, 1, "SAME", 1, False, False),
        (1, 448, 8, 384, 3, 1, "VALID", 1, False, False),
        (1, 2048, 8, 320, 1, 1, "SAME", 1, False, False),
        (1, 2048, 8, 448, 1, 1, "SAME", 1, True, True),
        (1, 2048, 8, 192, 1, 1, "SAME", 1, True, False),
        # A trouble case for native schedule
        (1, 8, 1, 24, 1, 1, "SAME", 1, False, False),
    ],
)
def test_conv2d_NHWC_gemm_int8(params, device):
    """Check the NHWC GEMM-based int8 conv2d compute/schedule pairs.

    ``params`` is (batch, in_channel, in_size, num_filter, kernel, stride,
    padding, dilation, add_bias, add_relu); ``device`` is one of the
    (target, compute, schedule) triples from the module-level ``devices`` list.
    Cross-compiled AArch64 targets are only built (not run) on non-AArch64 hosts.
    """
    with Int8Fallback():
        target, compute, schedule = device
        (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding,
            dilation,
            add_bias,
            add_relu,
        ) = params
        dtype = "int8"

        # TODO(ekalda): These combinations hang during compilation
        failing_cases = [
            (devices[1], (1, 128, 17, 192, 7, 1, "SAME", 2, False, False)),
            (devices[1], (1, 160, 17, 160, 7, 1, "SAME", 1, False, False)),
            (
                devices[1],
                (1, 448, 8, 384, 3, 1, "VALID", 1, False, False),
            ),  # this one passes but is just incredibly slow
        ]
        if (device, params) in failing_cases:
            pytest.skip("Skipping because this test will hang")

        print("Compiling for target: %s" % target)
        pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
        padding_sum = pad_top + pad_left + pad_bottom + pad_right
        print(
            "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
        )

        in_height = in_width = in_size
        a_shape = (batch, in_height, in_width, in_channel)
        w_shape = (kernel, kernel, in_channel, num_filter)
        bias_shape = (num_filter,)

        @memoize("topi.tests.test_topi_conv2d_int8.test_conv2d_NHWC_gemm_int8")
        def get_ref_data():
            # Random int8 input/weights; reference result from the numpy conv.
            input_min, input_max = get_dtype_range(dtype)
            a_np = np.random.randint(low=input_min, high=input_max, size=a_shape).astype(dtype)
            w_np = np.random.randint(low=input_min, high=input_max, size=w_shape).astype(dtype)
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
            c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding).astype(dtype)

            if add_bias:
                b_np = np.random.uniform(size=bias_shape).astype(dtype)
                c_np += b_np
            if add_relu:
                c_np = np.maximum(c_np, 0)
            return a_np, w_np, b_np, c_np

        with tvm.target.Target(target) as tvm_target:
            A = te.placeholder(a_shape, name="A", dtype=dtype)
            W = te.placeholder(w_shape, name="W", dtype=dtype)
            bias = te.placeholder(bias_shape, name="bias", dtype=dtype)
            C = compute(A, W, (stride, stride), padding, (dilation, dilation), dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = schedule([C])

            build_args = [A, W, bias, C] if add_bias else [A, W, C]
            func = tvm.build(
                s,
                build_args,
                target,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )

            # An AArch64 target can only be executed when the host itself is
            # AArch64; otherwise stop after a successful build.
            build_only = tvm_target.features.is_aarch64 and (platform.machine() != "aarch64")

            if build_only:
                return

            print("Running on target: %s" % target)

            dev = tvm.device(target, 0)
            a_np, w_np, b_np, c_np = get_ref_data()
            a = tvm.nd.array(a_np, dev)
            w = tvm.nd.array(w_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
            run_args = [a, w, b, c] if add_bias else [a, w, c]
            func(*run_args)
            tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
@pytest.mark.parametrize(
"params",
[
# ResNet18 workloads where channels in / out are multiple of oc_block_factor
(1, 64, 56, 64, 3, 1, 1, 1, False, False),
(1, 64, 56, 64, 1, 1, 0, 1, False, False),
(1, 64, 56, 128, 3, 2, 1, 1, False, False),
(1, 64, 56, 128, 1, 2, 0, 1, False, False),
(1, 128, 28, 128, 3, 1, 1, 1, False, False),
(1, 128, 28, 256, 3, 2, 1, 1, False, False),
(1, 128, 28, 256, 1, 2, 0, 1, False, False),
(1, 256, 14, 256, 3, 1, 1, 1, False, False),
(1, 256, 14, 512, 3, 2, 1, 1, False, False),
(1, 256, 14, 512, 1, 2, 0, 1, False, False),
(1, 512, 7, 512, 3, 1, 1, 1, False, False),
# bias, relu
(1, 64, 56, 64, 3, 1, 1, 1, False, True),
(1, 64, 56, 64, 3, 1, 1, 1, True, False),
(1, 64, 56, 64, 3, 1, 1, 1, True, True),
# dilation = 2
(1, 64, 56, 64, 3, 1, 1, 2, False, False),
# batch size
(4, 64, 56, 64, 3, 1, 1, 1, False, False),
(9, 64, 56, 64, 3, 1, 1, 1, False, False),
# weird workloads
(4, 4, 4, 8, 4, 4, 4, 1, False, False),
# inception v3 workloads where channels in / out are multiple of oc_block_factor
(1, 32, 149, 32, 3, 1, 0, 1, False, False),
(1, 32, 147, 64, 3, 1, 1, 1, False, False),
(1, 64, 73, 80, 1, 1, 0, 1, False, False),
(1, 80, 73, 192, 3, 1, 0, 1, False, False),
(1, 192, 35, 64, 1, 1, 0, 1, False, False),
(1, 192, 35, 48, 1, 1, 0, 1, False, False),
(1, 48, 35, 64, 5, 1, 2, 1, False, False),
(1, 64, 35, 96, 3, 1, 1, 1, False, False),
(1, 96, 35, 96, 3, 1, 1, 1, False, False),
(1, 192, 35, 32, 1, 1, 0, 1, False, False),
(1, 256, 35, 64, 1, 1, 0, 1, False, False),
(1, 256, 35, 48, 1, 1, 0, 1, False, False),
(1, 288, 35, 64, 1, 1, 0, 1, False, False),
(1, 288, 35, 48, 1, 1, 0, 1, False, False),
(1, 288, 35, 384, 3, 2, 0, 1, False, False),
(1, 96, 35, 96, 3, 2, 0, 1, False, False),
(1, 768, 17, 192, 1, 1, 0, 1, False, False),
(1, 768, 17, 128, 1, 1, 0, 1, False, False),
(1, 128, 17, 128, 1, 1, 0, 1, False, False),
(1, 128, 17, 192, 7, 1, 3, 1, False, False),
(1, 128, 17, 128, 7, 1, 3, 1, False, False),
(1, 128, 17, 192, 1, 1, 0, 1, False, False),
(1, 768, 17, 160, 1, 1, 0, 1, False, False),
(1, 160, 17, 160, 1, 1, 0, 1, False, False),
(1, 160, 17, 192, 7, 1, 3, 1, False, False),
(1, 160, 17, 160, 7, 1, 3, 1, False, False),
(1, 160, 17, 192, 1, 1, 0, 1, False, False),
(1, 192, 17, 192, 1, 1, 0, 1, False, False),
(1, 192, 17, 192, 7, 1, 3, 1, False, False),
(1, 192, 17, 320, 3, 2, 0, 1, False, False),
(1, 192, 17, 192, 3, 2, 0, 1, False, False),
(1, 1280, 8, 320, 1, 1, 0, 1, False, False),
(1, 1280, 8, 384, 1, 1, 0, 1, False, False),
(1, 384, 8, 384, 1, 1, 0, 1, False, False),
(1, 384, 8, 384, 3, 1, 1, 1, False, False),
(1, 1280, 8, 448, 1, 1, 0, 1, False, False),
(1, 448, 8, 384, 3, 1, 1, 1, False, False),
(1, 1280, 8, 192, 1, 1, 0, 1, False, False),
(1, 2048, 8, 320, 1, 1, 0, 1, False, False),
(1, 2048, 8, 384, 1, 1, 0, 1, False, False),
(1, 2048, 8, 448, 1, 1, 0, 1, False, False),
(1, 2048, 8, 192, 1, 1, 0, 1, False, False),
(1, 1024, 19, 88, 3, 1, 1, 1, False, False),
# batch > 1
(7, 32, 149, 32, 3, 1, 0, 1, False, False),
(8, 32, 149, 32, 3, 1, 0, 1, False, False),
(32, 32, 149, 32, 3, 1, 0, 1, False, False),
# Asymmetric padding
(1, 32, 35, 64, 7, 2, (0, 0, 1, 1), 1, False, False),
(1, 64, 8, 128, 3, 1, (3, 3, 2, 2), 1, False, False),
(1, 64, 8, 64, 1, 1, (1, 2, 2, 1), 1, False, False),
(1, 64, 17, 192, 1, 1, (1, 2), 1, False, False),
(1, 64, 8, 64, 3, 1, (3, 1), 1, False, False),
(1, 128, 8, 384, 3, 1, (0, 2), 1, False, False),
(1, 64, 8, 64, 1, 1, "VALID", 1, False, False),
(1, 392, 8, 64, 3, 1, "VALID", 1, False, False),
(1, 512, 19, 64, 1, 1, "SAME", 1, False, False),
(1, 64, 16, 32, 2, 1, "SAME", 1, False, False),
(1, 64, 8, 64, 3, 1, (1, 2, 2, 1), 1, False, True),
(1, 64, 8, 64, 5, 2, (1, 3), 1, True, False),
(1, 64, 56, 64, 3, 1, "VALID", 1, True, True),
(1, 64, 56, 64, 24, 1, "SAME", 1, True, True),
],
)
def test_conv2d_NCHWc_int8(in_dtype, params):
with Int8Fallback():
(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation,
add_bias,
add_relu,
) = params
pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
padding_sum = pad_top + pad_left + pad_bottom + pad_right
print(
"Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
)
in_height = in_width = in_size
A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
out_dtype = "int32" if in_dtype == "int8" else "uint32"
input_min, input_max = get_dtype_range(in_dtype)
def check_target(target, compute, schedule, oc_block_factor, build_only):
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
pytest.skip(reason="Skip because %s is not enabled" % target)
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
pytest.skip(reason="Skip because %s is not enabled" % target)
bias = te.placeholder(
(num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype=out_dtype
)
bias_shape = get_const_tuple(bias.shape)
@memoize("topi.tests.test_topi_conv2d_int8.test_conv2d_NCHWc_int8")
def get_ref_data():
a_np = np.random.randint(low=input_min, high=input_max, size=a_shape).astype(
out_dtype
)
w_np = np.random.randint(low=input_min, high=input_max, size=w_shape).astype(
out_dtype
)
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(
out_dtype
)
# convert to NCHWc
_, _, out_height, out_width = c_np.shape
c_np = c_np.reshape(
(batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
).transpose(0, 1, 3, 4, 2)
if add_bias:
b_np = np.random.uniform(size=bias_shape).astype(out_dtype)
c_np += b_np
if add_relu:
c_np = np.maximum(c_np, 0)
return a_np, w_np, b_np, c_np
with tvm.target.Target(target):
C = compute(
A,
W,
(stride, stride),
padding,
(dilation, dilation),
"NCHW",
"NCHW",
out_dtype,
)
if add_bias:
C = topi.add(C, bias)
if add_relu:
C = topi.nn.relu(C)
s = schedule([C])
compile_args = [A, W, bias, C] if add_bias else [A, W, C]
func = tvm.build(
s,
compile_args,
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
if build_only:
return
a_np, w_np, b_np, c_np = get_ref_data()
a = tvm.nd.array(a_np.astype(dtype), dev)
w = tvm.nd.array(w_np.astype(dtype), dev)
b = tvm.nd.array(b_np.astype(out_dtype), dev)
c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
run_args = [a, w, b, c] if add_bias else [a, w, c]
print("Running on target: %s" % target)
func(*run_args)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
targets = [
(
"cuda",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
# Disable on CI since it does not support spirv int8 dot product
# (
# "vulkan -from_device=0",
# lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(a, w, s, p, d, l, o),
# topi.cuda.schedule_conv2d_NCHWc_int8,
# 4,
# False,
# ),
]
build_only_aarch64 = platform.machine() != "aarch64"
targets.append(
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon,+v8.2a,+dotprod",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
)
)
if in_dtype == "int8":
targets += [
(
"llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon",
topi.arm_cpu.conv2d_NCHWc_int8,
topi.arm_cpu.schedule_conv2d_NCHWc_int8,
8,
build_only_aarch64,
),
(
"rocm -mattr=+dotprod",
lambda a, w, s, p, d, l, ol, o: topi.cuda.conv2d_NCHWc_int8(
a, w, s, p, d, l, o
),
topi.cuda.schedule_conv2d_NCHWc_int8,
4,
False,
),
]
for target, compute, schedule, oc_block_factor, build_only in targets:
check_target(target, compute, schedule, oc_block_factor, build_only)
# Conv2d NCHW int8 schedule testing. Internally, it uses NCHWc schedule. So, just
# performing basic testing - one test for all different scenarios - batch, dilation etc..
@pytest.mark.parametrize("in_dtype", ["int8", "uint8"])
@pytest.mark.parametrize(
    "params",
    [
        (1, 64, 56, 64, 3, 1, 1, 1, False, False),
        (1, 64, 56, 64, 3, 1, 1, 1, False, True),
        (1, 64, 56, 64, 3, 1, 1, 2, False, False),
        (9, 64, 56, 64, 3, 1, 1, 1, False, False),
        (4, 4, 4, 4, 4, 4, 4, 1, False, False),
        (1, 32, 149, 32, 3, 1, 0, 1, False, False),
        (7, 32, 149, 32, 3, 1, 0, 1, False, False),
        (1, 32, 35, 64, 7, 2, (0, 0, 1, 1), 1, False, False),
        (1, 32, 35, 64, 7, 2, (0, 0, 2, 2), 1, False, False),
    ],
)
def test_conv2d_nchw_int8(in_dtype, params):
    """Basic check of the CUDA NCHW int8/uint8 conv2d schedule.

    ``params`` is (batch, in_channel, in_size, num_filter, kernel, stride,
    padding, dilation, add_bias, add_relu).  Also validates that the fallback
    CPU schedule's tile_ow candidate matches the output width.
    """
    with Int8Fallback():
        (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding,
            dilation,
            add_bias,
            add_relu,
        ) = params
        pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
        padding_sum = pad_top + pad_left + pad_bottom + pad_right
        print(
            "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
        )

        in_height = in_width = in_size
        A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype=in_dtype)
        W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W", dtype=in_dtype)
        bias = te.placeholder((num_filter, 1, 1), name="bias", dtype=in_dtype)

        a_shape = get_const_tuple(A.shape)
        w_shape = get_const_tuple(W.shape)
        bias_shape = get_const_tuple(bias.shape)
        dtype = A.dtype

        @memoize("topi.tests.test_topi_conv2d_int8.test_conv2d_nchw_int8")
        def get_ref_data():
            # Draw inputs from the full valid range of `in_dtype` (consistent
            # with test_conv2d_NCHWc_int8).  The previous hard-coded int8
            # bounds produced negative values that wrapped on astype("uint8")
            # and used an inconsistent upper bound for a_np vs w_np.
            input_min, input_max = get_dtype_range(in_dtype)
            a_np = np.random.randint(low=input_min, high=input_max, size=a_shape).astype(dtype)
            w_np = np.random.randint(low=input_min, high=input_max, size=w_shape).astype(dtype)
            b_np = np.random.uniform(size=bias_shape).astype(dtype)
            dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
            c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding).astype(dtype)

            if add_bias:
                b_np = np.random.uniform(size=bias_shape).astype(dtype)
                c_np += b_np
            if add_relu:
                c_np = np.maximum(c_np, 0)

            return a_np, w_np, b_np, c_np

        a_np, w_np, b_np, c_np = get_ref_data()

        def verify_workload_padding():
            _, _, _, out_width = get_const_tuple(c_np.shape)
            wkl = _get_workload(A, W, (stride, stride), padding, dilation, dtype)

            # for testing functionality,
            # we choose arbitrary int32_lanes and num_int8_elements can divide the channel,
            # regardless of the performance.
            int32_lanes, num_int8_elements = num_filter, in_channel

            # check if tile_ow candidates are the factors of the right output weight.
            cfg = autotvm.get_config()
            fallback_schedule_cpu_common_int8(cfg, wkl, int32_lanes, num_int8_elements)
            ow_tile = np.prod(cfg["tile_ow"].size)

            tvm.testing.assert_allclose(ow_tile, out_width)

        def check_target(target):
            dev = tvm.device(target, 0)
            if not tvm.testing.device_enabled(target):
                pytest.skip("Skip because %s is not enabled" % target)
            if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
                pytest.skip("Skip because int8 intrinsics are not available")

            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                C = topi.cuda.conv2d_nchw_int8(
                    A, W, (stride, stride), padding, (dilation, dilation), dtype
                )
                if add_bias:
                    C = topi.add(C, bias)
                if add_relu:
                    C = topi.nn.relu(C)
                s = topi.cuda.schedule_conv2d_nchw_int8([C])

            build_args = [A, W, bias, C] if add_bias else [A, W, C]
            func = tvm.build(
                s,
                build_args,
                target,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel,
                    stride,
                    padding_sum,
                    dilation,
                ),
            )

            a = tvm.nd.array(a_np, dev)
            w = tvm.nd.array(w_np, dev)
            b = tvm.nd.array(b_np, dev)
            c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
            run_args = [a, w, b, c] if add_bias else [a, w, c]
            func(*run_args)
            tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

        verify_workload_padding()

        check_target("cuda")
if __name__ == "__main__":
    # Collect and run every test in this file through the pytest-based runner.
    tvm.testing.main()
# ---------------------------------------------------------------------------
# File boundary: tests/python/topi/python/test_topi_reorg.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do reorg."""
import numpy as np
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm
from tvm import te
import tvm.topi.testing
import tvm.testing
# Maps a generic target kind to the schedule function used to lower reorg.
_reorg_schedule = {
    "generic": topi.generic.schedule_reorg,
    "gpu": topi.cuda.schedule_reorg,
}
def verify_reorg(batch, in_size, in_channel, stride):
    """Verify reorg operator by comparing outputs from tvm and numpy implementation"""
    height = width = in_size
    A = te.placeholder((batch, in_channel, height, width), name="A")
    B = topi.vision.reorg(A, stride)

    # Random input and numpy golden reference.
    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    b_np = tvm.topi.testing.reorg_python(a_np, stride)

    def run_on(device):
        """Run and compare on one device, skipping it when not enabled."""
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            schedule_fn = tvm.topi.testing.dispatch(device, _reorg_schedule)
            s = schedule_fn([B])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        func = tvm.build(s, [A, B], device)
        func(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

    for device in ("llvm", "cuda"):
        run_on(device)
@tvm.testing.uses_gpu
def test_reorg():
    # Single representative workload: NCHW (1, 8, 20, 20) with stride 2.
    verify_reorg(1, 20, 8, 2)
if __name__ == "__main__":
    # Allow running this file directly, outside of pytest.
    test_reorg()
# ---------------------------------------------------------------------------
# File boundary: tests/python/topi/python/test_topi_conv2d_NCHWc.py
# ---------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for NCHW[x]c convolution"""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]i[x]o
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 3, 1))
return kernel
def _transform_bias(bias, bn):
# [num_filter, 1, 1] -> [num_filter//bn, 1, 1, bn]
num_filter, h, w = bias.shape
bias = np.reshape(bias, (num_filter // bn, bn, h, w))
bias = np.transpose(bias, (0, 2, 3, 1))
return bias
def verify_conv2d_NCHWc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    groups=1,
    dtype="float32",
):
    """Compare topi.x86.conv2d_NCHWc against the numpy NCHW reference.

    The reference convolution runs in plain NCHW; inputs, weights, bias and
    the expected output are then repacked into the blocked layouts via the
    module-level _transform_* helpers before comparison.
    """
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    in_height = in_width = in_size
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum)
    )

    # for testing functionality,
    # we choose arbitrary block size that can divide the channel,
    # regardless of the performance.
    oc_block = 1
    for bn in range(16, 0, -1):
        if num_filter % bn == 0:
            oc_block = bn
            break

    ic_block = 1
    for bn in range(oc_block, 0, -1):
        if in_channel % bn == 0:
            ic_block = bn
            break

    # Placeholders already in the blocked NCHW[x]c / OIHW[x]i[x]o layouts.
    A = te.placeholder((batch, in_channel // ic_block, in_height, in_width, ic_block), name="A")
    W = te.placeholder(
        (
            num_filter // oc_block,
            in_channel // ic_block // groups,
            kernel,
            kernel,
            ic_block,
            oc_block,
        ),
        name="W",
    )
    bias = te.placeholder((num_filter // oc_block, 1, 1, oc_block), name="bias")

    @memoize("topi.tests.test_topi_conv2d_NCHWc.verify_conv2d_NCHWc")
    def get_ref_data():
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
        w_np = np.random.uniform(size=(num_filter, in_channel // groups, kernel, kernel)).astype(
            dtype
        )
        b_np = np.random.uniform(size=(num_filter, 1, 1)).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        # Repack everything into the blocked layouts the operator expects.
        return (
            _transform_data(a_np, ic_block),
            _transform_kernel(w_np, ic_block, oc_block),
            _transform_bias(b_np, oc_block),
            _transform_data(c_np, oc_block),
        )

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            C = topi.x86.conv2d_NCHWc(
                A,
                W,
                (stride, stride),
                padding,
                (dilation, dilation),
                "NCHW%dc" % ic_block,
                "NCHW%dc" % oc_block,
                dtype,
            )
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = topi.x86.schedule_conv2d_NCHWc([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)

    # test llvm only for now since conv2d_NCHWc implement is missing in other backend.
    for device in ["llvm"]:
        with autotvm.tophub.context(device):  # load tophub pre-tuned parameters
            check_device(device)
def test_conv2d_NCHWc():
    """Sweep conv2d_NCHWc over ResNet18, Inception-v3, and corner-case workloads.

    Plain workloads are driven from tuples of
    (batch, in_channel, in_size, num_filter, kernel, stride, padding);
    bias/relu/dilation/group variants are spelled out explicitly.
    """
    # ResNet18 workloads
    for workload in [
        (1, 3, 224, 64, 7, 2, 3),
        (1, 64, 56, 64, 3, 1, 1),
        (1, 64, 56, 64, 1, 1, 0),
        (1, 64, 56, 128, 3, 2, 1),
        (1, 64, 56, 128, 1, 2, 0),
        (1, 128, 28, 128, 3, 1, 1),
        (1, 128, 28, 256, 3, 2, 1),
        (1, 128, 28, 256, 1, 2, 0),
        (1, 256, 14, 256, 3, 1, 1),
        (1, 256, 14, 512, 3, 2, 1),
        (1, 256, 14, 512, 1, 2, 0),
        (1, 512, 7, 512, 3, 1, 1),
    ]:
        verify_conv2d_NCHWc(*workload)
    # bias, relu
    verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_relu=True)
    verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True)
    verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, add_bias=True, add_relu=True)
    # dilation
    verify_conv2d_NCHWc(1, 64, 56, 64, 3, 1, 1, dilation=2)
    # batch size
    verify_conv2d_NCHWc(4, 64, 56, 64, 3, 1, 1)
    verify_conv2d_NCHWc(9, 64, 56, 64, 3, 1, 1)
    # groups
    verify_conv2d_NCHWc(1, 2048, 10, 2048, 3, 1, 1, groups=128)
    # weird workloads: every argument equal to n
    for n in range(2, 7):
        verify_conv2d_NCHWc(n, n, n, n, n, n, n)
    # disable these tests due to some bugs of llvm with nvptx
    # verify_conv2d_NCHWc(1, 1, 1, 1, 1, 1, 1, dilation=1)
    # verify_conv2d_NCHWc(1, 1, 1, 1, 1, 1, 1, dilation=2)
    # verify_conv2d_NCHWc(2, 13, 71, 59, 3, 1, 1)
    # inception v3 workloads (plus a few SSD-style tails)
    for workload in [
        (1, 3, 299, 32, 3, 2, 0),
        (1, 32, 149, 32, 3, 1, 0),
        (1, 32, 147, 64, 3, 1, 1),
        (1, 64, 73, 80, 1, 1, 0),
        (1, 80, 73, 192, 3, 1, 0),
        (1, 192, 35, 64, 1, 1, 0),
        (1, 192, 35, 48, 1, 1, 0),
        (1, 48, 35, 64, 5, 1, 2),
        (1, 64, 35, 96, 3, 1, 1),
        (1, 96, 35, 96, 3, 1, 1),
        (1, 192, 35, 32, 1, 1, 0),
        (1, 256, 35, 64, 1, 1, 0),
        (1, 256, 35, 48, 1, 1, 0),
        (1, 288, 35, 64, 1, 1, 0),
        (1, 288, 35, 48, 1, 1, 0),
        (1, 288, 35, 384, 3, 2, 0),
        (1, 96, 35, 96, 3, 2, 0),
        (1, 768, 17, 192, 1, 1, 0),
        (1, 768, 17, 128, 1, 1, 0),
        (1, 128, 17, 128, 1, 1, 0),
        (1, 128, 17, 192, 7, 1, 3),
        (1, 128, 17, 128, 7, 1, 3),
        (1, 128, 17, 192, 1, 1, 0),
        (1, 768, 17, 160, 1, 1, 0),
        (1, 160, 17, 160, 1, 1, 0),
        (1, 160, 17, 192, 7, 1, 3),
        (1, 160, 17, 160, 7, 1, 3),
        (1, 160, 17, 192, 1, 1, 0),
        (1, 192, 17, 192, 1, 1, 0),
        (1, 192, 17, 192, 7, 1, 3),
        (1, 192, 17, 320, 3, 2, 0),
        (1, 192, 17, 192, 3, 2, 0),
        (1, 1280, 8, 320, 1, 1, 0),
        (1, 1280, 8, 384, 1, 1, 0),
        (1, 384, 8, 384, 1, 1, 0),
        (1, 384, 8, 384, 3, 1, 1),
        (1, 1280, 8, 448, 1, 1, 0),
        (1, 448, 8, 384, 3, 1, 1),
        (1, 1280, 8, 192, 1, 1, 0),
        (1, 2048, 8, 320, 1, 1, 0),
        (1, 2048, 8, 384, 1, 1, 0),
        (1, 2048, 8, 448, 1, 1, 0),
        (1, 2048, 8, 192, 1, 1, 0),
        (1, 1024, 19, 84, 3, 1, 1),
        (1, 2048, 10, 126, 3, 1, 1),
        (1, 512, 5, 126, 3, 1, 1),
        (1, 256, 3, 126, 3, 1, 1),
    ]:
        verify_conv2d_NCHWc(*workload)
    # Asymmetric padding
    for workload in [
        (1, 32, 17, 64, 7, 2, (0, 0, 1, 1)),
        (1, 32, 35, 128, 3, 1, (3, 3, 2, 2)),
        (1, 32, 35, 32, 1, 1, (1, 2, 2, 1)),
        (1, 32, 17, 192, 1, 1, (1, 2)),
        (1, 32, 8, 32, 3, 1, (3, 1)),
        (1, 128, 8, 384, 3, 1, (0, 2)),
        (1, 32, 8, 32, 1, 1, "VALID"),
        (1, 388, 8, 32, 3, 1, "VALID"),
        (1, 512, 19, 32, 1, 1, "SAME"),
        (1, 32, 10, 32, 2, 1, "SAME"),
    ]:
        verify_conv2d_NCHWc(*workload)
    verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, (1, 2, 2, 1), add_relu=True)
    verify_conv2d_NCHWc(1, 32, 8, 32, 5, 2, (1, 3), add_bias=True)
    verify_conv2d_NCHWc(1, 32, 8, 32, 3, 1, "VALID", add_bias=True, add_relu=True)
    verify_conv2d_NCHWc(1, 32, 8, 32, 24, 1, "SAME", add_bias=True, add_relu=True)


if __name__ == "__main__":
    test_conv2d_NCHWc()
| 10,920 | 36.52921 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_space_to_batch_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for space to batch"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after, pad_value=0):
    """Check topi.nn.space_to_batch_nd against the numpy reference.

    Parameters
    ----------
    input_shape : list of int
        Input tensor shape; axis 0 is batch and the next
        len(block_shape) axes are the spatial dims being blocked.
    block_shape : list of int
        Block size per spatial dimension.
    pad_before, pad_after : list of int
        Per-spatial-dim padding applied before blocking.
    pad_value : int or float, optional
        Fill value for the padded region.
    """
    # Expected output shape: batch grows by prod(block_shape); each padded
    # spatial dim shrinks by its block factor; trailing dims pass through.
    out_shape = []
    out_shape.append(int((input_shape[0] * np.prod(block_shape))))
    for i in range(1, len(block_shape) + 1):
        pad = pad_before[i - 1] + pad_after[i - 1]
        out_shape.append(int((input_shape[i] + pad) // block_shape[i - 1]))
    for i in range(len(block_shape) + 1, len(input_shape)):
        out_shape.append(input_shape[i])
    A = te.placeholder(input_shape, name="A", dtype="float32")
    dtype = A.dtype
    a_np = np.random.uniform(size=input_shape).astype(dtype)
    B = topi.nn.space_to_batch_nd(A, block_shape, pad_before, pad_after, pad_value)
    b_np = tvm.topi.testing.space_to_batch_nd_python(
        a_np, block_shape, pad_before, pad_after, pad_value
    )

    def check_target(target, dev):
        print("Running on target: %s" % target)
        # tvm.target.create() is deprecated; use the Target constructor
        # (consistent with the other topi tests in this suite).
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_space_to_batch():
    """Run space_to_batch_nd over representative block/pad layouts."""
    cases = [
        # (input_shape, block_shape, pad_before, pad_after)
        ([3, 3, 2, 1], [3], [0], [0]),  # without paddings
        ([3, 3, 2, 1], [3], [1], [2]),  # with paddings
        ([3, 3, 4, 5, 2], [3, 4, 2], [1, 0, 3], [2, 0, 0]),  # multiple spatial dims
        ([3, 3, 4, 5, 2], [3, 4, 2, 2], [1, 4, 0, 0], [2, 0, 1, 0]),  # no remaining dims
    ]
    for input_shape, block_shape, pad_before, pad_after in cases:
        verify_space_to_batch_nd(input_shape, block_shape, pad_before, pad_after)


if __name__ == "__main__":
    test_space_to_batch()
| 2,733 | 36.452055 | 91 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_scatter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
@tvm.testing.parametrize_targets
def test_scatter_nd(dev, target):
    """Validate topi scatter_nd for several shapes and reduction modes."""

    def run_case(data, indices, updates, expected, mode="add"):
        # Pick the (compute, schedule) pair matching the current target.
        impls = {
            "generic": (
                lambda x, y, z: topi.scatter_nd(x, y, z, mode),
                topi.generic.schedule_extern,
            ),
            "gpu": (
                lambda x, y, z: topi.cuda.scatter_nd(x, y, z, mode),
                topi.generic.schedule_extern,
            ),
        }
        fcompute, fschedule = tvm.topi.testing.dispatch(target, impls)
        tvm.topi.testing.compare_numpy_tvm(
            [data, indices, updates], expected, target, dev, fcompute, fschedule
        )

    # 2-D destination, default "add" mode.
    run_case(
        np.zeros((2, 2)).astype("int64"),
        np.array([[1, 1, 0], [0, 1, 0]]),
        np.array([2, 3, 0]),
        np.array([[0, 0], [2, 3]]),
    )

    # 4-D destination with 2-D update slices.
    run_case(
        np.zeros((2, 2, 2, 2)).astype("int64"),
        np.array([[0, 1], [1, 1]]),
        np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
        np.array([[[[0, 0], [0, 0]], [[1, 2], [3, 4]]], [[[0, 0], [0, 0]], [[5, 6], [7, 8]]]]),
    )

    # Repeated index 0 accumulates two update rows into one output row.
    indices = np.array([[1, 0, 0]])
    updates = np.reshape(np.arange(1560 * 3), (3, 1560)).astype("float32")
    data = np.zeros((2, 1560)).astype("float32")
    expected = data.copy()
    expected[1, :] += updates[0, :]
    expected[0, :] += updates[1, :]
    expected[0, :] += updates[2, :]
    run_case(data, indices, updates, expected)

    # Every reduction mode against a scalar python reference.
    combine = {
        "update": lambda cur, new: new,
        "add": lambda cur, new: cur + new,
        "mul": lambda cur, new: cur * new,
        "min": min,
        "max": max,
    }
    for mode in ["update", "add", "mul", "min", "max"]:
        updates = np.ones((5, 3)).astype("float64")
        indices = np.stack((np.random.randint(2, size=5), np.random.randint(7, size=5))).astype(
            "int64"
        )
        data = np.random.random((2, 7, 3)).astype("float64")
        expected = data.copy()
        for i in range(indices.shape[1]):
            for j in range(updates.shape[1]):
                dst = (indices[0, i], indices[1, i], j)
                expected[dst] = combine[mode](expected[dst], updates[i, j])
        run_case(data, indices, updates, expected, mode)


if __name__ == "__main__":
    test_scatter_nd(tvm.device("cpu"), tvm.target.Target("llvm"))
| 3,737 | 38.765957 | 96 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv3d_transpose_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
# (compute, schedule) implementations per target kind, selected via
# tvm.topi.testing.dispatch in the verification helper below.
_conv3d_transpose_ncdhw_implement = {
    "generic": (topi.nn.conv3d_transpose_ncdhw, topi.generic.schedule_conv3d_transpose_ncdhw),
    "cpu": (topi.x86.conv3d_transpose_ncdhw, topi.x86.schedule_conv3d_transpose_ncdhw),
    "gpu": (topi.cuda.conv3d_transpose_ncdhw, topi.cuda.schedule_conv3d_transpose_ncdhw),
}
def verify_conv3d_transpose_ncdhw(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
    """Check conv3d_transpose_ncdhw (and a fused relu) against the numpy reference.
    in_size/kernel/stride are (depth, height, width) triples; padding is the
    six-tuple (front, top, left, back, bottom, right); output_padding adds
    extra size on each spatial axis of the result.
    """
    in_depth, in_height, in_width = in_size
    kernel_depth, kernel_height, kernel_width = kernel
    stride_depth, stride_height, stride_width = stride
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = padding
    # Transposed-conv weights are laid out (in_channel, out_channel, d, h, w).
    A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
    W = te.placeholder(
        (in_channel, num_filter, kernel_depth, kernel_height, kernel_width), name="W"
    )
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    dtype = A.dtype
    # Reference inputs/outputs are cached between runs via pickle_memoize.
    @memoize("topi.tests.test_topi_conv3d_transpose.verify_conv3d_transpose_ncdhw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = tvm.topi.testing.conv3d_transpose_ncdhw_python(
            a_np, w_np, stride, padding, output_padding
        )
        # c_np is the expected result of the relu-fused variant.
        c_np = np.maximum(b_np, 0)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()
    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            # Pick the per-target compute/schedule pair.
            fcompute, fschedule = tvm.topi.testing.dispatch(
                target, _conv3d_transpose_ncdhw_implement
            )
            B = fcompute(
                A,
                W,
                [stride_depth, stride_height, stride_width],
                [pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right],
                A.dtype,
                output_padding,
            )
            C = topi.nn.relu(B)
            s1 = fschedule([B])
            s2 = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # Build and run both the plain and the relu-fused kernels.
        func1 = tvm.build(s1, [A, W, B], target)
        func2 = tvm.build(s2, [A, W, C], target)
        func1(a, w, b)
        func2(a, w, c)
        tvm.testing.assert_allclose(b.numpy(), b_np, atol=1e-4, rtol=1e-4)
        tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e-4, rtol=1e-4)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_transpose_ncdhw():
    """Sweep conv3d_transpose_ncdhw over kernel/stride/padding/output_padding combos."""
    # (batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding)
    workloads = [
        (1, 3, (24, 24, 24), 1, (1, 1, 1), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 3, (24, 24, 24), 2, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (2, 2, 2)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (3, 3, 3), (0, 0, 0, 0, 0, 0), (1, 0, 2)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 3, (24, 24, 24), 16, (3, 3, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
        (1, 3, (24, 24, 24), 16, (2, 2, 2), (2, 2, 2), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 8, (32, 32, 32), 32, (5, 5, 5), (1, 1, 1), (0, 0, 0, 0, 0, 0), (0, 0, 0)),
        (1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
        (1, 8, (32, 32, 32), 64, (5, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (1, 1, 1)),
        (1, 8, (32, 32, 32), 64, (3, 5, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
        (1, 8, (32, 32, 32), 64, (3, 5, 5), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
        (1, 8, (32, 32, 32), 64, (3, 3, 7), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
        (1, 8, (32, 32, 32), 64, (3, 5, 3), (2, 2, 2), (1, 1, 1, 1, 1, 1), (0, 0, 0)),
    ]
    for workload in workloads:
        verify_conv3d_transpose_ncdhw(*workload)


if __name__ == "__main__":
    test_conv3d_transpose_ncdhw()
| 5,848 | 37.735099 | 94 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_nhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Tensorcore (compute, schedule) pair for NHWC conv2d; CUDA is the only
# target registered here.
_conv2d_nhwc_tensorcore_implement = {
    "cuda": (topi.cuda.conv2d_nhwc_tensorcore, topi.cuda.schedule_conv2d_nhwc_tensorcore)
}
def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices="cuda",
):
    """Test the conv2d with tensorcore for nhwc layout.

    Builds an NHWC conv2d (optionally fused with bias-add and relu), runs it
    with the tensorcore schedule on `devices`, and compares the result with
    the numpy reference implementation.
    """
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )
    in_height = in_width = in_size
    A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_nhwc.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        # The weight layout is HWIO, so dilation applies to the two leading
        # spatial axes. (The previous code dilated the channel axes, which
        # would produce a wrong reference for any dilation > 1.)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        if not nvcc.have_tensorcore(dev.compute_version):
            print("skip because gpu does not support Tensor Cores")
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(
                device, _conv2d_nhwc_tensorcore_implement
            )
            C = fcompute(A, W, stride, padding, dilation, "float32")
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        kernel_name = "relu_%d_%d_%d_%d_%d_%d_%d_%d" % (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding_sum,
            dilation,
        )
        # The bias variant takes an extra input tensor, hence the split.
        if add_bias:
            func = tvm.build(s, [A, W, bias, C], device, name=kernel_name)
            func(a, w, b, c)
        else:
            func = tvm.build(s, [A, W, C], device, name=kernel_name)
            func(a, w, c)
        # Tensorcore accumulation is loose; compare with relative tolerance.
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)

    check_device(devices)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_tensorcore():
    """Test the conv2d with tensorcore for nhwc layout"""
    # plain workloads
    for workload in [(16, 16, 14, 16, 3, 1, 1), (16, 128, 7, 128, 7, 1, 3), (16, 160, 7, 160, 7, 1, 3)]:
        verify_conv2d_nhwc(*workload)
    # bias / relu epilogues
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_bias=True)
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True)
    verify_conv2d_nhwc(32, 64, 14, 64, 3, 1, 1, add_relu=True, add_bias=True)
    # assorted padding modes
    for workload in [
        (16, 64, 17, 64, 7, 1, (3, 3, 2, 2)),
        (16, 64, 17, 64, 7, 1, "SAME"),
        (16, 48, 35, 48, 5, 1, "VALID"),
        (16, 48, 56, 48, 3, 1, (1, 1, 1, 1)),
        (16, 64, 28, 64, 3, 1, (1, 1, 1, 1)),
    ]:
        verify_conv2d_nhwc(*workload)


if __name__ == "__main__":
    test_conv2d_nhwc_tensorcore()
| 5,520 | 34.619355 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_correlation.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
"""test of correlation operator in NCHW layout"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
# (compute, schedule) implementations per target kind for correlation in
# NCHW layout; selected through tvm.topi.testing.dispatch.
_correlation_implement = {
    "generic": (topi.nn.correlation_nchw, topi.generic.schedule_correlation_nchw),
    "gpu": (topi.cuda.correlation_nchw, topi.cuda.schedule_correlation_nchw),
}
# Parametrized correlation workloads: each row is
# (data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply).
(
    data_shape,
    kernel_size,
    max_displacement,
    stride1,
    stride2,
    pad_size,
    is_multiply,
) = tvm.testing.parameters(
    ((1, 3, 10, 10), 1, 4, 1, 1, 4, True),
    ((1, 3, 10, 10), 1, 5, 1, 1, 5, True),
    ((5, 1, 4, 4), 3, 1, 2, 1, 2, True),
    ((5, 1, 6, 4), 3, 1, 2, 2, 2, False),
    ((5, 1, 11, 11), 5, 1, 1, 1, 2, False),
)
# Single dtype parameter shared by every workload above.
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
    dtype, data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
):
    """Two random inputs plus the numpy reference correlation output."""
    lhs = np.random.uniform(size=data_shape).astype(dtype)
    rhs = np.random.uniform(size=data_shape).astype(dtype)
    expected = tvm.topi.testing.correlation_nchw_python(
        lhs, rhs, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
    )
    return lhs, rhs, expected
def test_correlation_nchw(
    target,
    dev,
    ref_data,
    dtype,
    kernel_size,
    max_displacement,
    stride1,
    stride2,
    pad_size,
    is_multiply,
):
    """Compare the topi correlation kernel against the numpy reference."""
    a_np, b_np, c_np = ref_data

    data1 = te.placeholder(a_np.shape, name="data1", dtype=dtype)
    data2 = te.placeholder(b_np.shape, name="data2", dtype=dtype)

    fcompute, fschedule = tvm.topi.testing.dispatch(target, _correlation_implement)
    with tvm.target.Target(target):
        out = fcompute(
            data1, data2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply
        )
        sched = fschedule([out])
        buf_a = tvm.nd.array(a_np, dev)
        buf_b = tvm.nd.array(b_np, dev)
        buf_c = tvm.nd.empty(c_np.shape, dtype=dtype, device=dev)
        func = tvm.build(sched, [data1, data2, out], target)
        func(buf_a, buf_b, buf_c)
        tvm.testing.assert_allclose(buf_c.numpy(), c_np, rtol=1e-5)


if __name__ == "__main__":
    tvm.testing.main()
| 2,916 | 28.765306 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_util.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for util"""
from tvm import topi
def verify_get_shape(src_shape, src_layout, dst_layout, expect_shape):
    """Check that topi.utils.get_shape converts src_shape between layouts.

    Parameters
    ----------
    src_shape : tuple of int
        Shape in the source layout.
    src_layout, dst_layout : str
        Layout strings such as "NCHW" or "NCHW16c".
    expect_shape : tuple of int
        Expected shape after converting to dst_layout.
    """
    dst_shape = topi.utils.get_shape(src_shape, src_layout, dst_layout)
    assert (
        dst_shape == expect_shape
    ), f"Shape mismatch: expecting {expect_shape} but got {dst_shape}"
def test_get_shape():
    """Exercise layout conversions, including blocked layouts."""
    cases = [
        ((1, 3, 224, 224), "NCHW", "NCHW", (1, 3, 224, 224)),
        ((1, 3, 224, 224), "NCHW", "NHWC", (1, 224, 224, 3)),
        ((3, 2, 32, 48, 16), "NCHW16c", "NC16cWH", (3, 2, 16, 48, 32)),
        ((2, 3, 32, 32, 16, 8), "OIHW16i8o", "HWO8oI16i", (32, 32, 2, 8, 3, 16)),
    ]
    for src_shape, src_layout, dst_layout, expect_shape in cases:
        verify_get_shape(src_shape, src_layout, dst_layout, expect_shape)


if __name__ == "__main__":
    test_get_shape()
| 1,504 | 37.589744 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_layer_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for layer_norm."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
# Schedule lookup for the layer_norm test, keyed by target kind; only a
# generic injective schedule is registered.
_layer_norm_schedule = {
    "generic": topi.generic.schedule_injective,
}
# only test on llvm because schedule is missing
@tvm.testing.parametrize_targets("llvm")
@pytest.mark.parametrize("shape,axis", [([4, 16], (1,)), ([4, 16, 16], (1, 2))])
def test_layer_norm(target, dev, shape, axis, episilon=1e-5, dtype="float32", rtol=1e-5, atol=1e-5):
    """Compare topi.nn.layer_norm against the numpy reference implementation."""
    # NOTE(review): "episilon" is a typo for "epsilon"; renaming the keyword
    # would change the test signature, so it is kept as-is.
    data = te.placeholder(shape, dtype=dtype, name="data")
    scale_shape = [shape[dim] for dim in axis]
    gamma = te.placeholder(scale_shape, dtype=dtype, name="gamma")
    beta = te.placeholder(scale_shape, dtype=dtype, name="beta")
    out = topi.nn.layer_norm(data, gamma, beta, axis, episilon)

    data_np = np.random.uniform(size=shape).astype(dtype)
    gamma_np = np.random.uniform(size=scale_shape).astype(dtype)
    beta_np = np.random.uniform(size=scale_shape).astype(dtype)
    expected = tvm.topi.testing.layer_norm_python(data_np, gamma_np, beta_np, axis, episilon)

    with tvm.target.Target(target):
        schedule_fn = tvm.topi.testing.dispatch(target, _layer_norm_schedule)
        sched = schedule_fn([out])
        data_tvm = tvm.nd.array(data_np, dev)
        gamma_tvm = tvm.nd.array(gamma_np, dev)
        beta_tvm = tvm.nd.array(beta_np, dev)
        out_tvm = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=dtype), dev)
        func = tvm.build(sched, [data, gamma, beta, out], target)
        func(data_tvm, gamma_tvm, beta_tvm, out_tvm)
        tvm.testing.assert_allclose(out_tvm.numpy(), expected, rtol=rtol, atol=atol)


if __name__ == "__main__":
    tvm.testing.main()
| 2,481 | 38.396825 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv3d_ndhwc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Tensorcore (compute, schedule) pair for NDHWC conv3d; CUDA is the only
# target registered here.
_conv3d_ndhwc_tensorcore_implement = {
    "cuda": (topi.cuda.conv3d_ndhwc_tensorcore, topi.cuda.schedule_conv3d_ndhwc_tensorcore)
}
def verify_conv3d_ndhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices="cuda",
):
    """Test the conv3d with tensorcore for ndhwc layout.

    Builds a float16 NDHWC conv3d (optionally fused with bias-add and relu),
    runs it with the tensorcore schedule on `devices`, and compares against
    the numpy reference implementation.
    """
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
        padding, (kernel, kernel, kernel)
    )
    padding_sum = pad_front + pad_top + pad_left + pad_back + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )
    in_depth = in_height = in_width = in_size
    dtype = "float16"
    A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), dtype, name="A")
    W = te.placeholder((kernel, kernel, kernel, in_channel, num_filter), dtype, name="W")
    bias = te.placeholder((1, 1, 1, 1, num_filter), dtype, name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)

    @memoize("topi.tests.test_topi_conv3d_ndhwc.verify_conv3d_ndhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        # The weight layout is DHWIO, so dilation must be applied to the
        # three leading spatial axes. (The previous code dilated the channel
        # axes, which would give a wrong reference for any dilation > 1.)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv3d_ndhwc_python(a_np, dw_np, stride, padding)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(
                device, _conv3d_ndhwc_tensorcore_implement
            )
            C = fcompute(A, W, stride, padding, dilation, 1, "float16")
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        kernel_name = "relu_%d_%d_%d_%d_%d_%d_%d_%d" % (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel,
            stride,
            padding_sum,
            dilation,
        )
        # The bias variant takes an extra input tensor, hence the split.
        if add_bias:
            func = tvm.build(s, [A, W, bias, C], device, name=kernel_name)
            func(a, w, b, c)
        else:
            func = tvm.build(s, [A, W, C], device, name=kernel_name)
            func(a, w, c)
        # Tensorcores are very inaccurate, with large shapes, the accumulation
        # error is high especially away from 1. We disable atol as it is very
        # large for these numbers that are far away from 1.
        tvm.testing.assert_allclose(c.numpy(), c_np, atol=1e200, rtol=0.01)

    check_device(devices)
@tvm.testing.requires_tensorcore
@tvm.testing.requires_cuda
def test_conv3d_ndhwc_tensorcore():
    """Test the conv3d with tensorcore for ndhwc layout"""
    # plain workloads
    for workload in [(16, 16, 14, 16, 3, 1, 1), (16, 64, 7, 64, 7, 1, 3), (16, 32, 7, 32, 7, 1, 3)]:
        verify_conv3d_ndhwc(*workload)
    # bias / relu epilogues
    verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_bias=True)
    verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True)
    verify_conv3d_ndhwc(32, 16, 14, 16, 3, 1, 1, add_relu=True, add_bias=True)
    # assorted padding modes
    for workload in [
        (16, 16, 17, 16, 7, 1, (3, 3, 3, 2, 2, 2)),
        (16, 16, 17, 16, 7, 1, "SAME"),
        (8, 16, 35, 32, 5, 1, "VALID"),
        (16, 32, 16, 32, 3, 1, (1, 1, 1, 1, 1, 1)),
        (16, 16, 12, 16, 3, 1, (1, 1, 1, 1, 1, 1)),
    ]:
        verify_conv3d_ndhwc(*workload)


if __name__ == "__main__":
    test_conv3d_ndhwc_tensorcore()
| 5,627 | 35.784314 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_bitserial_conv2d_rasp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
def generate_quantized_np(shape, bits, out_dtype):
    """Draw a reproducible random integer array representable in `bits` bits.

    The RNG is reseeded with 0 on every call, so identical arguments always
    yield identical data. Values lie in the half-open range [0, 2**bits).
    """
    np.random.seed(0)
    upper = 2 ** bits
    return np.random.randint(0, upper, size=shape).astype(out_dtype)
# Verify that certain special instructions from the tensorize pass exist
def verify_bitserial_conv2d_nhwc(
    batch,
    in_size,
    in_channel,
    num_filter,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    unipolar,
    use_relu=False,
):
    """Build the ARM bitserial NHWC conv2d, check that the tensorized NEON
    instructions (vpadal/vcnt/vpadd) appear in the generated assembly, and —
    when actually running on an ARM host — compare results with a NumPy
    reference.

    Parameters mirror a square conv2d workload; `unipolar` selects the
    {0,1} -> {-1,1} weight interpretation, `use_relu` appends a ReLU stage.
    """
    in_height = in_width = in_size
    input_type = "uint32"
    out_dtype = "int16"
    # Cross-compile target: Raspberry Pi 3 (ARMv7 + NEON).
    device = "llvm -device=arm_cpu -model=bcm2837 -mtriple=armv7l-linux-gnueabihf -mattr=+neon"
    with tvm.target.Target(device):
        A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_type, name="A")
        W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_type, name="W")
        B = topi.arm_cpu.bitserial_conv2d_nhwc(
            A, W, stride, padding, activation_bits, weight_bits, "uint8", out_dtype, unipolar
        )
        if use_relu:
            B = topi.nn.relu(B)
        s = topi.arm_cpu.schedule_bitserial_conv2d_nhwc([B])
        func = tvm.build(s, [A, W, B], device)
        # Verify that the tensorize pass emitted the expected NEON
        # popcount/accumulate instructions.
        assembly = func.get_source("asm")
        matches = re.findall("vpadal", assembly)
        assert len(matches) > 0
        matches = re.findall("vcnt", assembly)
        assert len(matches) > 0
        matches = re.findall("vpadd", assembly)
        assert len(matches) > 0
        dev = tvm.device(device, 0)
        # Numerical execution only makes sense on an actual ARM machine.
        if "arm" not in os.uname()[4]:
            print("Skipped running code, not an arm device")
            return
        print("Running on target: %s" % device)
        def get_ref_data():
            # Random quantized inputs plus a NumPy reference convolution.
            a_np = generate_quantized_np(get_const_tuple(A.shape), activation_bits, input_type)
            w_np = generate_quantized_np(get_const_tuple(W.shape), weight_bits, input_type)
            if unipolar:
                # Map weight bits {1, other} to {+1, -1} for the reference.
                w_ = np.copy(w_np).astype(out_dtype)
                for x in np.nditer(w_, op_flags=["readwrite"]):
                    x[...] = 1 if x == 1 else -1
                b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
            else:
                b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
                    out_dtype
                )
            return a_np, w_np, b_np
        a_np, w_np, b_np = get_ref_data()
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        func = tvm.build(s, [A, W, B], device)
        func(a, w, b)
        np.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
    """Exercise several bit-width / polarity combinations of the kernel."""
    in_size = 56
    ic, oc = 64, 64
    k = 3
    stride = 1
    pad = 1
    # (activation_bits, weight_bits, unipolar, use_relu) in original order.
    configs = [
        (1, 1, False, False),
        (2, 1, False, False),
        (1, 1, True, False),
        (2, 1, True, False),
        (2, 1, True, True),
    ]
    for abits, wbits, unipolar, relu in configs:
        verify_bitserial_conv2d_nhwc(
            1, in_size, ic, oc, k, stride, pad, abits, wbits, unipolar, relu
        )
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_bitserial_conv2d()
| 4,110 | 32.696721 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_depthwise_conv2d_back_input.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
import tvm.topi.testing
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_input_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_input(
    batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
    """Check depthwise_conv2d_backward_input_nhwc against a SciPy reference.

    Builds the TOPI backward-input kernel for a square NHWC workload (width,
    stride and padding mirror the height parameters), runs it on each GPU
    target, and compares against a convolve2d-based NumPy reference.

    Parameters
    ----------
    batch, in_channel, in_h : int
        Input tensor dimensions (the input is square: in_w == in_h).
    channel_multiplier : int
        Depthwise channel multiplier; out_channel = in_channel * multiplier.
    filter_h, stride_h, padding_h : int
        Square filter size, stride and padding of the forward convolution.
    """
    in_w = in_h
    filter_channel = in_channel
    filter_w = filter_h
    stride_w = stride_h
    padding_w = padding_h
    # Forward-conv output extent. Integer floor-division replaces the former
    # np.int(float_division) truncation: the deprecated np.int alias was
    # removed in NumPy 1.24, so the old code raises AttributeError there.
    out_h = (in_h + 2 * padding_h - filter_h) // stride_h + 1
    out_w = (in_w + 2 * padding_w - filter_w) // stride_w + 1
    out_channel = in_channel * channel_multiplier
    ishape = [batch, in_h, in_w, in_channel]
    oshape = [batch, out_h, out_w, out_channel]
    # placeholder
    Out_grad = te.placeholder(oshape, name="Out_grad")
    Filter = te.placeholder((filter_h, filter_w, filter_channel, channel_multiplier))
    # declare
    In_grad = topi.nn.depthwise_conv2d_backward_input_nhwc(
        Filter,
        Out_grad,
        oshape,
        ishape,
        stride=[stride_h, stride_w],
        padding=[padding_h, padding_w],
    )
    # schedule
    schedule = schedule_depthwise_conv2d_backward_input_nhwc(In_grad)
    def check_device(device):
        # Build and run the kernel on `device`, comparing with the reference.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        # build the kernel
        f = tvm.build(schedule, [Filter, Out_grad, In_grad], device)
        # prepare pod type for test data closure
        dtype = Out_grad.dtype
        out_grad_shape = get_const_tuple(Out_grad.shape)
        filter_shape = get_const_tuple(Filter.shape)
        # use memoize to pickle the test data for next time use
        @memoize("topi.tests.test_topi_depthwise_conv2d_backward_input.nhwc")
        def get_ref_data():
            out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
            filter_np = np.random.uniform(size=filter_shape).astype(dtype)
            # Dilate the output gradient by the forward stride so a unit-stride
            # correlation below reproduces the strided backward pass.
            dilated_out_grad_np = tvm.topi.testing.dilate_python(
                out_grad_np, [1, stride_h, stride_w, 1]
            )
            # padding params in forward propagation
            fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(
                [padding_h, padding_w], (filter_h, filter_w)
            )
            # padding params in backward propagation
            bpad_top = filter_h - 1 - fpad_top
            bpad_bottom = (filter_h - 1 - fpad_bottom) + (stride_h - 1)
            bpad_left = filter_w - 1 - fpad_left
            bpad_right = (filter_w - 1 - fpad_right) + (stride_w - 1)
            padded_out_grad = np.zeros(
                (
                    batch,
                    dilated_out_grad_np.shape[1] + bpad_top + bpad_bottom,
                    dilated_out_grad_np.shape[2] + bpad_left + bpad_right,
                    out_channel,
                )
            )
            padded_out_grad[
                :,
                bpad_top : dilated_out_grad_np.shape[1] + bpad_top,
                bpad_left : dilated_out_grad_np.shape[2] + bpad_left,
                :,
            ] = dilated_out_grad_np
            in_grad_np = np.zeros((batch, in_h, in_w, in_channel))
            for b in range(batch):
                for c in range(in_channel):
                    for m in range(channel_multiplier):
                        # Each input channel accumulates contributions from
                        # all of its multiplier output channels.
                        in_grad_np[b, :, :, c] += signal.convolve2d(
                            padded_out_grad[b, :, :, c * channel_multiplier + m],
                            filter_np[:, :, c, m],
                            mode="valid",
                        )[0:in_h, 0:in_w]
            return (out_grad_np, filter_np, in_grad_np)
        (out_grad_np, filter_np, in_grad_np) = get_ref_data()
        out_grad_tvm = tvm.nd.array(out_grad_np, dev)
        filter_tvm = tvm.nd.array(filter_np, dev)
        in_grad_tvm = tvm.nd.array(np.zeros(shape=ishape, dtype=dtype), dev)
        # launch the kernel
        timer = f.time_evaluator(f.entry_name, dev, number=1)
        tcost = timer(filter_tvm, out_grad_tvm, in_grad_tvm).mean
        tvm.testing.assert_allclose(in_grad_np, in_grad_tvm.numpy(), rtol=1e-5)
    check_device("opencl")
    check_device("cuda")
    check_device("metal")
    check_device("rocm")
    check_device("vulkan")
    check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_input_nhwc():
    """Sweep multiplier, kernel size, stride and padding combinations."""
    # Same 16 workloads as before: 'same'-style padding first, then pad=0;
    # within each group stride is outermost, then kernel, then multiplier.
    for same_pad in (True, False):
        for stride in (1, 2):
            for kernel in (3, 5):
                for multiplier in (1, 2):
                    pad = kernel // 2 if same_pad else 0
                    verify_depthwise_conv2d_back_input(
                        16, 256, 56, multiplier, kernel, stride, pad
                    )
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_topi_depthwise_conv2d_backward_input_nhwc()
| 6,542 | 40.150943 | 88 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
"""Example code to do convolution."""
import numpy as np
import tvm
import os
import tvm.testing
import tvm.topi.testing
from tvm import te, autotvm, topi, relay
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib import nvcc
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
_conv2d_hwnc_tensorcore_implement = {
"cuda": (topi.cuda.conv2d_hwnc_tensorcore, topi.cuda.schedule_conv2d_hwnc_tensorcore)
}
def verify_conv2d_hwnc(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1, dtype="int4"
):
    """Test the conv2d with tensorcore for hwnc layout"""
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )
    # choose dtype from int4, int8
    assert dtype in ["int4", "int8"]
    in_height = in_width = in_size
    # HWNC activation layout and HWOI weight layout, as the kernel expects.
    A = te.placeholder((in_height, in_width, batch, in_channel), name="A", dtype=dtype)
    W = te.placeholder((kernel, kernel, num_filter, in_channel), name="W", dtype=dtype)
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    @memoize("topi.tests.test_topi_conv2d_hwnc.verify_conv2d_hwnc")
    def get_ref_data():
        # Random inputs plus a NumPy NHWC reference; activations are
        # transposed HWNC -> NHWC for the reference computation.
        if dtype == "int4":
            # int4 data is generated as int32 here and packed into int4
            # words later by convert_int32_into_int4.
            a_np = np.random.randint(low=-8, high=7, size=a_shape).transpose((2, 0, 1, 3))
            w_np = np.random.randint(low=-8, high=7, size=w_shape)
            dw_np = topi.testing.dilate_python(
                w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
            )
        elif dtype == "int8":
            a_np = (
                np.random.randint(low=-128, high=127, size=a_shape)
                .transpose((2, 0, 1, 3))
                .astype(dtype)
            )
            w_np = np.random.randint(low=-128, high=127, size=w_shape).astype(dtype)
            dw_np = topi.testing.dilate_python(
                w_np.transpose((0, 1, 3, 2)), (1, 1, dilation, dilation)
            )
        c_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, c_np
def convert_int32_into_int4(a_int32):
"""convert int32 values into int4
Parameters
----------
a_int32 : int
Return
------
a_int4 : int
"""
I, J, K, L = a_int32.shape
a_int4 = np.zeros(shape=(I, J, K, L // 8), dtype=np.int32)
for i in range(I):
for j in range(J):
for k in range(K):
for l in range(L // 8):
for m in range(min(8, L - l * 8)):
a_int4[i, j, k, l] = a_int4[i, j, k, l] | (
(a_int32[i, j, k, l * 8 + m] & 0xF) << ((7 - m) * 4)
)
return a_int4
    a_np, w_np, c_np = get_ref_data()
    if dtype == "int4":
        # Pack the int32-encoded int4 data into real int4 words.
        a_np = convert_int32_into_int4(a_np)
        w_np = convert_int32_into_int4(w_np)
    def check_target(target):
        # Compile and run on `target` if it is enabled and has Tensor Cores.
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        if not nvcc.have_tensorcore(dev.compute_version):
            print("skip because gpu does not support Tensor Cores")
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = topi.testing.dispatch(target, _conv2d_hwnc_tensorcore_implement)
            C = fcompute(A, W, stride, padding, dilation, dtype, "int32")
            s = fschedule([C])
        # a_np is NHWC here; transpose back to the kernel's HWNC layout.
        a = tvm.nd.array(a_np.transpose((1, 2, 0, 3)), dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func = tvm.build(
            s,
            [A, W, C],
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
        )
        func(a, w, c)
        rtol = 1e-3
        # Output is HWNC; transpose to NHWC before comparing with c_np.
        tvm.testing.assert_allclose(c.numpy().transpose((2, 0, 1, 3)), c_np, rtol=rtol)
    check_target("cuda")
def verify_feature_length():
    """Check that two random configs of the extracted conv2d task yield
    autotvm itervar feature vectors of equal length.

    Builds a single HWNC int4 conv2d via relay ConvertLayout + recast,
    extracts its AutoTVM task, instantiates two random schedule configs and
    compares the flattened feature lengths.
    """
    np.random.seed(123)
    target = "cuda"
    ctx = tvm.device(target)
    batch_size = 32
    input_shape = (32, 512, 7, 7)
    kernel_shape = (512, 512, 3, 3)
    def get_mod():
        # A one-conv relay module with inferred types and no params.
        x = relay.var("x", relay.TensorType(input_shape, "float32"))
        y = relay.var("y", relay.TensorType(kernel_shape, "float32"))
        f = relay.Function(
            [x, y], relay.nn.conv2d(x, y, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3])
        )
        mod = tvm.IRModule()
        mod["main"] = f
        mod = relay.transform.InferType()(mod)
        return mod, {}
    mod, params = get_mod()
    layout_config = relay.transform.LayoutConfig()
    desired_layouts = {"nn.conv2d": ["HWNC", "default"]}
    # Convert the conv to the HWNC layout, then recast it to int4/int32 so
    # the tensorcore template is selected during task extraction.
    with layout_config:
        seq = tvm.transform.Sequential([relay.transform.ConvertLayout(desired_layouts)])
        with tvm.transform.PassContext(opt_level=3):
            mod = seq(mod)
    mod = relay.transform.recast(mod, "int4", "int32")
    tasks = autotvm.task.extract_from_program(
        mod, target=target, params=params, ops=(relay.op.get("nn.conv2d"),)
    )
    assert len(tasks) == 1
    task = tasks[0]
    space = task.config_space
    idx1 = space.get_rand_index()
    idx2 = space.get_rand_index()
    cfg = space.get(idx1)
    sch, arg_bufs = task.instantiate(cfg)
    fea1 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
    cfg = space.get(idx2)
    sch, arg_bufs = task.instantiate(cfg)
    fea2 = autotvm.feature.get_itervar_feature_flatten(sch, arg_bufs, take_log=True)
    # Feature length must be config-independent for cost-model training.
    assert len(fea1) == len(fea2)
@tvm.testing.requires_tensorcore
def test_conv2d_hwnc_tensorcore():
    """Test the conv2d with tensorcore for hwnc layout"""
    # (batch, in_channel, in_size, num_filter, kernel, stride, padding, dtype)
    workloads = [
        (8, 64, 56, 64, 3, 1, 1, "int8"),
        (8, 64, 56, 64, 1, 1, 0, "int4"),
        (8, 64, 56, 128, 3, 2, 1, "int4"),
        (8, 64, 56, 64, 1, 2, 0, "int4"),
        (8, 128, 28, 128, 3, 1, 1, "int4"),
        (8, 128, 28, 256, 3, 2, 1, "int4"),
        (8, 128, 28, 256, 1, 2, 0, "int4"),
        (8, 256, 14, 256, 3, 1, 1, "int4"),
        (8, 256, 14, 512, 3, 2, 1, "int4"),
        (8, 256, 14, 512, 1, 2, 0, "int4"),
        (8, 512, 9, 512, 3, 1, 1, "int4"),
    ]
    for batch, ic, size, oc, kernel, stride, pad, dtype in workloads:
        verify_conv2d_hwnc(batch, ic, size, oc, kernel, stride, pad, dtype=dtype)
    verify_feature_length()
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_conv2d_hwnc_tensorcore()
| 7,589 | 34.971564 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_qnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for QNN operators."""
import numpy as np
import tvm
from tvm import topi, relay, te
from tvm.contrib import graph_executor
import tvm.topi.testing
def verify_simulated_quantize(data_shape, out_dtype, channels, axis):
    """Compare topi simulated_quantize against the real qnn.quantize op.

    Runs both on every enabled target; `channels` selects per-channel
    (len > 1) vs per-tensor scale/zero-point, `axis` is the quantization
    axis, `out_dtype` the target integer dtype being simulated.
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_Q = topi.nn.simulated_quantize(A, D, output_scale=S, output_zero_point=Z, axis=axis)
    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(size=data_shape).astype("float32")
    # The simulated op takes the dtype as a runtime integer code.
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[out_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    q_np = np.zeros(shape=data_shape, dtype="float32")
    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        q = tvm.nd.array(q_np, dev)
        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype="float32")
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_q_op = relay.qnn.op.quantize(a_var, s_var, z_var, axis=axis, out_dtype=out_dtype)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_q_op), target=target)
        # Get real qnn quantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_q_out = m.get_output(0)
        # Compile the simulated quantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_Q)
            func = tvm.build(sched, [A, D, S, Z, SIM_Q], target, name="sim_quantize")
        func(a, d, s, z, q)
        # Check correctness against the true qnn output.
        mismatch = q.numpy() != real_q_out.numpy().astype("float32")
        # Allow some rounding errors due to GPU fp32 arithmetic.
        assert np.sum(mismatch) <= 3
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def test_simulated_quantize():
    """Run the simulated-quantize check over several shape/dtype configs."""
    # (data_shape, out_dtype, channels, axis)
    cases = [
        ([1], "int8", [1], -1),
        ([2, 5], "int8", [5], 1),
        ([1, 32, 32, 32], "int8", [32], -1),
        ([1, 32, 32, 32], "uint8", [32], -2),
        ([2, 5], "int32", [5], 1),
    ]
    for shape, dtype, channels, axis in cases:
        verify_simulated_quantize(shape, dtype, channels, axis)
def verify_simulated_dequantize(data_shape, in_dtype, channels, axis):
    """Compare topi simulated_dequantize against the real qnn.dequantize op.

    Runs both on every enabled target; `channels` selects per-channel
    (len > 1) vs per-tensor scale/zero-point, `axis` is the quantization
    axis, `in_dtype` the integer dtype being simulated.
    """
    # Create placeholder variables for all qnn inputs.
    A = te.placeholder(data_shape, name="value", dtype="float32")
    D = te.placeholder([], name="dtype", dtype="int32")
    S = te.placeholder([te.size_var("scale_dim")], name="scale", dtype="float32")
    Z = te.placeholder([te.size_var("zp_dim")], name="zp", dtype="int32")
    SIM_DQ = topi.nn.simulated_dequantize(A, D, input_scale=S, input_zero_point=Z, axis=axis)
    # Create random numpy values to assign to inputs.
    a_np = np.random.uniform(low=-128, high=127, size=data_shape).astype(in_dtype)
    # The simulated op works on float data carrying the integer values.
    a_np_f = a_np.astype("float32")
    d_np = np.int32(topi.nn.SQNN_DTYPE_TO_CODE[in_dtype])
    s_np = np.random.uniform(low=1e-4, high=0.1, size=channels).astype("float32")
    z_np = np.random.uniform(low=-10, high=10, size=channels).astype("int32")
    dq_np = np.zeros(shape=data_shape, dtype="float32")
    def check_target(target, dev):
        # Wrap the numpy arrays in nd arrays.
        a = tvm.nd.array(a_np_f, dev)
        d = tvm.nd.array(d_np, dev)
        s = tvm.nd.array(s_np, dev)
        z = tvm.nd.array(z_np, dev)
        dq = tvm.nd.array(dq_np, dev)
        # Construct equivalent relay graph.
        per_channel = channels[0] != 1
        a_var = relay.var("a", shape=data_shape, dtype=in_dtype)
        if per_channel:
            s_var = relay.const(s_np)
            z_var = relay.const(z_np)
        else:
            s_var = relay.const(s_np[0])
            z_var = relay.const(z_np[0])
        real_dq_op = relay.qnn.op.dequantize(a_var, s_var, z_var, axis=axis)
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(tvm.IRModule.from_expr(real_dq_op), target=target)
        # Get real qnn quantize output.
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("a", a_np)
        m.run()
        real_dq_out = m.get_output(0)
        # Compile the simulated quantize function.
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(SIM_DQ)
            func = tvm.build(sched, [A, D, S, Z, SIM_DQ], target, name="sim_quantize")
        func(a, d, s, z, dq)
        # Check correctness against the true qnn output.
        tvm.testing.assert_allclose(dq.numpy(), real_dq_out.numpy().astype("float32"), rtol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def test_simulated_dequantize():
    """Run the simulated-dequantize check over several shape/dtype configs."""
    # (data_shape, in_dtype, channels, axis)
    cases = [
        ([1], "int8", [1], -1),
        ([2, 5], "int8", [5], 1),
        ([2, 5], "int8", [2], 0),
        ([1, 32, 32, 32], "int8", [32], -1),
        ([1, 32, 32, 32], "uint8", [32], -2),
        ([2, 5], "int32", [5], 1),
    ]
    for shape, dtype, channels, axis in cases:
        verify_simulated_dequantize(shape, dtype, channels, axis)
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_simulated_quantize()
    test_simulated_dequantize()
| 6,744 | 41.15625 | 97 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import pytest
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
def verify_conv2d_1x1_nhwc_pack_int8(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1
):
    """Check the x86 NHWC int8-pack conv2d schedule against a NumPy reference.

    Uses uint8 activations and int8 weights with an int32 accumulator for a
    square workload; only runs on LLVM.

    NOTE(review): this function uses tvm.testing.* but the file never imports
    tvm.testing explicitly — presumably pytest's conftest imports it; confirm
    before running standalone.
    """
    in_height = in_width = in_size
    A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="uint8")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W", dtype="int8")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    adtype = A.dtype
    wdtype = W.dtype
    @memoize("topi.tests.test_topi_conv2d_1x1_nhwc_pack_int8.verify_nhwc.v2")
    def get_ref_data():
        # Random inputs and NumPy reference output (pickled by @memoize).
        a_np = np.random.uniform(size=a_shape).astype(adtype)
        w_np = np.random.uniform(size=w_shape).astype(wdtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data()
    def check_device(device):
        # Build with the int8-pack schedule and compare against the reference.
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            B = topi.nn.conv2d(A, W, stride, padding, dilation, layout="NHWC", out_dtype="int32")
            s = topi.x86.schedule_conv2d_nhwc_pack_int8([B])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        func = tvm.build(s, [A, W, B], device)
        func(a, w, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
    # for device in ['llvm -mcpu=skylake-avx512']:
    for device in ["llvm"]:
        check_device(device)
# TODO(@llyfacebook): Please fix https://github.com/apache/tvm/issues/4122 to enable this test.
@pytest.mark.skip
def test_conv2d_nhwc():
    # Single 1x1 uint8 x int8 NHWC workload; re-enable once the issue above
    # is resolved.
    verify_conv2d_1x1_nhwc_pack_int8(1, 256, 32, 256, 1, 1, 0)
if __name__ == "__main__":
    # test_conv2d_nhwc()
    # Intentionally a no-op while the test above is disabled.
    pass
| 3,128 | 35.811765 | 97 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_lrn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for local response normalization"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
# Schedule dispatch table for topi.nn.lrn: the CUDA schedule serves every
# GPU-flavoured target; "generic" is the CPU fallback.
_lrn_schedule = {"generic": topi.generic.schedule_lrn}
_lrn_schedule.update(
    (gpu_target, topi.cuda.schedule_lrn)
    for gpu_target in ("gpu", "opencl", "metal", "rocm", "vulkan", "nvptx")
)
def verify_lrn(shape, size, axis, bias, alpha, beta, dtype="float32", rtol=1e-5, atol=1e-5):
    """Run topi.nn.lrn on each target and compare with the NumPy reference.

    `size`/`axis`/`bias`/`alpha`/`beta` are the LRN hyper-parameters;
    `rtol`/`atol` loosen the comparison for low-precision dtypes.
    """
    A = te.placeholder(shape, dtype=dtype, name="A")
    B = topi.nn.lrn(A, size, axis, alpha, beta, bias)
    a_np = np.random.uniform(size=shape).astype(dtype)
    b_np = tvm.topi.testing.lrn_python(a_np, size, axis, bias, alpha, beta)
    def check_device(device):
        # Build with the per-target schedule and compare against b_np.
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            s_func = tvm.topi.testing.dispatch(device, _lrn_schedule)
            s = s_func([B])
        dev = tvm.device(device, 0)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
        f = tvm.build(s, [A, B], device)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=rtol, atol=atol)
    for device in ["llvm", "cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
        check_device(device)
@tvm.testing.uses_gpu
def test_lrn():
    """Run the LRN reference check over a few shapes and hyper-parameters."""
    # (shape, size, axis, bias, alpha, beta, extra keyword overrides)
    cases = [
        ((1, 3, 5, 5), 3, 1, 1.0, 1.0, 0.5, {}),
        ((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5, {}),
        ((1, 3, 20, 20), 3, 1, 2.0, 1.0, 0.75, {}),
        ((1, 3, 5, 5), 3, 3, 1.0, 1.0, 0.5, {"dtype": "float16", "rtol": 1e-3, "atol": 1e-3}),
    ]
    for shape, size, axis, bias, alpha, beta, extra in cases:
        verify_lrn(shape, size, axis, bias, alpha, beta, **extra)
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_lrn()
| 2,677 | 35.684932 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_batch_matmul.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
from common import Int8Fallback
# Dispatch table: target kind -> (batch_matmul compute, schedule) pair used
# by verify_batch_matmul below via tvm.topi.testing.dispatch.
_batch_matmul_implement = {
    "generic": (topi.nn.batch_matmul, topi.generic.schedule_batch_matmul),
    "cpu": (topi.x86.batch_matmul, topi.x86.schedule_batch_matmul),
    "gpu": (topi.cuda.batch_matmul, topi.cuda.schedule_batch_matmul),
}
def verify_batch_matmul(x_batch, y_batch, M, N, K, dynamic=False, debug=False):
    """Check batch_matmul (x: [B, M, K], y: [B, N, K]) against NumPy.

    With `dynamic=True` the placeholders use symbolic shapes and a default
    schedule; batch broadcasting requires one batch dim to be 1 or equal.
    `debug=True` prints the lowered IR.
    """
    if not dynamic:
        x = te.placeholder((x_batch, M, K), name="x")
        y = te.placeholder((y_batch, N, K), name="y")
        dtype = x.dtype
    else:
        assert x_batch == y_batch or x_batch == 1 or y_batch == 1
        batch_size = max(x_batch, y_batch)
        # Symbolic dims; the concrete numpy data still uses the given sizes.
        dynamic_batch_size = te.var("dynamic_batch_size")
        dynamic_M = te.var("dynamic_M")
        dynamic_N = te.var("dynamic_N")
        dynamic_K = te.var("dynamic_K")
        x = te.placeholder((dynamic_batch_size, dynamic_M, dynamic_K), name="x")
        y = te.placeholder((dynamic_batch_size, dynamic_N, dynamic_K), name="y")
        dtype = x.dtype
    # use memoize to pickle the test data for next time use
    # NOTE(review): the memoize key string is shared with
    # verify_batch_matmul_int8's get_ref_data — confirm the cache
    # distinguishes the two before relying on cross-run pickling.
    @memoize("topi.tests.test_topi_batch_matmul")
    def get_ref_data():
        a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
        b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np)
        return (a_np, b_np, c_np)
    # get the test data
    a_np, b_np, c_np = get_ref_data()
    def check_device(target, dev):
        # Build with the per-target compute/schedule pair and compare.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _batch_matmul_implement)
            out = fcompute(x, y)
            if not dynamic:
                s = fschedule([out])
                out_shape = out.shape
            else:
                # Dynamic shapes only get the default schedule.
                s = te.create_schedule(out.op)
                out_shape = (batch_size, M, N)
            if debug:
                print(tvm.lower(s, [x, y, out], simple_mode=True))
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out_shape), dtype=dtype), dev)
        f = tvm.build(s, [x, y, out], target, name="dense")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        target_kind = tvm.target.Target(target).kind.name
        if dynamic and target_kind in ["cuda", "nvptx", "vulkan", "opencl"]:
            print("Dynamic batch matmul test is skippped on %s" % target)
            continue
        check_device(target, dev)
def verify_batch_matmul_int8(x_batch, y_batch, M, N, K):
    """Check the CUDA int8 batch_matmul (int32 accumulate) against NumPy.

    Requires a GPU with int8 intrinsics (dp4a); skips otherwise. One batch
    dimension may be 1 for broadcasting.
    """
    dtype = "int8"
    out_dtype = "int32"
    assert x_batch == y_batch or x_batch == 1 or y_batch == 1
    x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
    y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)
    # use memoize to pickle the test data for next time use
    # NOTE(review): this memoize key string is identical to the one used in
    # verify_batch_matmul above — confirm the cache distinguishes the two
    # before relying on cross-run pickling.
    @memoize("topi.tests.test_topi_batch_matmul")
    def get_ref_data():
        a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
        b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype=out_dtype)
        return (a_np, b_np, c_np)
    # get the test data
    a_np, b_np, c_np = get_ref_data()
    def check_device(device):
        # Build the int8 schedule and compare against the reference.
        dev = tvm.device(device, 0)
        if device == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
            print("Skip because int8 intrinsics are not available")
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            out = topi.cuda.batch_matmul_int8(x, y, None, out_dtype)
            s = topi.cuda.schedule_batch_matmul_int8([out])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
        f = tvm.build(s, [x, y, out], device, name="batch_matmul_int8")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    for device in ["cuda"]:
        check_device(device)
@tvm.testing.uses_gpu
def test_batch_matmul():
    """Batch matmul verification: static shapes, batch broadcasting, dynamic batch."""
    static_cases = [
        (1, 1, 16, 16, 32),
        (5, 5, 16, 16, 32),
        (5, 5, 16, 20, 32),
        (30, 30, 16, 20, 32),
        # Batch broadcasting (one side has batch 1).
        (1, 5, 16, 16, 32),
        (5, 1, 16, 16, 32),
    ]
    for x_batch, y_batch, m, n, k in static_cases:
        verify_batch_matmul(x_batch, y_batch, m, n, k)
    # Dynamic batch dimension.
    for x_batch, y_batch, m, n, k in [(1, 1, 16, 16, 32), (5, 5, 16, 16, 32)]:
        verify_batch_matmul(x_batch, y_batch, m, n, k, dynamic=True)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_batch_matmul_int8():
    """Check the CUDA int8 batch_matmul kernel on assorted (batch, M, N, K) workloads."""
    workloads = [
        (1, 1, 2, 3, 1),
        (1, 1, 16, 24, 32),
        (5, 5, 24, 16, 32),
        (30, 30, 16, 20, 32),
        # Batch broadcasting (one side has batch 1).
        (1, 5, 16, 16, 32),
        (5, 1, 16, 16, 32),
    ]
    with Int8Fallback():
        for x_batch, y_batch, m, n, k in workloads:
            verify_batch_matmul_int8(x_batch, y_batch, m, n, k)
if __name__ == "__main__":
    # Allow running this test file directly, outside pytest.
    test_batch_matmul()
    test_batch_matmul_int8()
| 6,199 | 36.575758 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_group_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group transpose convolution."""
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
# target key -> (compute, schedule) pair, resolved via tvm.topi.testing.dispatch.
# NOTE(review): the CUDA entry points at the non-grouped conv2d_transpose
# compute/schedule -- presumably it accepts the extra `groups` argument passed
# below; confirm against topi.cuda.conv2d_transpose_nchw's signature.
_group_conv2d_nchw_implement = {
    "generic": (
        topi.nn.group_conv2d_transpose_nchw,
        topi.generic.schedule_group_conv2d_transpose_nchw,
    ),
    "cuda": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
}
def verify_group_conv2d_transpose_nchw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    output_padding,
    groups,
):
    """Compare grouped transpose conv2d (NCHW) against the Python reference.

    Builds the target-dispatched compute/schedule, runs it on every enabled
    target in {llvm, cuda}, and checks the result against
    tvm.topi.testing.conv2d_transpose_nchw_python.
    """
    print(
        "Workload: (%d, %d, %s, %d, %s, %s, %s, %s, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding, groups)
    )
    in_height, in_width = in_size
    kernel_height, kernel_width = kernel
    # NCHW input; weight layout is (in_channel, num_filter // groups, kh, kw).
    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((in_channel, num_filter // groups, kernel_height, kernel_width), name="W")
    # NOTE(review): `bias` is declared but never passed to the built function.
    bias = te.placeholder((num_filter, 1, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype
    # Memoize pickles the generated reference data for reuse on later runs.
    @memoize("topi.tests.test_topi_group_conv2d_transpose.verify_group_conv2d_transpose_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        c_np = tvm.topi.testing.conv2d_transpose_nchw_python(
            a_np, w_np, stride, padding, output_padding, groups
        ).astype(dtype)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()
    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
            C = fcompute(A, W, stride, padding, dtype, output_padding, groups)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # The kernel name encodes the full workload for easier debugging.
        func = tvm.build(
            s,
            [A, W, C],
            target,
            name="group_conv2d_transpose_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
            % (
                batch,
                in_channel,
                in_size[0],
                in_size[1],
                num_filter,
                kernel[0],
                kernel[1],
                stride[0],
                stride[1],
                padding[0],
                padding[1],
                padding[2],
                padding[3],
                output_padding[0],
                output_padding[1],
                groups,
            ),
        )
        func(a, w, c)
        c = c.numpy()
        # Compare batch-by-batch against the NumPy reference.
        for measurement, reference in zip(c, c_np):
            tvm.testing.assert_allclose(measurement, reference, rtol=1e-5)
    for target in ["llvm", "cuda"]:
        check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_transpose_nchw():
    """Exercise grouped transpose conv over a spread of shapes, strides and paddings."""
    # (batch, in_channel, in_size, num_filter, kernel, stride, padding,
    #  output_padding, groups)
    workloads = [
        (1, 4, (32, 32), 4, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 2),
        (1, 9, (32, 32), 9, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0), 3),
        (1, 4, (32, 32), 16, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0), 4),
        (1, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 2),
        (1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0), 16),
        (1, 512, (8, 1), 256, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0), 16),
        (1, 64, (64, 64), 64, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 64),
        (1, 128, (32, 32), 128, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 128),
        (1, 256, (16, 16), 256, (4, 4), (1, 1), (0, 0, 0, 0), (0, 0), 256),
        (1, 1, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0), 1),
        (1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1),
        (1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0), 1),
        (1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0), 1),
        (1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0), 1),
        (1, 48, (64, 64), 12, (4, 4), (2, 2), (1, 1, 1, 1), (0, 0), 1),
    ]
    for workload in workloads:
        verify_group_conv2d_transpose_nchw(*workload)
if __name__ == "__main__":
    # Allow running this test file directly, outside pytest.
    test_group_conv2d_transpose_nchw()
| 5,974 | 34.993976 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_reduce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for reduce."""
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi, tir
# Reduction workloads: (in_shape, axis, keepdims, reduce_type, dtype).
# `axis` may be an int, a tuple, () (reduce nothing), or None (reduce all).
in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters(
    ((32,), 0, False, "argmax", "float32"),
    ((128, 24, 128, 24), (1, 2, 3), True, "sum", "float32"),
    ((2, 3), None, True, "all", "bool"),
    ((128, 24 * 128 * 24), (1,), False, "max", "float32"),
    ((32, 128, 24), None, True, "sum", "float32"),
    ((32, 128, 24), None, True, "all", "bool"),
    ((128, 24, 128, 24), (0, 2), False, "min", "float32"),
    ((32, 128), 1, True, "argmax", "float32"),
    ((32, 24, 32, 24), 2, False, "argmin", "float32"),
    ((31, 21, 15), None, True, "argmax", "float32"),
    ((31, 21, 15), None, False, "sum", "float32"),
    ((128, 24, 128, 24), (1, 2, 3), True, "sum", "float64"),
    ((2, 3), None, True, "any", "bool"),
    ((32, 128, 24), None, True, "any", "bool"),
    ((1, 4, 7), 1, True, "any", "bool"),
    ((128, 24, 128, 24), 2, False, "any", "bool"),
    ((128, 24, 128, 24), 2, False, "sum", "bool"),
    ((128, 24, 128, 24), 0, True, "sum", "bool"),
    ((3, 4, 5), None, False, "prod", "float32"),
    ((3, 4, 5), (2,), False, "prod", "float32"),
    ((3, 4, 5), (1, 2), True, "prod", "float32"),
    ((3, 4, 5), (), False, "sum", "float32"),
    ((3, 4, 5), (), True, "sum", "float32"),
    ((3, 4, 5), (0, 1, 2), False, "sum", "float32"),
    ((3, 4, 5), (0, 1, 2), True, "sum", "float32"),
    ((3, 4, 5), (), False, "prod", "float32"),
    ((3, 4, 5), (), True, "prod", "float32"),
    ((3, 4, 5), (0, 1, 2), False, "prod", "float32"),
    ((3, 4, 5), (0, 1, 2), True, "prod", "float32"),
    ((3, 4, 5), (), False, "min", "float32"),
    ((3, 4, 5), (), True, "min", "float32"),
    ((3, 4, 5), (0, 1, 2), False, "min", "float32"),
    ((3, 4, 5), (0, 1, 2), True, "min", "float32"),
    ((3, 4, 5), (), False, "max", "float32"),
    ((3, 4, 5), (), True, "max", "float32"),
    ((3, 4, 5), (0, 1, 2), False, "max", "float32"),
    ((3, 4, 5), (0, 1, 2), True, "max", "float32"),
    ((3, 4, 5), (), False, "any", "bool"),
    ((3, 4, 5), (), True, "any", "bool"),
    ((3, 4, 5), (0, 1, 2), False, "any", "bool"),
    ((3, 4, 5), (0, 1, 2), True, "any", "bool"),
    ((3, 4, 5), (), False, "all", "bool"),
    ((3, 4, 5), (), True, "all", "bool"),
    ((3, 4, 5), (0, 1, 2), False, "all", "bool"),
    ((3, 4, 5), (0, 1, 2), True, "all", "bool"),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(in_shape, axis, keepdims, reduce_type, dtype):
    """Generate (input, mapped input, expected reduction) reference data.

    For non-bool dtypes the reduction under test operates on
    sqrt(exp(input)) (matching the A1 stage in test_reduce_map), so both the
    raw input and the mapped input are returned.
    """
    # Random input; bool inputs are used directly, floats go through sqrt(exp).
    if dtype == "bool":
        in_npy_map = in_npy = np.random.choice([True, False], size=in_shape)
    else:
        in_npy = np.random.uniform(-1, 1, size=in_shape).astype(dtype)
        in_npy_map = np.sqrt(np.exp(in_npy)).astype(dtype)
    if reduce_type == "sum":
        if dtype == "bool":
            out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims, dtype="bool")
        else:
            out_npy = in_npy_map.sum(axis=axis, keepdims=keepdims)
    elif reduce_type == "prod":
        out_npy = in_npy_map.prod(axis=axis, keepdims=keepdims)
    elif reduce_type == "all" and dtype == "bool":
        out_npy = in_npy_map.all(axis=axis, keepdims=keepdims)
    elif reduce_type == "any" and dtype == "bool":
        out_npy = in_npy_map.any(axis=axis, keepdims=keepdims)
    elif reduce_type == "max":
        out_npy = in_npy_map.max(axis=axis, keepdims=keepdims)
    elif reduce_type == "min":
        out_npy = in_npy_map.min(axis=axis, keepdims=keepdims)
    elif reduce_type == "argmax":
        out_npy = _my_npy_argmax(in_npy_map, axis=axis, keepdims=keepdims)
    elif reduce_type == "argmin":
        out_npy = _my_npy_argmin(in_npy_map, axis=axis, keepdims=keepdims)
    else:
        raise NotImplementedError
    return in_npy, in_npy_map, out_npy
def _my_npy_argmax(arr, axis, keepdims):
if not keepdims:
return arr.argmax(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmax(axis=axis).reshape(out_shape)
def _my_npy_argmin(arr, axis, keepdims):
if not keepdims:
return arr.argmin(axis=axis)
else:
if axis is None:
out_shape = [1 for _ in arr.shape]
else:
out_shape = list(arr.shape)
out_shape[axis] = 1
return arr.argmin(axis=axis).reshape(out_shape)
def test_reduce_map(target, dev, ref_data, in_shape, axis, keepdims, reduce_type, dtype):
    """Build the parametrized TOPI reduction and compare with the NumPy reference.

    Non-bool reductions run on sqrt(exp(A)) to exercise a fused elementwise
    producer; argmax/argmin results are validated by gathering the selected
    values and comparing them with NumPy's max/min.
    """
    target = tvm.target.Target(target)
    if target.kind.name == "vulkan" and reduce_type in ["sum", "prod", "any", "all"]:
        pytest.xfail(f"Vulkan backend has known errors on {reduce_type}")
    in_npy, in_npy_map, out_npy = ref_data
    # Build the logic and compile the function
    A = te.placeholder(shape=in_shape, name="A", dtype=dtype)
    A1 = topi.sqrt(topi.exp(A))
    out_dtype = dtype
    if reduce_type == "sum":
        if dtype == "bool":
            B = topi.sum(A, axis=axis, keepdims=keepdims)
        else:
            B = topi.sum(A1, axis=axis, keepdims=keepdims)
    elif reduce_type == "prod":
        B = topi.prod(A1, axis=axis, keepdims=keepdims)
    elif reduce_type == "all":
        B = topi.all(A, axis=axis, keepdims=keepdims)
    elif reduce_type == "any":
        B = topi.any(A, axis=axis, keepdims=keepdims)
    elif reduce_type == "max":
        B = topi.max(A1, axis=axis, keepdims=keepdims)
    elif reduce_type == "min":
        B = topi.min(A1, axis=axis, keepdims=keepdims)
    elif reduce_type == "argmax":
        B = topi.argmax(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    elif reduce_type == "argmin":
        B = topi.argmin(A1, axis=axis, keepdims=keepdims)
        out_dtype = "int32"
    else:
        raise NotImplementedError
    with tvm.target.Target(target):
        s = tvm.topi.testing.get_reduce_schedule(target)(B)
    foo = tvm.build(s, [A, B], target, name=reduce_type)
    data_tvm = tvm.nd.array(in_npy, device=dev)
    out_tvm = tvm.nd.empty(shape=out_npy.shape, device=dev, dtype=out_dtype)
    foo(data_tvm, out_tvm)
    if reduce_type == "argmax" or reduce_type == "argmin":
        # Ties may pick different-but-valid indices, so compare the selected
        # values instead of the raw indices.
        out_tvm_indices = out_tvm.numpy()
        if keepdims:
            out_tvm_indices = np.take(out_tvm_indices, indices=0, axis=axis)
        if axis is None:
            out_tvm_val = in_npy_map.ravel()[out_tvm_indices]
        else:
            # Build a full fancy-index tuple with the argmax/argmin indices
            # inserted at the reduced axis.
            other_indices = tuple(np.indices(in_shape[0:axis] + in_shape[(axis + 1) :]))
            sel_indices = other_indices[0:axis] + (out_tvm_indices,) + other_indices[axis:]
            out_tvm_val = in_npy_map[sel_indices]
        if reduce_type == "argmax":
            tvm.testing.assert_allclose(out_tvm_val, in_npy_map.max(axis=axis), 1e-3, 1e-3)
        elif reduce_type == "argmin":
            tvm.testing.assert_allclose(out_tvm_val, in_npy_map.min(axis=axis), 1e-3, 1e-3)
    else:
        tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, 1e-3, 1e-3)
def test_complex_reduce(target, dev):
    """A reduction feeding several consumers: E = (B + B) + B * B with B = sum(A, 0)."""
    shape = (2, 3)
    dtype = "float32"
    reduce_axis = 0
    data = te.placeholder(shape=shape, name="A", dtype=dtype)
    reduced = topi.sum(data, axis=reduce_axis, keepdims=False)
    doubled = topi.add(reduced, reduced)
    squared = topi.multiply(reduced, reduced)
    result = topi.add(doubled, squared)
    with tvm.target.Target(target):
        schedule = tvm.topi.testing.get_reduce_schedule(target)(result)
    func = tvm.build(schedule, [data, result], target, name="sum")
    # NumPy reference: 2 * sum + sum^2.
    a_np = np.random.uniform(-1, 1, size=shape).astype(dtype)
    ref_sum = a_np.sum(axis=reduce_axis, keepdims=False)
    expected = ref_sum * 2 + ref_sum * ref_sum
    a_nd = tvm.nd.array(a_np, device=dev)
    out_nd = tvm.nd.empty(shape=expected.shape, device=dev, dtype=dtype)
    func(a_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), expected, 1e-3, 1e-3)
# Symbolic dimensions used by the dynamic-shape collapse_sum cases below.
n = tir.Var("n", "int32")
m = tir.Var("m", "int32")
# Concrete values substituted for the symbolic dims when generating data.
true_value_map = {n: 3, m: 5}
data_shape, target_shape = tvm.testing.parameters(
    ((2, 3), (3,)),
    ((2, 3, 4), (2, 1, 4)),
    ((2, 3, 4, 5), (3, 1, 5)),
    ((2, n, 4, m), (n, 1, m)),
)
def _my_npy_collapse_sum(data, target_shape):
reduce_axes = []
i = data.ndim - 1
j = len(target_shape) - 1
while i >= 0:
if j < 0:
reduce_axes.append(i)
elif target_shape[j] == 1 and data.shape[i] > 1:
reduce_axes.append(i)
i -= 1
j -= 1
return np.sum(data, tuple(reduce_axes)).reshape(target_shape)
def test_collapse_sum(data_shape, target_shape):
    """Run topi.collapse_sum on CPU and compare with _my_npy_collapse_sum.

    Shapes may contain the tir.Var symbols n/m; true_value_map supplies the
    concrete sizes used when generating the NumPy data.
    """
    A = te.placeholder(data_shape, name="A")
    B = topi.collapse_sum(A, target_shape)
    s = te.create_schedule([B.op])
    # Resolve any symbolic dims to their concrete test values. (The
    # comprehension variable `s` is scoped to the comprehension and does not
    # clobber the schedule `s` above.)
    data_shape_const = [int(s) if s not in true_value_map else true_value_map[s] for s in A.shape]
    target_shape_const = [
        int(s) if s not in true_value_map else true_value_map[s] for s in target_shape
    ]
    a_np = np.random.uniform(size=data_shape_const).astype(A.dtype)
    b_np = _my_npy_collapse_sum(a_np, target_shape_const)
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    B_shape_const = [int(s) if s not in true_value_map else true_value_map[s] for s in B.shape]
    b = tvm.nd.array(np.zeros(B_shape_const, dtype=B.dtype), dev)
    # Building with the CSE pass disabled
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        foo = tvm.build(s, [A, B], "llvm", name="collapse_sum")
    foo(a, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
if __name__ == "__main__":
    # Run the tests in this file through the TVM pytest entry point.
    tvm.testing.main()
| 10,398 | 36.814545 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for dense operator"""
import contextlib
import numpy as np
import pytest
import sys
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
from common import Int8Fallback
# Parameter axes for the dense tests (expanded by tvm.testing fixtures).
random_seed = tvm.testing.parameter(0)
use_bias = tvm.testing.parameter(True, False)
batch_size = tvm.testing.parameter(1, 2, 128)
in_dim, out_dim = tvm.testing.parameters((1024, 1000))
in_dtype, out_dtype = tvm.testing.parameters(
    ("float32", "float32"),
    ("float16", "float16"),
    ("int8", "int32"),
)
# target key -> list of (compute, schedule) pairs; every pair is exercised.
_dense_implementations = {
    "generic": [(topi.nn.dense, topi.generic.schedule_dense)],
    "cpu": [
        (topi.x86.dense_nopack, topi.x86.schedule_dense_nopack),
        (topi.x86.dense_pack, topi.x86.schedule_dense_pack),
        (topi.x86.dense_dynamic, topi.x86.schedule_dense_dynamic),
    ],
    "gpu": [
        (topi.gpu.dense_small_batch, topi.gpu.schedule_dense_small_batch),
        (topi.gpu.dense_large_batch, topi.gpu.schedule_dense_large_batch),
    ],
    "mali": [(topi.mali.dense, topi.mali.schedule_dense)],
    "bifrost": [(topi.bifrost.dense, topi.bifrost.schedule_dense)],
    "hls": [(topi.nn.dense, topi.hls.schedule_dense)],
}
@tvm.testing.fixture(cache_return_value=True)
def dense_ref_data(random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype):
    """Generate (a, b, bias, expected) reference data for the dense tests.

    Expected output is relu(a @ b.T [+ bias]) computed in the output dtype.
    """
    np.random.seed(random_seed)
    a_shape = (batch_size, in_dim)
    b_shape = (out_dim, in_dim)
    bias_shape = (out_dim,)
    if "float" in in_dtype:
        a_np = np.random.uniform(size=a_shape).astype(in_dtype)
        b_np = np.random.uniform(size=b_shape).astype(in_dtype)
        c_np = np.random.uniform(size=bias_shape).astype(out_dtype)
    elif in_dtype == "int8":
        a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(in_dtype)
        b_np = np.random.randint(low=-128, high=127, size=b_shape).astype(in_dtype)
        c_np = np.random.randint(low=-128, high=127, size=bias_shape).astype(out_dtype)
    else:
        raise ValueError("No method to generate test data for data type '{}'".format(in_dtype))
    # Accumulate in the output dtype, as the TOPI kernels do.
    matmul = np.dot(a_np.astype(out_dtype), b_np.T.astype(out_dtype))
    if use_bias:
        matmul += c_np
    d_np = np.maximum(matmul, 0)
    return (a_np, b_np, c_np, d_np)
def test_dense(
    target,
    dev,
    batch_size,
    in_dim,
    out_dim,
    use_bias,
    dense_ref_data,
    in_dtype,
    out_dtype,
    implementations=None,
):
    """Run each dispatched dense implementation (+ optional bias, then relu)
    and compare against the NumPy reference from dense_ref_data.

    `implementations` defaults to the target's entries in
    _dense_implementations; test_dense_cuda_int8 overrides it.
    """
    target = tvm.target.Target(target)
    # Skip dtype/target combinations the hardware or driver cannot run.
    if target.kind.name == "cuda":
        if in_dtype == "int8" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
            pytest.xfail("CUDA int8 intrinsics not available")
        if in_dtype == "float16" and not tvm.contrib.nvcc.have_fp16(dev.compute_version):
            pytest.xfail("CUDA float16 intrinsics not available")
    if target.kind.name == "vulkan":
        if in_dtype == "int8" and (
            not target.attrs.get("supports_int8", False)
            or not target.attrs.get("supports_8bit_buffer", False)
        ):
            pytest.xfail("Vulkan int8 driver support not available")
        if in_dtype == "float16" and (
            not target.attrs.get("supports_float16", False)
            or not target.attrs.get("supports_16bit_buffer", False)
        ):
            pytest.xfail("Vulkan float16 driver support not available")
    if (
        target.kind.name not in ["llvm", "c"]
        and len(set(target.keys) & set(_dense_implementations)) == 0
    ):
        pytest.xfail("No implementation for tvm.topi.testing.dispatch to find")
    # Integer results must match exactly; float tolerances widen for fp16.
    if "int" in in_dtype:
        tol = {"atol": 0, "rtol": 0}
    elif in_dtype == "float32":
        tol = {"rtol": 1e-5, "atol": 1e-5}
    elif in_dtype == "float16":
        tol = {"rtol": 5e-2, "atol": 1e-5}
    A = te.placeholder((batch_size, in_dim), name="A", dtype=in_dtype)
    B = te.placeholder((out_dim, in_dim), name="B", dtype=in_dtype)
    C = te.placeholder((out_dim,), name="C", dtype=out_dtype)
    a_np, b_np, c_np, d_np = dense_ref_data
    if implementations is None:
        implementations = tvm.topi.testing.dispatch(target, _dense_implementations)
    for fcompute, fschedule in implementations:
        # dense_dynamic is only exercised for batch 1 / float32.
        if fcompute == topi.x86.dense_dynamic and (batch_size != 1 or in_dtype != "float32"):
            continue
        with tvm.target.Target(target):
            D = fcompute(A, B, C if use_bias else None, out_dtype)
            D = topi.nn.relu(D)
            s = fschedule([D])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(c_np, dev)
        d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=out_dtype), dev)
        f = tvm.build(s, [A, B, C, D], target, name="dense")
        f(a, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), d_np, **tol)
@pytest.mark.parametrize("target,in_dtype,out_dtype", [("cuda", "int8", "int32")])
def test_dense_cuda_int8(
    target,
    dev,
    batch_size,
    in_dim,
    out_dim,
    use_bias,
    dense_ref_data,
    in_dtype,
    out_dtype,
):
    """Run test_dense against the CUDA int8 dense compute/schedule.

    Wraps the call in Int8Fallback (from tests/common); presumably it sets up
    an autotvm fallback context for the int8 templates -- see common.py.
    """
    implementations = [
        (topi.cuda.dense_int8, topi.cuda.schedule_dense_int8),
    ]
    with Int8Fallback():
        test_dense(
            target,
            dev,
            batch_size,
            in_dim,
            out_dim,
            use_bias,
            dense_ref_data,
            in_dtype,
            out_dtype,
            implementations=implementations,
        )
if __name__ == "__main__":
    # Run the tests in this file through the TVM pytest entry point.
    tvm.testing.main()
| 6,251 | 32.255319 | 96 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_depthwise_conv2d_back_weight.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
from scipy import signal
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.cuda.depthwise_conv2d import schedule_depthwise_conv2d_backward_weight_nhwc
import tvm.testing
def verify_depthwise_conv2d_back_weight(
    batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h
):
    """Check the NHWC depthwise-conv2d weight-gradient kernel on GPU targets.

    Square inputs/filters are assumed (width parameters mirror the height
    parameters). The reference gradient is built with scipy's convolve2d on a
    dilated output gradient and a zero-padded input.
    """
    # Square problem: widths mirror heights.
    in_w = in_h
    filter_channel = in_channel
    filter_w = filter_h
    stride_w = stride_h
    padding_w = padding_h
    out_h = int((in_h + 2 * padding_h - filter_h) / stride_h + 1)
    out_w = int((in_w + 2 * padding_w - filter_w) / stride_w + 1)
    out_channel = in_channel * channel_multiplier
    oshape = [batch, out_h, out_w, out_channel]
    fshape = [filter_h, filter_w, in_channel, channel_multiplier]
    # placeholder
    Out_grad = te.placeholder(oshape, name="Out_grad")
    Input = te.placeholder((batch, in_h, in_w, in_channel), name="In_grad")
    # declare
    Weight_grad = topi.nn.depthwise_conv2d_backward_weight_nhwc(
        Input, Out_grad, oshape, fshape, stride=[stride_h, stride_w], padding=[padding_h, padding_w]
    )
    # schedule
    schedule = schedule_depthwise_conv2d_backward_weight_nhwc(Weight_grad)
    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        # build the kernel
        f = tvm.build(schedule, [Input, Out_grad, Weight_grad], device)
        # prepare pod type for test data closure
        dtype = Out_grad.dtype
        out_grad_shape = get_const_tuple(Out_grad.shape)
        in_shape = get_const_tuple(Input.shape)
        # use memoize to pickle the test data for next time use
        @memoize("topi.tests.test_topi_depthwise_conv2d_backward_weight.nhwc")
        def get_ref_data():
            out_grad_np = np.random.uniform(size=out_grad_shape).astype(dtype)
            input_np = np.random.uniform(size=in_shape).astype(dtype)
            # Dilate the output gradient by the stride to undo the forward
            # stride, then correlate against the padded input.
            dilated_out_grad_np = tvm.topi.testing.dilate_python(
                out_grad_np, [1, stride_h, stride_w, 1]
            )
            pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(
                [padding_h, padding_w], (filter_h, filter_w)
            )
            padded_input_np = np.zeros(
                (batch, in_h + pad_top + pad_bottom, in_w + pad_left + pad_right, in_channel)
            )
            padded_input_np[:, pad_top : in_h + pad_top, pad_left : in_w + pad_left, :] = input_np
            weight_grad_np = np.zeros((filter_h, filter_w, in_channel, channel_multiplier))
            for c in range(in_channel):
                for m in range(channel_multiplier):
                    for b in range(batch):
                        # convolve2d with a 180-degree-rotated kernel is
                        # cross-correlation; accumulate over the batch.
                        weight_grad_np[:, :, c, m] += signal.convolve2d(
                            padded_input_np[b, :, :, c],
                            np.rot90(
                                dilated_out_grad_np[
                                    b, :, :, c * channel_multiplier + m % channel_multiplier
                                ],
                                2,
                            ),
                            mode="valid",
                        )[0:filter_h, 0:filter_w]
            return (out_grad_np, input_np, weight_grad_np)
        (out_grad_np, input_np, weight_grad_np) = get_ref_data()
        out_grad_tvm = tvm.nd.array(out_grad_np, dev)
        input_tvm = tvm.nd.array(input_np, dev)
        weight_grad_tvm = tvm.nd.array(np.zeros(shape=fshape, dtype=dtype), dev)
        # launch the kernel
        # (tcost is measured but not otherwise used; the run also fills
        # weight_grad_tvm, which is what the assertion below checks)
        timer = f.time_evaluator(f.entry_name, dev, number=1)
        tcost = timer(input_tvm, out_grad_tvm, weight_grad_tvm).mean
        tvm.testing.assert_allclose(weight_grad_np, weight_grad_tvm.numpy(), rtol=1e-4)
    check_device("opencl")
    check_device("cuda")
    check_device("metal")
    check_device("rocm")
    check_device("vulkan")
    check_device("nvptx")
@tvm.testing.requires_gpu
def test_topi_depthwise_conv2d_backward_weight_nhwc():
    """Sweep channel multiplier, filter size, stride and padding combinations."""
    # (batch, in_channel, in_h, channel_multiplier, filter_h, stride_h, padding_h)
    cases = [
        (16, 256, 56, 1, 3, 1, 1),
        (16, 256, 56, 2, 3, 1, 1),
        (16, 256, 56, 1, 5, 1, 2),
        (16, 256, 56, 2, 5, 1, 2),
        (16, 256, 56, 1, 3, 2, 1),
        (16, 256, 56, 2, 3, 2, 1),
        (16, 256, 56, 1, 5, 2, 2),
        (16, 256, 56, 2, 5, 2, 2),
        (16, 256, 56, 1, 3, 1, 0),
        (16, 256, 56, 2, 3, 1, 0),
        (16, 256, 56, 1, 5, 1, 0),
        (16, 256, 56, 2, 5, 1, 0),
        (16, 256, 56, 1, 3, 2, 0),
        (16, 256, 56, 2, 3, 2, 0),
        (16, 256, 56, 1, 5, 2, 0),
        (15, 256, 56, 2, 5, 2, 0),  # non-16 batch size
    ]
    for case in cases:
        verify_depthwise_conv2d_back_weight(*case)
if __name__ == "__main__":
    # Allow running this test file directly, outside pytest.
    test_topi_depthwise_conv2d_backward_weight_nhwc()
| 6,153 | 42.034965 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_unique.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
import tvm.testing
from tvm import topi
import tvm.topi.testing
# Parameter axes: input dtype, sorted vs first-occurrence output order,
# whether counts are requested, and (array length, value range) pairs used
# to generate the random input data.
in_dtype = tvm.testing.parameter("int32", "int64")
is_sorted = tvm.testing.parameter(True, False, ids=["sorted", "unsorted"])
with_counts = tvm.testing.parameter(True, False, ids=["with_counts", "no_counts"])
arr_size, maxval = tvm.testing.parameters((1, 100), (10, 10), (10000, 100))
@tvm.testing.parametrize_targets
def test_unique(dev, target, in_dtype, is_sorted, with_counts, arr_size, maxval):
    """Check the topi unique op against a NumPy reference.

    The TVM op writes fixed-size output buffers (same length as the input);
    only the first `num_unique` entries of the unique/indices/counts buffers
    are meaningful, so those comparisons are sliced to that length.
    """

    def calc_numpy_unique(data, is_sorted=False):
        # np.unique always returns values in sorted order; for the unsorted
        # variant, reorder its outputs by first occurrence instead.
        uniq, index, inverse, counts = np.unique(
            data, return_index=True, return_inverse=True, return_counts=True
        )
        num_uniq = np.array([len(uniq)]).astype("int32")
        if not is_sorted:
            order = np.argsort(index)
            index = np.sort(index)
            reverse_order = np.argsort(order)
            uniq = uniq[order].astype(data.dtype)
            inverse = np.array([reverse_order[i] for i in inverse]).astype("int32")
            counts = counts[order].astype("int32")
        return [
            uniq.astype(data.dtype),
            index.astype("int32"),
            inverse.astype("int32"),
            counts,
            num_uniq,
        ]

    data = np.random.randint(0, maxval, size=(arr_size)).astype(in_dtype)
    # numpy reference
    np_unique, np_indices, np_inverse_indices, np_counts, np_num_unique = calc_numpy_unique(
        data, is_sorted
    )
    num_unique = np_num_unique[0]
    # target key -> (compute, schedule) used to build the op under test.
    implementations = {
        "generic": (
            lambda x, return_counts: topi.unique(x, is_sorted, return_counts),
            topi.generic.schedule_unique,
        ),
        "gpu": (
            lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
            topi.cuda.schedule_scan,
        ),
        "nvptx": (
            lambda x, return_counts: topi.cuda.unique(x, is_sorted, return_counts),
            topi.cuda.schedule_scan,
        ),
    }
    fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
    tvm_data = tvm.nd.array(data, device=dev)
    # Output buffers are sized like the input; only a prefix is meaningful.
    tvm_unique = tvm.nd.array(np.zeros(data.shape).astype(data.dtype), device=dev)
    tvm_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
    tvm_inverse_indices = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
    tvm_num_unique = tvm.nd.array(np.zeros([1]).astype("int32"), device=dev)
    with tvm.target.Target(target):
        te_input = tvm.te.placeholder(shape=data.shape, dtype=str(data.dtype))
        outs = fcompute(te_input, with_counts)
        s = fschedule(outs)
        func = tvm.build(s, [te_input, *outs])
        if with_counts:
            tvm_counts = tvm.nd.array(np.zeros(data.shape).astype("int32"), device=dev)
            func(
                tvm_data,
                tvm_unique,
                tvm_indices,
                tvm_inverse_indices,
                tvm_num_unique,
                tvm_counts,
            )
        else:
            func(tvm_data, tvm_unique, tvm_indices, tvm_inverse_indices, tvm_num_unique)
    assert tvm_num_unique.numpy()[0] == np_num_unique
    np.testing.assert_allclose(tvm_unique.numpy()[:num_unique], np_unique, atol=1e-5, rtol=1e-5)
    np.testing.assert_allclose(tvm_indices.numpy()[:num_unique], np_indices, atol=1e-5, rtol=1e-5)
    np.testing.assert_allclose(
        tvm_inverse_indices.numpy(), np_inverse_indices, atol=1e-5, rtol=1e-5
    )
    if with_counts:
        np.testing.assert_allclose(tvm_counts.numpy()[:num_unique], np_counts, atol=1e-5, rtol=1e-5)
if __name__ == "__main__":
    # Run the tests in this file through the TVM pytest entry point.
    tvm.testing.main()
| 4,469 | 37.869565 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_argwhere.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test for argwhere operator"""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
import tvm.topi.testing
_argwhere_schedule = {
"generic": topi.generic.schedule_argwhere,
"gpu": topi.cuda.schedule_argwhere,
}
_argwhere_compute = {"llvm": topi.argwhere, "cuda": topi.cuda.argwhere}
data_shape = tvm.testing.parameter(
(1,),
(100,),
(1, 1),
(5, 3),
(32, 64),
(128, 65),
(200, 500),
(6, 5, 3),
(1, 1, 1),
(1, 1, 1, 1),
(6, 4, 5, 3),
(1, 1, 1, 1, 1),
(6, 4, 5, 3, 7),
)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_argwhere(target, dev, data_shape):
    """Compare topi argwhere against ``np.argwhere`` on random int data."""
    dtype = "int32"
    np_data = np.random.choice([0, 1, 2, 3], size=data_shape).astype(dtype)
    np_out = np.argwhere(np_data)

    # The output placeholder is sized from the numpy reference result.
    num_rows = np_out.shape[0]
    np_shape = np.ones(shape=(num_rows, len(data_shape)), dtype=dtype)

    out_shape = te.placeholder(shape=(num_rows, len(data_shape)), name="out_shape", dtype=dtype)
    condition = te.placeholder(shape=data_shape, name="condition", dtype=dtype)

    with tvm.target.Target(target):
        out = _argwhere_compute[target](out_shape, condition)
        s_func = tvm.topi.testing.dispatch(target, _argwhere_schedule)
        sch = s_func(out)

    func = tvm.build(sch, [out_shape, condition, out], target, name="argwhere")

    args = [
        tvm.nd.array(np_shape, dev),
        tvm.nd.array(np_data, dev),
        tvm.nd.empty(out.shape, device=dev, dtype=condition.dtype),
    ]
    func(*args)

    np.set_printoptions(threshold=np.inf)
    tvm_out = args[-1].numpy()
    tvm.testing.assert_allclose(tvm_out, np_out)
if __name__ == "__main__":
tvm.testing.main()
| 2,508 | 29.975309 | 97 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do group convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
from common import Int8Fallback
import tvm.testing
def _transform_data(data, bn):
# NCHW -> NCHW[x]c
batch_size, channel, height, width = data.shape
data = np.reshape(data, (batch_size, channel // bn, bn, height, width))
data = np.transpose(data, (0, 1, 3, 4, 2))
return data
def _transform_kernel(kernel, ic_bn, oc_bn):
# OIHW -> OIHW[x]o[x]i
out_channel, in_channel, kh, kw = kernel.shape
kernel = np.reshape(kernel, (out_channel // oc_bn, oc_bn, in_channel // ic_bn, ic_bn, kh, kw))
kernel = np.transpose(kernel, (0, 2, 4, 5, 1, 3))
return kernel
_group_conv2d_nchw_implement = {
"generic": (topi.nn.group_conv2d_nchw, topi.generic.schedule_group_conv2d_nchw),
"gpu": (topi.cuda.group_conv2d_nchw, topi.cuda.schedule_group_conv2d_nchw),
}
_group_conv2d_nhwc_implement = {
"generic": (topi.nn.group_conv2d_nhwc, topi.generic.schedule_group_conv2d_nhwc),
}
def verify_group_conv2d_nchw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation,
    groups,
    add_bias=False,
    add_relu=False,
):
    """Check topi grouped conv2d (NCHW layout) against the numpy reference.

    Builds the workload for every enabled target in ("llvm", "cuda") and
    asserts the device result matches ``tvm.topi.testing.conv2d_nchw_python``,
    optionally with a bias add and/or ReLU fused on top.
    """
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W")
    bias = te.placeholder((num_filter, 1, 1), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
            dtype
        )
        # NOTE: the original regenerated b_np here; one draw is enough.
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nchw_implement)
            C = fcompute(A, W, stride, padding, dilation, groups, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # The two build variants differ only in whether the bias tensor is
        # bound, so build once from a conditional argument list.
        tensors = [A, W, bias, C] if add_bias else [A, W, C]
        arrays = [a, w, b, c] if add_bias else [a, w, c]
        func = tvm.build(
            s,
            tensors,
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups),
        )
        func(*arrays)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for target in ["llvm", "cuda"]:
        check_target(target)
oc_block_factor = 4
ic_block_factor = 4
def verify_group_conv2d_NCHWc_int8(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation,
    groups,
    add_bias=False,
    add_relu=False,
):
    """Check the cuda int8 grouped conv2d in blocked NCHW[x]c layout.

    The reference is computed in plain NCHW with ``conv2d_nchw_python`` and
    then repacked into NCHW[x]c for comparison.  Skipped when the device
    lacks int8 (dp4a) intrinsics.
    """
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
    )

    in_height = in_width = in_size

    A = te.placeholder(
        (batch, in_channel // ic_block_factor, in_height, in_width, ic_block_factor),
        name="A",
        dtype="int8",
    )
    W = te.placeholder(
        (
            num_filter // oc_block_factor,
            (in_channel // groups) // ic_block_factor,
            kernel,
            kernel,
            oc_block_factor,
            ic_block_factor,
        ),
        name="W",
        dtype="int8",
    )
    bias = te.placeholder(
        (num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
    )

    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_NCHWc_int8")
    def get_ref_data():
        a_np = np.random.randint(
            low=-128, high=127, size=(batch, in_channel, in_height, in_width)
        ).astype(dtype)
        w_np = np.random.randint(
            low=-128, high=128, size=(num_filter, in_channel // groups, kernel, kernel)
        ).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
            dtype
        )

        # Repack the NCHW reference output into NCHW[x]c.
        _, _, out_height, out_width = c_np.shape
        c_np = c_np.reshape(
            (batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
        ).transpose(0, 1, 3, 4, 2)

        # NOTE: the original regenerated b_np here; one draw is enough.
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)

        return (
            _transform_data(a_np, ic_block_factor),
            _transform_kernel(w_np, ic_block_factor, oc_block_factor),
            b_np,
            c_np,
        )

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
            print("Skip because int8 intrinsics are not available")
            return

        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # Build once; the variants differ only in whether bias is bound.
        tensors = [A, W, bias, C] if add_bias else [A, W, C]
        arrays = [a, w, b, c] if add_bias else [a, w, c]
        func = tvm.build(
            s,
            tensors,
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups),
        )
        func(*arrays)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for target in ["cuda"]:
        check_target(target)
def verify_group_conv2d_nchw_int8(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation,
    groups,
    add_bias=False,
    add_relu=False,
):
    """Check the cuda int8 grouped conv2d fed with plain NCHW tensors.

    Inputs stay in NCHW/OIHW; the compute internally handles the blocked
    layout, so the numpy reference is repacked to NCHW[x]c for comparison.
    Skipped when the device lacks int8 (dp4a) intrinsics.
    """
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="int8")
    W = te.placeholder((num_filter, in_channel // groups, kernel, kernel), name="W", dtype="int8")
    bias = te.placeholder(
        (num_filter // oc_block_factor, 1, 1, oc_block_factor), name="bias", dtype="int8"
    )

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nchw_int8")
    def get_ref_data():
        a_np = np.random.randint(low=-128, high=127, size=a_shape).astype(dtype)
        w_np = np.random.randint(low=-128, high=128, size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding, groups).astype(
            dtype
        )

        # Repack the NCHW reference output into NCHW[x]c.
        _, _, out_height, out_width = c_np.shape
        c_np = c_np.reshape(
            (batch, num_filter // oc_block_factor, oc_block_factor, out_height, out_width)
        ).transpose(0, 1, 3, 4, 2)

        # NOTE: the original regenerated b_np here; one draw is enough.
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
            print("Skip because int8 intrinsics are not available")
            return

        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            C = topi.cuda.group_conv2d_NCHWc_int8(A, W, stride, padding, dilation, groups, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = topi.cuda.schedule_group_conv2d_NCHWc_int8([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # Build once; the variants differ only in whether bias is bound.
        tensors = [A, W, bias, C] if add_bias else [A, W, C]
        arrays = [a, w, b, c] if add_bias else [a, w, c]
        func = tvm.build(
            s,
            tensors,
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups),
        )
        func(*arrays)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for target in ["cuda"]:
        check_target(target)
def verify_group_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation,
    groups,
    add_bias=False,
    add_relu=False,
):
    """Check topi grouped conv2d (NHWC layout) against the numpy reference.

    Runs on llvm only and compares against
    ``tvm.topi.testing.conv2d_nhwc_python``, optionally with a bias add
    and/or ReLU fused on top.
    """
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups)
    )

    in_height = in_width = in_size

    A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel // groups, num_filter), name="W")
    bias = te.placeholder((1, 1, num_filter), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_group_conv2d.verify_group_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding, groups).astype(
            dtype
        )
        # NOTE: the original regenerated b_np here; one draw is enough.
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _group_conv2d_nhwc_implement)
            C = fcompute(A, W, stride, padding, dilation, groups, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # Build once; the variants differ only in whether bias is bound.
        tensors = [A, W, bias, C] if add_bias else [A, W, C]
        arrays = [a, w, b, c] if add_bias else [a, w, c]
        func = tvm.build(
            s,
            tensors,
            target,
            name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d"
            % (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation, groups),
        )
        func(*arrays)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

    for target in ["llvm"]:
        check_target(target)
@tvm.testing.uses_gpu
def test_group_conv2d_nchw():
    """Grouped NCHW conv2d over ResNeXt-50 workloads plus fused variants."""
    resnext50_workloads = [
        (1, 128, 56, 128, 3, 1, 1, 1, 32),
        (1, 256, 56, 256, 3, 2, 1, 1, 32),
        (1, 256, 28, 256, 3, 1, 1, 1, 32),
        (1, 512, 28, 512, 3, 2, 1, 1, 32),
        (1, 512, 14, 512, 3, 1, 1, 1, 32),
        (1, 1024, 14, 1024, 3, 2, 1, 1, 32),
        (1, 1024, 7, 1024, 3, 1, 1, 1, 32),
    ]
    for workload in resnext50_workloads:
        verify_group_conv2d_nchw(*workload)
    # bias / relu fusion
    verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
    verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
    verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
    # dilation
    verify_group_conv2d_nchw(1, 128, 56, 128, 3, 1, 1, 2, 32)
    # batch size
    verify_group_conv2d_nchw(2, 128, 56, 128, 3, 1, 1, 1, 32)
    verify_group_conv2d_nchw(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_NCHWc_int8():
    """Int8 NCHWc grouped conv2d over ResNeXt-50 shapes plus fused variants."""
    with Int8Fallback():
        resnext50_workloads = [
            (1, 128, 56, 128, 3, 1, 1, 1, 32),
            (1, 256, 56, 256, 3, 2, 1, 1, 32),
            (1, 256, 28, 256, 3, 1, 1, 1, 32),
            (1, 512, 28, 512, 3, 2, 1, 1, 32),
            (1, 512, 14, 512, 3, 1, 1, 1, 32),
            (1, 1024, 14, 1024, 3, 2, 1, 1, 32),
            (1, 1024, 7, 1024, 3, 1, 1, 1, 32),
        ]
        for workload in resnext50_workloads:
            verify_group_conv2d_NCHWc_int8(*workload)
        # bias / relu fusion
        verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
        verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
        verify_group_conv2d_NCHWc_int8(
            1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True
        )
        # dilation
        verify_group_conv2d_NCHWc_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
        # batch size
        verify_group_conv2d_NCHWc_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
        verify_group_conv2d_NCHWc_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
@tvm.testing.requires_cuda
def test_group_conv2d_nchw_int8():
    """Int8 NCHW grouped conv2d over ResNeXt-50 shapes plus fused variants."""
    with Int8Fallback():
        resnext50_workloads = [
            (1, 128, 56, 128, 3, 1, 1, 1, 32),
            (1, 256, 56, 256, 3, 2, 1, 1, 32),
            (1, 256, 28, 256, 3, 1, 1, 1, 32),
            (1, 512, 28, 512, 3, 2, 1, 1, 32),
            (1, 512, 14, 512, 3, 1, 1, 1, 32),
            (1, 1024, 14, 1024, 3, 2, 1, 1, 32),
            (1, 1024, 7, 1024, 3, 1, 1, 1, 32),
        ]
        for workload in resnext50_workloads:
            verify_group_conv2d_nchw_int8(*workload)
        # bias / relu fusion
        verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
        verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
        verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
        # dilation
        verify_group_conv2d_nchw_int8(1, 128, 56, 128, 3, 1, 1, 2, 32)
        # batch size
        verify_group_conv2d_nchw_int8(2, 128, 56, 128, 3, 1, 1, 1, 32)
        verify_group_conv2d_nchw_int8(9, 128, 56, 128, 3, 1, 1, 1, 32)
def test_group_conv2d_nhwc():
    """Grouped NHWC conv2d over ResNeXt-50 workloads plus fused variants."""
    resnext50_workloads = [
        (1, 128, 56, 128, 3, 1, 1, 1, 32),
        (1, 256, 56, 256, 3, 2, 1, 1, 32),
        (1, 256, 28, 256, 3, 1, 1, 1, 32),
        (1, 512, 28, 512, 3, 2, 1, 1, 32),
        (1, 512, 14, 512, 3, 1, 1, 1, 32),
        (1, 1024, 14, 1024, 3, 2, 1, 1, 32),
        (1, 1024, 7, 1024, 3, 1, 1, 1, 32),
    ]
    for workload in resnext50_workloads:
        verify_group_conv2d_nhwc(*workload)
    # bias / relu fusion
    verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True)
    verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_bias=True)
    verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 1, 32, add_relu=True, add_bias=True)
    # dilation
    verify_group_conv2d_nhwc(1, 128, 56, 128, 3, 1, 1, 2, 32)
    # batch size
    verify_group_conv2d_nhwc(2, 128, 56, 128, 3, 1, 1, 1, 32)
    verify_group_conv2d_nhwc(9, 128, 56, 128, 3, 1, 1, 1, 32)
if __name__ == "__main__":
test_group_conv2d_nchw()
test_group_conv2d_NCHWc_int8()
test_group_conv2d_nchw_int8()
test_group_conv2d_nhwc()
| 21,933 | 32.283763 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_bitserial_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for bitserial_dense operator"""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
_bitserial_dense_implement = {
"generic": (topi.nn.bitserial_dense, topi.generic.schedule_bitserial_dense),
"cpu": (topi.x86.bitserial_dense, topi.x86.schedule_bitserial_dense),
"arm_cpu": (topi.arm_cpu.bitserial_dense, topi.arm_cpu.schedule_bitserial_dense),
}
def generate_quantized_np(shape, bits, out_dtype):
    """Return a random integer tensor with values in [0, 2**bits)."""
    return np.random.randint(0, 1 << bits, size=shape).astype(out_dtype)
def verify_bitserial_dense(batch, in_dim, out_dim, activation_bits, weight_bits, unipolar):
    """Check bitserial dense against a numpy reference on x86/arm llvm targets."""
    out_dtype = "int16"

    def get_ref_data(a_shape, b_shape, input_dtype):
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        b_np = generate_quantized_np(get_const_tuple(b_shape), weight_bits, input_dtype)
        if unipolar:
            # Weights equal to 1 stay 1, everything else maps to -1 for the
            # bipolar reference matmul.
            b_polar = np.where(b_np == 1, 1, -1).astype(out_dtype)
            c_np = np.dot(a_np, b_polar.T)
        else:
            c_np = np.dot(a_np, b_np.T)
        return a_np, b_np, c_np

    for target in ["llvm", "llvm -device=arm_cpu"]:
        if "arm_cpu" in target and "arm" not in os.uname()[4]:
            print("Skipped running code, not an arm device")
            continue
        input_dtype = "uint8" if "arm_cpu" in target else "uint32"

        A = te.placeholder((batch, in_dim), dtype=input_dtype, name="A")
        B = te.placeholder((out_dim, in_dim), dtype=input_dtype, name="B")
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _bitserial_dense_implement)
        C = fcompute(A, B, activation_bits, weight_bits, input_dtype, out_dtype, unipolar)
        s = fschedule([C])

        a_np, b_np, c_np = get_ref_data(
            get_const_tuple(A.shape), get_const_tuple(B.shape), input_dtype
        )

        dev = tvm.cpu(0)
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(b_np, dev)
        c_nd = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func = tvm.build(s, [A, B, C], target)
        func(a_nd, b_nd, c_nd)
        tvm.testing.assert_allclose(c_nd.numpy(), c_np, rtol=1e-5)
def test_bitserial_dense():
    """Exercise 1- and 2-bit activations in both unipolar and bipolar modes."""
    for activation_bits, unipolar in [(1, True), (2, True), (1, False), (2, False)]:
        verify_bitserial_dense(1, 1024, 1000, activation_bits, 1, unipolar)
if __name__ == "__main__":
test_bitserial_dense()
| 3,539 | 38.333333 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Test code for LSTM."""
import numpy as np
from rsa import verify
import tvm
from tvm import te, topi
import tvm.testing
import tvm.topi.testing
def verify_lstm(
    target,
    dev,
    seq_len,
    batch_size,
    in_dim,
    hidden_dim,
    proj_dim=0,
    bias=True,
    zero_init=True,
    peephole=False,
    reverse=False,
    weight_layout="IFGO",
):
    """Build topi.nn.lstm for one configuration and check it against the
    numpy reference ``tvm.topi.testing.lstm_python``.

    proj_dim > 0 enables a projection layer (LSTMP); zero_init controls
    whether h/c initial states are zero or random; peephole adds the
    peephole weights p_i/p_f/p_o; weight_layout names the gate ordering
    of the packed weight matrices.
    """
    # With projection, the hidden output is projected down to proj_dim.
    out_dim = proj_dim if proj_dim > 0 else hidden_dim

    def rand(*shape):
        # Uniform init in [-1/sqrt(hidden_dim), 1/sqrt(hidden_dim)],
        # matching the usual LSTM weight initialization scale.
        sqrt_k = np.sqrt(1 / hidden_dim)
        return np.random.uniform(-sqrt_k, sqrt_k, size=shape).astype("float32")

    def get_ref_data():
        # Optional tensors stay None when the corresponding feature is off;
        # the order of the returned list must match topi.nn.lstm's
        # positional parameter order exactly.
        Xs = np.random.normal(size=(seq_len, batch_size, in_dim)).astype("float32")
        Wi = rand(4 * hidden_dim, in_dim)
        Wh = rand(4 * hidden_dim, out_dim)
        Bi = None
        Bh = None
        h0 = None
        c0 = None
        proj = None
        p_i = None
        p_f = None
        p_o = None
        if bias:
            Bi = rand(4 * hidden_dim)
            Bh = rand(4 * hidden_dim)
        if not zero_init:
            h0 = np.random.normal(size=(batch_size, out_dim)).astype("float32")
            c0 = np.random.normal(size=(batch_size, hidden_dim)).astype("float32")
        if proj_dim > 0:
            proj = rand(proj_dim, hidden_dim)
        if peephole:
            p_i, p_f, p_o = [rand(batch_size, hidden_dim) for _ in range(3)]

        hs, cs = tvm.topi.testing.lstm_python(
            Xs,
            Wi,
            Wh,
            Bi=Bi,
            Bh=Bh,
            h_init=h0,
            c_init=c0,
            proj=proj,
            p_i=p_i,
            p_f=p_f,
            p_o=p_o,
            reverse=reverse,
            weight_layout=weight_layout,
        )

        return [Xs, Wi, Wh, Bi, Bh, h0, c0, proj, p_i, p_f, p_o], [hs, cs]

    args_np, (hs_np, cs_np) = get_ref_data()

    # Mirror the None pattern: a placeholder per supplied array, None elsewhere,
    # so the positional call to topi.nn.lstm lines up with the reference.
    args = [te.placeholder(a.shape, "float32") if a is not None else a for a in args_np]
    real_args = [a for a in args if a is not None]

    hs, cs = topi.nn.lstm(*args, reverse=reverse, weight_layout=weight_layout)
    with tvm.target.Target(target):
        sch = topi.generic.schedule_lstm([hs, cs])
    func = tvm.build(sch, real_args + [hs, cs], target=target)

    # Only the non-None tensors are bound to the built function.
    args_nd = [tvm.nd.array(a, dev) for a in args_np if a is not None]
    hs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, out_dim), "float32"), dev)
    cs_nd = tvm.nd.array(np.zeros((seq_len, batch_size, hidden_dim), "float32"), dev)
    func(*args_nd, hs_nd, cs_nd)

    tvm.testing.assert_allclose(hs_nd.numpy(), hs_np, rtol=1e-4)
    tvm.testing.assert_allclose(cs_nd.numpy(), cs_np, rtol=1e-4)
def test_lstm():
    """Basic LSTM: a minimal 1x1 workload, then a larger one with random init."""
    verify_lstm(
        "llvm",
        tvm.cpu(0),
        seq_len=1,
        batch_size=1,
        in_dim=1,
        hidden_dim=1,
        proj_dim=0,
        bias=True,
        zero_init=True,
        peephole=False,
        reverse=False,
        weight_layout="IFGO",
    )
    verify_lstm(
        "llvm",
        tvm.cpu(0),
        seq_len=8,
        batch_size=4,
        in_dim=8,
        hidden_dim=16,
        proj_dim=0,
        bias=True,
        zero_init=False,
        peephole=False,
        reverse=False,
        weight_layout="IFGO",
    )
def test_lstm_proj():
    """LSTM with a projection layer (proj_dim < hidden_dim)."""
    verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, proj_dim=8, weight_layout="IFGO")
def test_lstm_peephole():
    """LSTM with peephole connections enabled."""
    verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, peephole=True, weight_layout="IFGO")
def test_lstm_reverse():
    """LSTM processing the sequence in reverse order."""
    verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, reverse=True, weight_layout="IFGO")
def test_lstm_weight_layout_iofg():
    """LSTM with the ONNX gate ordering (IOFG; PyTorch uses IFGO)."""
    verify_lstm("llvm", tvm.cpu(0), 8, 4, 16, 32, weight_layout="IOFG")
def test_lstm_assorted():
    """Combination case: projection + random init + peephole + reverse + OIGF."""
    verify_lstm(
        "llvm",
        tvm.cpu(0),
        8,
        4,
        16,
        32,
        proj_dim=16,
        bias=True,
        zero_init=False,
        peephole=True,
        reverse=True,
        weight_layout="OIGF",
    )
| 4,454 | 26.5 | 88 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_einsum.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
def with_tvm(lam, shapes, ops, out_shape):
    """Evaluate *lam* on TVM placeholders built from the numpy arrays *ops*.

    Placeholders are declared with *shapes*, the resulting computation is
    scheduled and built for llvm, executed on CPU, and the output (of shape
    *out_shape*) is returned as a numpy array.
    """
    dev = tvm.cpu(0)
    placeholders = [te.placeholder(shape, name="pl" + str(i)) for i, shape in enumerate(shapes)]
    inputs_nd = [tvm.nd.array(arr, dev) for arr in ops]

    result = lam(*placeholders)
    result_nd = tvm.nd.array(np.zeros(out_shape).astype(result.dtype), device=dev)

    schedule = te.create_schedule([result.op])
    mod = tvm.build(schedule, placeholders + [result], "llvm")
    mod(*inputs_nd, result_nd)
    return result_nd.numpy()
def verify_einsum(subscripts, shapes, shape_dict=None):
    """Compare topi.einsum against np.einsum for one equation.

    Parameters
    ----------
    subscripts : str
        Einsum equation, e.g. "ij,jk->ik".
    shapes : list of tuple
        One shape per operand; entries may be strings naming symbolic dims.
    shape_dict : dict, optional
        Concrete value for each symbolic dimension name used in *shapes*.
    """
    # Avoid a mutable default argument; None means "no symbolic dims".
    shape_dict = shape_dict or {}
    ops = []  # ndarrays to be used as inputs
    symbolic_shapes = []  # shapes to declare the placeholders
    name_to_var = {}

    def get_concrete_shape(shape):
        return [shape_dict[s] if isinstance(s, str) else s for s in shape]

    def get_symblic_shape_var(name, dtype="int32"):
        # One te.var per symbolic name, shared across operands.
        if name not in name_to_var:
            name_to_var[name] = te.var(name, dtype=dtype)
        return name_to_var[name]

    def get_symbolic_shape(shape):
        return [get_symblic_shape_var(s) if isinstance(s, str) else s for s in shape]

    for shape in shapes:
        concrete_shape = get_concrete_shape(shape)
        tmp = np.random.uniform(low=-1.0, high=1.0, size=concrete_shape).astype(np.float32)
        ops.append(tmp)
        symbolic_shapes.append(get_symbolic_shape(shape))

    c1 = np.einsum(subscripts, *ops)
    out_shape = c1.shape

    # A single variadic lambda handles any operand count; the previous
    # if/elif chain over len(ops) left c2 undefined for >3 operands.
    c2 = with_tvm(
        lambda *placeholders: topi.einsum(subscripts, *placeholders),
        symbolic_shapes,
        ops,
        out_shape,
    )

    tvm.testing.assert_allclose(c1, c2, rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize(
    "equation,inputs",
    [
        ("ii", [(5, 5)]),
        ("ii->i", [(5, 5)]),
        ("ij->i", [(5, 5)]),
        ("...j->...", [(5, 5)]),
        ("...j, j", [(5, 5), (5,)]),
        ("..., ...", [(), (2, 3)]),
        ("ijk, jil->kl", [(3, 4, 5), (4, 3, 2)]),
        ("ij, ij -> i", [(1, 4), (2, 4)]),
        ("...ij, ...jk -> ...ik", [(1, 4), (4, 2)]),
        ("...ij, ...ik -> ...jk", [(1, 1, 1, 4), (1, 1, 1, 3)]),
        ("...ik, ...jk, ...hk -> i...jh", [(3, 4, 4), (1, 5, 3, 8, 4), (2, 5, 3, 6, 4)]),
        ("ij,jk->ik", [(2, 3), (3, 4)]),
        ("ij,jk,km->im", [(2, 3), (3, 4), (4, 5)]),
    ],
)
def test_einsum(equation, inputs):
    """Run topi.einsum vs numpy for each parametrized equation."""
    verify_einsum(subscripts=equation, shapes=inputs)
@pytest.mark.parametrize(
    "equation,inputs,shape_dict",
    [
        ("ij,jk->ik", [(2, "K"), (1, "N")], {"K": 3, "N": 4}),
        ("ij,jk->ik", [(2, "K"), ("K2", "N")], {"K": 3, "N": 4, "K2": 3}),
        ("ij,jk->ik", [(2, "K"), ("K2", "N")], {"K": 3, "N": 4, "K2": 1}),
    ],
)
def test_einsum_symblic_shape(equation, inputs, shape_dict):
    """Einsum with symbolic dimensions bound through shape_dict."""
    verify_einsum(subscripts=equation, shapes=inputs, shape_dict=shape_dict)
if __name__ == "__main__":
tvm.testing.main()
| 4,190 | 34.218487 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_sort.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for vision package"""
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
_sort_implement = {
"generic": (topi.sort, topi.generic.schedule_sort),
"gpu": (topi.cuda.sort, topi.cuda.schedule_sort),
}
_argsort_implement = {
"generic": (topi.argsort, topi.generic.schedule_argsort),
"gpu": (topi.cuda.argsort, topi.cuda.schedule_argsort),
}
_topk_implement = {
"generic": (topi.topk, topi.generic.schedule_topk),
"gpu": (topi.cuda.topk, topi.cuda.schedule_topk),
}
axis = tvm.testing.parameter(0, -1, 1)
is_ascend = tvm.testing.parameter(True, False, ids=["is_ascend", "not_ascend"])
dtype = tvm.testing.parameter("int64", "float32")
topk = tvm.testing.parameter(0, 1, 5)
topk_ret_type = tvm.testing.parameter("values", "indices", "both")
def test_sort(target, dev, axis, is_ascend):
    """Compare topi sort with np.sort along *axis* in either direction."""
    np.random.seed(0)
    dshape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(dshape, name="data", dtype=data_dtype)

    # A shuffled permutation guarantees all values are distinct.
    perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
    np.random.shuffle(perm)
    np_data = perm.reshape(dshape)

    np_sort = np.sort(np_data, axis=axis) if is_ascend else -np.sort(-np_data, axis=axis)
    # Trim to the extent of the sorted axis.
    np_sort = np_sort[: dshape[axis], :] if axis == 0 else np_sort[:, : dshape[axis]]

    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _sort_implement)
        out = fcompute(data, axis=axis, is_ascend=is_ascend)
        s = fschedule(out)

    tvm_data = tvm.nd.array(np_data, dev)
    tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
    f = tvm.build(s, [data, out], target)
    f(tvm_data, tvm_out)
    tvm.testing.assert_allclose(tvm_out.numpy(), np_sort, rtol=1e0)
def test_argsort(target, dev, axis, is_ascend):
    """Compare topi argsort indices against numpy argsort along ``axis``.

    The input is a shuffled permutation of distinct values so the expected
    index order is unique (no ties to break).
    """
    # Seed the RNG so the test data is reproducible; the sibling test_sort
    # does the same, and without it a failure could not be replayed.
    np.random.seed(0)
    dshape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(dshape, name="data", dtype=data_dtype)
    perm = np.arange(dshape[0] * dshape[1], dtype=data_dtype)
    np.random.shuffle(perm)
    np_data = perm.reshape(dshape)
    if is_ascend:
        np_indices = np.argsort(np_data, axis=axis)
    else:
        np_indices = np.argsort(-np_data, axis=axis)
    # For the parameterized axes (0, 1, -1) these slices take the full
    # extent; kept for parity with the kernel's output shape.
    if axis == 0:
        np_indices = np_indices[: dshape[axis], :]
    else:
        np_indices = np_indices[:, : dshape[axis]]
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _argsort_implement)
        out = fcompute(data, axis=axis, is_ascend=is_ascend)
        s = fschedule(out)
    tvm_data = tvm.nd.array(np_data, dev)
    tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data_dtype), dev)
    f = tvm.build(s, [data, out], target)
    f(tvm_data, tvm_out)
    # Indices are compared as floats, hence the loose tolerance.
    tvm.testing.assert_allclose(tvm_out.numpy(), np_indices.astype(data_dtype), rtol=1e0)
def test_topk(target, dev, topk, axis, topk_ret_type, is_ascend, dtype):
    """Compare topi topk values/indices against a numpy reference.

    ``topk == 0`` means "return everything along the axis"; ``topk_ret_type``
    selects which of values/indices/both the kernel returns.
    """
    np.random.seed(0)
    shape = (20, 100)
    data_dtype = "float32"
    data = te.placeholder(shape, name="data", dtype=data_dtype)
    np_data = np.random.uniform(size=shape).astype(data_dtype)
    if is_ascend:
        np_indices = np.argsort(np_data, axis=axis)
    else:
        np_indices = np.argsort(-np_data, axis=axis)
    # kk: how many entries to keep; a non-positive topk keeps the full axis.
    kk = topk if topk >= 1 else shape[axis]
    if axis == 0:
        np_indices = np_indices[:kk, :]
        np_values = np.zeros(np_indices.shape).astype(data_dtype)
        # Gather the selected values column by column.
        for i in range(shape[1]):
            np_values[:, i] = np_data[np_indices[:, i], i]
    else:
        np_indices = np_indices[:, :kk]
        np_values = np.zeros(np_indices.shape).astype(data_dtype)
        # Gather the selected values row by row.
        for i in range(shape[0]):
            np_values[i, :] = np_data[i, np_indices[i, :]]
    np_indices = np_indices.astype(dtype)
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _topk_implement)
        outs = fcompute(data, topk, axis, topk_ret_type, is_ascend, dtype)
        # The kernel returns either one tensor or a [values, indices] list.
        outs = outs if isinstance(outs, list) else [outs]
        s = fschedule(outs)
        tvm_data = tvm.nd.array(np_data, dev)
        tvm_res = []
        for t in outs:
            tvm_res.append(tvm.nd.empty(t.shape, dtype=t.dtype, device=dev))
        f = tvm.build(s, [data] + outs, target)
        f(tvm_data, *tvm_res)
    if topk_ret_type == "both":
        tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
        tvm.testing.assert_allclose(tvm_res[1].numpy(), np_indices)
    elif topk_ret_type == "values":
        tvm.testing.assert_allclose(tvm_res[0].numpy(), np_values)
    else:
        tvm.testing.assert_allclose(tvm_res[0].numpy(), np_indices)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 5,494 | 32.711656 | 89 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_bnn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for binary neural network operators."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_binary_dense(batch, in_dim, out_dim):
    """Check binarize_pack + binary_dense against a float reference matmul.

    Inputs are +/-1 matrices; the packed binary dense result must match
    ``a_np @ b_np.T`` computed in ordinary arithmetic.
    """
    A = te.placeholder((batch, in_dim), name="A")
    B = te.placeholder((out_dim, in_dim), name="B")
    bnn_A = topi.nn.binarize_pack(A)
    bnn_B = topi.nn.binarize_pack(B)
    # binary dense
    bnn_A1 = te.placeholder(bnn_A.shape, dtype=bnn_A.dtype)
    bnn_B1 = te.placeholder(bnn_B.shape, dtype=bnn_B.dtype)
    bnn_C = topi.nn.binary_dense(bnn_A1, bnn_B1)
    # schedule
    with tvm.target.Target("llvm"):
        s1 = topi.x86.schedule_binarize_pack(bnn_A)
        s2 = topi.x86.schedule_binarize_pack(bnn_B)
        s3 = topi.x86.schedule_binary_dense(bnn_C)
    dtype = A.dtype
    @memoize("topi.tests.test_topi_binary_dense")
    def get_ref_data():
        # generate random matrix of +1 or -1 value
        a_np = (np.random.randint(2, size=(batch, in_dim)) * 2 - 1).astype(dtype)
        b_np = (np.random.randint(2, size=(out_dim, in_dim)) * 2 - 1).astype(dtype)
        c_np = np.dot(a_np, b_np.T)
        return a_np, b_np, c_np
    a_np, b_np, c_np = get_ref_data()
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(b_np, dev)
    bnn_a = tvm.nd.array(np.zeros(get_const_tuple(bnn_A.shape), dtype=bnn_A.dtype), dev)
    bnn_b = tvm.nd.array(np.zeros(get_const_tuple(bnn_B.shape), dtype=bnn_B.dtype), dev)
    bnn_c = tvm.nd.array(np.zeros(get_const_tuple(bnn_C.shape), dtype=bnn_C.dtype), dev)
    # Pack both operands, then run the binary dense kernel on the packed data.
    f1 = tvm.build(s1, [A, bnn_A], "llvm")
    f2 = tvm.build(s2, [B, bnn_B], "llvm")
    f3 = tvm.build(s3, [bnn_A1, bnn_B1, bnn_C], "llvm")
    f1(a, bnn_a)
    f2(b, bnn_b)
    f3(bnn_a, bnn_b, bnn_c)
    tvm.testing.assert_allclose(bnn_c.numpy(), c_np, rtol=1e-5)
def test_binary_dense():
    """Exercise the binarized dense path on two representative shapes."""
    for batch, in_dim, out_dim in ((1, 4096, 1024), (1, 1024, 1000)):
        verify_binary_dense(batch, in_dim, out_dim)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_binary_dense()
| 2,837 | 36.342105 | 88 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_vision.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for vision package"""
import math
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.vision import ssd, non_max_suppression, get_valid_counts
# Dispatch tables mapping a target kind to its (compute, schedule) pair
# (just the schedule for _roi_pool_schedule); resolved per-target via
# tvm.topi.testing.dispatch in the tests below.
_get_valid_counts_implement = {
    "generic": (topi.vision.get_valid_counts, topi.generic.schedule_get_valid_counts),
    "gpu": (topi.cuda.get_valid_counts, topi.cuda.schedule_get_valid_counts),
}
_nms_implement = {
    "generic": (topi.vision.non_max_suppression, topi.generic.schedule_nms),
    "gpu": (topi.cuda.non_max_suppression, topi.cuda.schedule_nms),
}
_multibox_prior_implement = {
    "generic": (topi.vision.ssd.multibox_prior, topi.generic.schedule_multibox_prior),
    "gpu": (topi.cuda.multibox_prior, topi.cuda.schedule_multibox_prior),
}
_multibox_detection_implement = {
    "generic": (topi.vision.ssd.multibox_detection, topi.generic.schedule_multibox_detection),
    "gpu": (topi.cuda.multibox_detection, topi.cuda.schedule_multibox_detection),
}
_roi_align_implement = {
    "generic": (topi.vision.roi_align_nchw, topi.generic.schedule_roi_align),
    "cpu": (topi.x86.roi_align_nchw, topi.generic.schedule_roi_align),
    "gpu": (topi.vision.roi_align_nchw, topi.cuda.schedule_roi_align),
}
_roi_pool_schedule = {
    "generic": topi.generic.schedule_roi_pool,
    "gpu": topi.cuda.schedule_roi_pool,
}
_proposal_implement = {
    "generic": (topi.vision.rcnn.proposal, topi.generic.schedule_proposal),
    "gpu": (topi.cuda.proposal, topi.cuda.schedule_proposal),
}
_all_class_nms_implement = {
    "generic": (topi.vision.all_class_non_max_suppression, topi.generic.schedule_nms),
    "gpu": (topi.cuda.all_class_non_max_suppression, topi.cuda.schedule_nms),
}
class TestValidCounts:
    """Test get_valid_counts: filter detections by score (and optional id)."""
    dshape, score_threshold, id_index, score_index = tvm.testing.parameters(
        ((1, 1000, 5), 0.5, -1, 0),
        ((1, 2500, 6), 0, 0, 1),
        ((1, 2500, 5), -1, -1, 0),
        ((3, 1000, 6), 0.55, 1, 0),
        ((16, 500, 5), 0.95, -1, 1),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(self, dtype, dshape, score_threshold, id_index, score_index):
        """Build random detections and the numpy reference outputs:
        per-batch valid count, compacted detections, and original indices."""
        batch_size, num_anchor, elem_length = dshape
        np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
        np_out1 = np.zeros(shape=(batch_size,))
        np_out2 = np.zeros(shape=dshape).astype(dtype)
        np_out3 = np.zeros(shape=(batch_size, num_anchor))
        for i in range(batch_size):
            np_out1[i] = 0
            inter_idx = 0
            for j in range(num_anchor):
                score = np_data[i, j, score_index]
                # A detection is valid if its score passes the threshold and,
                # when id_index >= 0, its class id is non-negative.
                if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
                    for k in range(elem_length):
                        np_out2[i, inter_idx, k] = np_data[i, j, k]
                    np_out1[i] += 1
                    np_out3[i, inter_idx] = j
                    inter_idx += 1
                # Positions past the valid region are padded with -1.
                if j >= np_out1[i]:
                    for k in range(elem_length):
                        np_out2[i, j, k] = -1.0
                    np_out3[i, j] = -1
        return np_data, np_out1, np_out2, np_out3
    def test_get_valid_counts(
        self, target, dev, ref_data, dtype, dshape, score_threshold, id_index, score_index
    ):
        """Run the kernel and compare all three outputs with the reference."""
        np_data, np_out1, np_out2, np_out3 = ref_data
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _get_valid_counts_implement)
            data = te.placeholder(dshape, name="data", dtype=dtype)
            outs = fcompute(data, score_threshold, id_index, score_index)
            s = fschedule(outs)
        tvm_input_data = tvm.nd.array(np_data, dev)
        tvm_out1 = tvm.nd.array(np.zeros(np_out1.shape, dtype="int32"), dev)
        tvm_out2 = tvm.nd.array(np.zeros(np_out2.shape, dtype=dtype), dev)
        tvm_out3 = tvm.nd.array(np.zeros(np_out3.shape, dtype="int32"), dev)
        f = tvm.build(s, [data, outs[0], outs[1], outs[2]], target)
        f(tvm_input_data, tvm_out1, tvm_out2, tvm_out3)
        tvm.testing.assert_allclose(tvm_out1.numpy(), np_out1, rtol=1e-3)
        tvm.testing.assert_allclose(tvm_out2.numpy(), np_out2, rtol=1e-3)
        tvm.testing.assert_allclose(tvm_out3.numpy(), np_out3, rtol=1e-3)
def verify_non_max_suppression(
    target,
    dev,
    np_data,
    np_valid_count,
    np_indices,
    np_result,
    np_indices_result,
    max_output_size,
    iou_threshold,
    force_suppress,
    top_k,
    coord_start,
    score_index,
    id_index,
):
    """Run NMS twice (return_indices False/True) and compare both outputs
    against the expected ``np_result`` / ``np_indices_result``."""
    dshape = np_data.shape
    batch, num_anchors, _ = dshape
    indices_dshape = (batch, num_anchors)
    data = te.placeholder(dshape, name="data")
    valid_count = te.placeholder((batch,), dtype="int32", name="valid_count")
    indices = te.placeholder((batch, num_anchors), dtype="int32", name="indices")
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _nms_implement)
        # First variant: return the suppressed detection records.
        out = fcompute(
            data,
            valid_count,
            indices,
            max_output_size,
            iou_threshold,
            force_suppress,
            top_k,
            coord_start=coord_start,
            score_index=score_index,
            id_index=id_index,
            return_indices=False,
        )
        # Second variant: return the surviving box indices instead.
        indices_out = fcompute(
            data,
            valid_count,
            indices,
            max_output_size,
            iou_threshold,
            force_suppress,
            top_k,
            coord_start=coord_start,
            score_index=score_index,
            id_index=id_index,
            return_indices=True,
        )
        s = fschedule(out)
        indices_s = fschedule(indices_out)
    tvm_data = tvm.nd.array(np_data, dev)
    tvm_valid_count = tvm.nd.array(np_valid_count, dev)
    tvm_indices = tvm.nd.array(np_indices, dev)
    tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev)
    f = tvm.build(s, [data, valid_count, indices, out], target)
    f(tvm_data, tvm_valid_count, tvm_indices, tvm_out)
    tvm.testing.assert_allclose(tvm_out.numpy(), np_result, rtol=1e-4)
    tvm_indices_out = tvm.nd.array(np.zeros(indices_dshape, dtype="int32"), dev)
    f = tvm.build(indices_s, [data, valid_count, indices, indices_out[0]], target)
    f(tvm_data, tvm_valid_count, tvm_indices, tvm_indices_out)
    tvm.testing.assert_allclose(tvm_indices_out.numpy(), np_indices_result, rtol=1e-4)
def test_non_max_suppression(target, dev):
    """Two hand-constructed NMS cases: with a class-id column (6-wide rows,
    force_suppress) and without one (5-wide rows, max_output_size=2)."""
    # Case 1: rows are [id, score, x1, y1, x2, y2].
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45],
                [1, 0.7, 30, 60, 50, 80],
                [0, 0.4, 4, 21, 19, 40],
                [2, 0.9, 35, 61, 52, 79],
                [1, 0.5, 100, 60, 70, 110],
            ]
        ]
    ).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_indices = np.array([[0, 1, 2, 3, 4]]).astype("int32")
    max_output_size = -1
    # Suppressed rows are padded with -1.
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79],
                [0, 0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[3, 0, -1, -1, -1]])
    verify_non_max_suppression(
        target,
        dev,
        np_data,
        np_valid_count,
        np_indices,
        np_result,
        np_indices_result,
        max_output_size,
        0.7,
        True,
        2,
        2,
        1,
        0,
    )
    # Case 2: rows are [score, x1, y1, x2, y2] (no class id, id_index=-1).
    np_data = np.array(
        [
            [
                [0.8, 1, 20, 25, 45],
                [0.7, 30, 60, 50, 80],
                [0.4, 4, 21, 19, 40],
                [0.9, 35, 61, 52, 79],
                [0.5, 100, 60, 70, 110],
            ]
        ]
    ).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_indices = np.array([[0, 1, 2, 3, 4]]).astype("int32")
    max_output_size = 2
    np_result = np.array(
        [
            [
                [0.9, 35, 61, 52, 79],
                [0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[3, 0, -1, -1, -1]])
    verify_non_max_suppression(
        target,
        dev,
        np_data,
        np_valid_count,
        np_indices,
        np_result,
        np_indices_result,
        max_output_size,
        0.7,
        False,
        2,
        1,
        0,
        -1,
    )
class TestMultiboxPrior:
    """Test SSD multibox_prior anchor generation against a numpy reference."""
    dshape, sizes, ratios, steps, offsets, clip = tvm.testing.parameters(
        ((1, 3, 50, 50), (1,), (1,), (-1, -1), (0.5, 0.5), False),
        ((1, 3, 224, 224), (0.5, 0.25, 0.1), (1, 2, 0.5), (-1, -1), (0.5, 0.5), False),
        ((1, 32, 32, 32), (0.5, 0.25), (1, 2), (2, 2), (0.5, 0.5), True),
    )
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(self, dtype, dshape, sizes, ratios, offsets, steps, clip):
        """Compute anchor boxes per pixel; num_sizes + num_ratios - 1 boxes
        per location, each stored as (x1, y1, x2, y2)."""
        in_height = dshape[2]
        in_width = dshape[3]
        num_sizes = len(sizes)
        num_ratios = len(ratios)
        size_ratio_concat = sizes + ratios
        # Negative steps mean "one cell per pixel" (1 / extent).
        steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
        steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
        offset_h = offsets[0]
        offset_w = offsets[1]
        out_shape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
        np_in = np.random.uniform(size=dshape).astype(dtype)
        np_out = np.zeros(out_shape).astype(dtype)
        for i in range(in_height):
            center_h = (i + offset_h) * steps_h
            for j in range(in_width):
                center_w = (j + offset_w) * steps_w
                for k in range(num_sizes + num_ratios - 1):
                    # k < num_sizes: boxes from sizes with ratio 1;
                    # otherwise: boxes from sizes[0] with ratios[k+1-num_sizes].
                    w = (
                        size_ratio_concat[k] * in_height / in_width / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0]
                        * in_height
                        / in_width
                        * math.sqrt(size_ratio_concat[k + 1])
                        / 2.0
                    )
                    h = (
                        size_ratio_concat[k] / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    )
                    count = (
                        i * in_width * (num_sizes + num_ratios - 1)
                        + j * (num_sizes + num_ratios - 1)
                        + k
                    )
                    np_out[0][count][0] = center_w - w
                    np_out[0][count][1] = center_h - h
                    np_out[0][count][2] = center_w + w
                    np_out[0][count][3] = center_h + h
        if clip:
            np_out = np.clip(np_out, 0, 1)
        return np_in, np_out
    def test_multibox_prior(
        self, target, dev, dtype, dshape, ref_data, sizes, ratios, steps, offsets, clip
    ):
        """Run the kernel and compare against the numpy anchors."""
        np_in, np_out = ref_data
        data = te.placeholder(dshape, name="data", dtype=dtype)
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _multibox_prior_implement)
        with tvm.target.Target(target):
            out = fcompute(data, sizes, ratios, steps, offsets, clip)
            s = fschedule(out)
        tvm_input_data = tvm.nd.array(np_in, dev)
        tvm_out = tvm.nd.array(np.zeros(np_out.shape, dtype=dtype), dev)
        f = tvm.build(s, [data, out], target)
        f(tvm_input_data, tvm_out)
        tvm.testing.assert_allclose(tvm_out.numpy(), np_out, rtol=1e-3)
class TestMultiboxDetection:
    """Test SSD multibox_detection with a fixed hand-computed example."""
    (batch_size,) = tvm.testing.parameters((1,), (6,))
    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(
        self,
        batch_size,
    ):
        # Manually create test case
        np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]] * batch_size)
        np_loc_preds = np.array(
            [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]] * batch_size
        )
        np_anchors = np.array(
            [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]] * batch_size
        )
        # Expected rows: [class_id, score, x1, y1, x2, y2].
        expected_np_out = np.array(
            [
                [
                    [1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                    [0, 0.44999999, 1, 1, 1, 1],
                    [0, 0.30000001, 0, 0, 0.22903419, 0.20435292],
                ]
            ]
            * batch_size
        )
        return np_cls_prob, np_loc_preds, np_anchors, expected_np_out
    def test_multibox_detection(self, target, dev, ref_data):
        """Run the kernel and compare against the precomputed detections."""
        np_cls_prob, np_loc_preds, np_anchors, expected_np_out = ref_data
        batch_size = np_cls_prob.shape[0]
        num_anchors = 3
        num_classes = 3
        cls_prob = te.placeholder((batch_size, num_anchors, num_classes), name="cls_prob")
        loc_preds = te.placeholder((batch_size, num_anchors * 4), name="loc_preds")
        anchors = te.placeholder((batch_size, num_anchors, 4), name="anchors")
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _multibox_detection_implement)
        with tvm.target.Target(target):
            out = fcompute(cls_prob, loc_preds, anchors)
            s = fschedule(out)
        tvm_cls_prob = tvm.nd.array(np_cls_prob.astype(cls_prob.dtype), dev)
        tvm_loc_preds = tvm.nd.array(np_loc_preds.astype(loc_preds.dtype), dev)
        tvm_anchors = tvm.nd.array(np_anchors.astype(anchors.dtype), dev)
        tvm_out = tvm.nd.array(np.zeros((batch_size, num_anchors, 6)).astype(out.dtype), dev)
        f = tvm.build(s, [cls_prob, loc_preds, anchors, out], target)
        f(tvm_cls_prob, tvm_loc_preds, tvm_anchors, tvm_out)
        tvm.testing.assert_allclose(tvm_out.numpy(), expected_np_out, rtol=1e-4)
class TestRoiAlign:
    """Test roi_align_nchw against the python reference implementation."""
    (
        batch,
        in_channel,
        in_size,
        num_roi,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
    ) = tvm.testing.parameters(
        (1, 16, 32, 64, 7, 1.0, -1, 0),
        (4, 16, 32, 64, 7, 0.5, 2, 0),
        (1, 32, 32, 80, 8, 0.0625, 2, 0),
        (1, 32, 500, 80, 8, 0.0625, 2, 0),
        (1, 16, 32, 64, 7, 1.0, -1, 1),
        (4, 16, 32, 64, 7, 0.5, 2, 1),
        (1, 32, 32, 80, 8, 0.0625, 2, 1),
        (1, 32, 500, 80, 8, 0.0625, 2, 1),
    )
    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(
        self,
        batch,
        in_channel,
        in_size,
        num_roi,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
    ):
        """Random features and rois; rois are (batch_index, x1, y1, x2, y2)."""
        a_shape = (batch, in_channel, in_size, in_size)
        rois_shape = (num_roi, 5)
        a_np = np.random.uniform(-1, 1, size=a_shape).astype("float32")
        rois_np = np.random.uniform(-1, 1, size=rois_shape).astype("float32") * in_size
        # Column 0 must be a valid batch index.
        rois_np[:, 0] = np.random.randint(low=0, high=batch, size=num_roi)
        b_np = tvm.topi.testing.roi_align_nchw_python(
            a_np,
            rois_np,
            pooled_size=pooled_size,
            spatial_scale=spatial_scale,
            sample_ratio=sample_ratio,
            mode=mode,
        )
        return a_np, rois_np, b_np
    def test_roi_align(
        self,
        target,
        dev,
        ref_data,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
    ):
        # For mode, 0 = avg, 1 = max
        a_np, rois_np, b_np = ref_data
        a = te.placeholder(a_np.shape)
        rois = te.placeholder(rois_np.shape)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _roi_align_implement)
            b = fcompute(
                a,
                rois,
                pooled_size=pooled_size,
                spatial_scale=spatial_scale,
                sample_ratio=sample_ratio,
                mode=mode,
            )
            s = fschedule(b)
        tvm_a = tvm.nd.array(a_np, dev)
        tvm_rois = tvm.nd.array(rois_np, dev)
        tvm_b = tvm.nd.array(np.zeros(b_np.shape, dtype=b.dtype), device=dev)
        f = tvm.build(s, [a, rois, b], target)
        f(tvm_a, tvm_rois, tvm_b)
        tvm_val = tvm_b.numpy()
        tvm.testing.assert_allclose(tvm_val, b_np, rtol=1e-3, atol=1e-4)
class TestRoiPool:
    """Test roi_pool_nchw against the python reference implementation."""
    batch, in_channel, in_size, num_roi, pooled_size, spatial_scale = tvm.testing.parameters(
        (1, 4, 16, 32, 7, 1.0),
        (4, 4, 16, 32, 7, 0.5),
    )
    @tvm.testing.fixture(cache_return_value=True)
    def ref_data(self, batch, in_channel, in_size, num_roi, pooled_size, spatial_scale):
        """Random features and rois; roi column 0 is the batch index."""
        a_shape = (batch, in_channel, in_size, in_size)
        rois_shape = (num_roi, 5)
        a_np = np.random.uniform(size=a_shape).astype("float32")
        rois_np = np.random.uniform(size=rois_shape).astype("float32") * in_size
        rois_np[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype("float32")
        b_np = tvm.topi.testing.roi_pool_nchw_python(
            a_np, rois_np, pooled_size=pooled_size, spatial_scale=spatial_scale
        )
        return a_np, rois_np, b_np
    def test_roi_pool(self, target, dev, ref_data, pooled_size, spatial_scale):
        """Run the kernel and compare against the numpy reference."""
        a_np, rois_np, b_np = ref_data
        a = te.placeholder(a_np.shape)
        rois = te.placeholder(rois_np.shape)
        with tvm.target.Target(target):
            b = topi.vision.rcnn.roi_pool_nchw(
                a, rois, pooled_size=pooled_size, spatial_scale=spatial_scale
            )
            s_func = tvm.topi.testing.dispatch(target, _roi_pool_schedule)
            s = s_func(b)
        tvm_a = tvm.nd.array(a_np, dev)
        tvm_rois = tvm.nd.array(rois_np, dev)
        tvm_b = tvm.nd.array(np.zeros(b_np.shape, dtype=b.dtype), device=dev)
        f = tvm.build(s, [a, rois, b], target)
        f(tvm_a, tvm_rois, tvm_b)
        tvm.testing.assert_allclose(tvm_b.numpy(), b_np, rtol=1e-4)
def verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
    """Build the RCNN proposal op for ``target`` and compare with ``np_out``."""
    prob_ph = te.placeholder(np_cls_prob.shape)
    pred_ph = te.placeholder(np_bbox_pred.shape)
    info_ph = te.placeholder(np_im_info.shape)
    with tvm.target.Target(target):
        compute_fn, schedule_fn = tvm.topi.testing.dispatch(target, _proposal_implement)
        result = compute_fn(prob_ph, pred_ph, info_ph, **attrs)
        sched = schedule_fn(result)
        func = tvm.build(sched, [prob_ph, pred_ph, info_ph, result], target)
        dev_prob = tvm.nd.array(np_cls_prob, device=dev)
        dev_pred = tvm.nd.array(np_bbox_pred, device=dev)
        dev_info = tvm.nd.array(np_im_info, device=dev)
        dev_out = tvm.nd.empty(device=dev, shape=result.shape, dtype=result.dtype)
        func(dev_prob, dev_pred, dev_info, dev_out)
        tvm.testing.assert_allclose(dev_out.numpy(), np_out, rtol=1e-4)
@tvm.testing.known_failing_targets("vulkan")
def test_proposal(target, dev):
    """Run the proposal op twice on fixed inputs: once with the default
    box regression and once with iou_loss enabled."""
    attrs = {
        "scales": (0.5,),
        "ratios": (0.5,),
        "feature_stride": 16,
        "iou_loss": False,
        "rpn_min_size": 16,
        "threshold": 0.7,
        "rpn_pre_nms_top_n": 200,
        "rpn_post_nms_top_n": 4,
    }
    np_cls_prob = np.array(
        [
            [
                [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],
                [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]],
            ]
        ],
        dtype="float32",
    )
    np_bbox_pred = np.array(
        [
            [
                [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],
                [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],
                [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],
                [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],
            ]
        ],
        dtype="float32",
    )
    np_im_info = np.array([[48.0, 48.0, 1.0]], dtype="float32")
    # Expected proposals for the default (non-iou_loss) regression.
    np_out = np.array(
        [
            [0.0, 0.0, 2.8451548, 28.38012, 18.154846],
            [0.0, 0.0, 15.354933, 41.96971, 41.245064],
            [0.0, 18.019852, 1.0538368, 51.98015, 25.946163],
            [0.0, 27.320923, -1.266357, 55.0, 24.666357],
        ],
        dtype="float32",
    )
    verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
    # Expected proposals when iou_loss changes the box decoding.
    np_out = np.array(
        [
            [0.0, -5.25, -2.5, 21.75, 19.0],
            [0.0, 11.25, -2.0, 37.25, 18.5],
            [0.0, 26.849998, -2.3000002, 53.45, 18.6],
            [0.0, -4.95, 13.799999, 22.25, 35.5],
        ],
        dtype="float32",
    )
    attrs["iou_loss"] = True
    verify_proposal(target, dev, np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
def verify_all_class_non_max_suppression(
    target,
    dev,
    boxes_np,
    scores_np,
    max_output_boxes_per_class,
    iou_threshold,
    score_threshold,
    expected_indices,
):
    """Run all-class NMS and check the selected (batch, class, box) triples.

    Only the first ``num_detections`` rows of the index buffer are compared.
    """
    dshape = boxes_np.shape
    batch, num_boxes, _ = dshape
    _, num_class, _ = scores_np.shape
    boxes = te.placeholder(dshape, name="boxes")
    scores = te.placeholder(scores_np.shape, dtype="float32", name="scores")
    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _all_class_nms_implement)
        out = fcompute(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold)
        s = fschedule(out)
    tvm_boxes = tvm.nd.array(boxes_np, dev)
    tvm_scores = tvm.nd.array(scores_np, dev)
    # Worst-case output size: every box selected for every class.
    selected_indices = tvm.nd.array(np.zeros((batch * num_class * num_boxes, 3), "int64"), dev)
    num_detections = tvm.nd.array(np.zeros((1,), "int64"), dev)
    f = tvm.build(s, [boxes, scores, out[0], out[1]], target)
    f(tvm_boxes, tvm_scores, selected_indices, num_detections)
    tvm_res = selected_indices.numpy()[: num_detections.numpy()[0]]
    np.testing.assert_equal(tvm_res, expected_indices)
def test_all_class_non_max_suppression(target, dev):
    """Two fixed all-class NMS cases: a 2-batch/2-class case and the
    classic single-class ONNX-style example with a score threshold."""
    boxes = np.array(
        [
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.0, 0.0, 0.5, 0.5],
                [0.5, 0.5, 0.9, 0.9],
                [0.5, 0.5, 1.0, 1.0],
            ],
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.5, 0.5, 0.95, 0.95],
                [0.5, 0.5, 0.96, 0.96],
                [0.5, 0.5, 1.0, 1.0],
            ],
        ]
    ).astype("float32")
    scores = np.array(
        [
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
        ]
    ).astype("float32")
    max_output_boxes_per_class = 2
    iou_threshold = 0.8
    score_threshold = 0.0
    # Expected rows: (batch_index, class_index, box_index).
    expected = np.array(
        [[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 2], [1, 0, 4], [1, 0, 1], [1, 1, 4], [1, 1, 1]]
    )
    verify_all_class_non_max_suppression(
        target,
        dev,
        boxes,
        scores,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected,
    )
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
                [0.0, 0.1, 1.0, 1.1],
                [0.0, -0.1, 1.0, 0.9],
                [0.0, 10.0, 1.0, 11.0],
                [0.0, 10.1, 1.0, 11.1],
                [0.0, 100.0, 1.0, 101.0],
            ]
        ]
    ).astype(np.float32)
    scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = 3
    iou_threshold = 0.5
    score_threshold = 0.4
    expected = np.array([[0, 0, 3], [0, 0, 0]])
    verify_all_class_non_max_suppression(
        target,
        dev,
        boxes,
        scores,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected,
    )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 24,823 | 32.682497 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_bitserial_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def generate_quantized_np(shape, bits, out_dtype):
    """Return random integers uniform in ``[0, 2**bits)`` as ``out_dtype``."""
    upper = 1 << bits
    return np.random.randint(0, upper, size=shape).astype(out_dtype)
def verify_bitserial_conv2d_nchw(
    batch,
    in_size,
    in_channel,
    num_filter,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    unipolar,
):
    """Check x86 bitserial conv2d (NCHW) against a plain conv2d reference.

    With ``unipolar`` the 0/1 weights are reinterpreted as -1/+1 for the
    reference computation.
    """
    in_height = in_width = in_size
    input_dtype = "uint32"
    out_dtype = "int32"
    with tvm.target.Target("llvm"):
        A = te.placeholder((batch, in_channel, in_height, in_width), dtype=input_dtype, name="A")
        W = te.placeholder((num_filter, in_channel, kernel, kernel), dtype=input_dtype, name="W")
        B = topi.x86.bitserial_conv2d_nchw(
            A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
        )
        s = topi.x86.schedule_bitserial_conv2d_nchw([B])
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    @memoize("topi.tests.test_topi_bitseral_conv2d_nchw")
    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
        if unipolar:
            # Map weight values {1 -> +1, everything else -> -1}.
            w_ = np.copy(w_np).astype(out_dtype)
            for x in np.nditer(w_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            b_np = tvm.topi.testing.conv2d_nchw_python(a_np.astype(out_dtype), w_, stride, padding)
        else:
            b_np = tvm.topi.testing.conv2d_nchw_python(a_np, w_np, stride, padding)
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data()
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], "llvm")
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def verify_bitserial_conv2d_nhwc(
    batch,
    in_size,
    in_channel,
    num_filter,
    kernel,
    stride,
    padding,
    activation_bits,
    weight_bits,
    unipolar,
):
    """Check x86 bitserial conv2d (NHWC) against a plain conv2d reference.

    Mirrors verify_bitserial_conv2d_nchw but with NHWC data and HWIO
    weight layouts.
    """
    in_height = in_width = in_size
    input_dtype = "uint32"
    out_dtype = "int32"
    with tvm.target.Target("llvm"):
        A = te.placeholder((batch, in_height, in_width, in_channel), dtype=input_dtype, name="A")
        W = te.placeholder((kernel, kernel, in_channel, num_filter), dtype=input_dtype, name="W")
        B = topi.x86.bitserial_conv2d_nhwc(
            A, W, stride, padding, activation_bits, weight_bits, input_dtype, out_dtype, unipolar
        )
        s = topi.x86.schedule_bitserial_conv2d_nhwc([B])
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    @memoize("topi.tests.test_topi_bitseral_conv2d_nhwc")
    def get_ref_data():
        a_np = generate_quantized_np(get_const_tuple(a_shape), activation_bits, input_dtype)
        w_np = generate_quantized_np(get_const_tuple(w_shape), weight_bits, input_dtype)
        if unipolar:
            # Map weight values {1 -> +1, everything else -> -1}.
            w_ = np.copy(w_np).astype(out_dtype)
            for x in np.nditer(w_, op_flags=["readwrite"]):
                x[...] = 1 if x == 1 else -1
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_, stride, padding).astype(out_dtype)
        else:
            b_np = tvm.topi.testing.conv2d_nhwc_python(a_np, w_np, stride, padding).astype(
                out_dtype
            )
        return a_np, w_np, b_np
    a_np, w_np, b_np = get_ref_data()
    dev = tvm.cpu(0)
    a = tvm.nd.array(a_np, dev)
    w = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    func = tvm.build(s, [A, W, B], "llvm")
    func(a, w, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
def test_bitserial_conv2d():
    """Run NCHW and NHWC bitserial conv2d checks over several bit configs."""
    in_size, ic, oc = 56, 64, 64
    k, stride, pad = 3, 1, 1
    # (activation_bits, weight_bits, unipolar) combinations to cover.
    bit_configs = (
        (1, 1, True),
        (2, 1, True),
        (1, 1, False),
        (2, 1, False),
        (2, 2, False),
    )
    for verify in (verify_bitserial_conv2d_nchw, verify_bitserial_conv2d_nhwc):
        for abits, wbits, unipolar in bit_configs:
            verify(1, in_size, ic, oc, k, stride, pad, abits, wbits, unipolar)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_bitserial_conv2d()
| 5,754 | 35.194969 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_nhwc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Dispatch table mapping a target kind to its NHWC conv2d
# (compute, schedule) pair; resolved via tvm.topi.testing.dispatch.
_conv2d_nhwc_implement = {
    "generic": (topi.nn.conv2d_nhwc, topi.generic.schedule_conv2d_nhwc),
    "gpu": (topi.gpu.conv2d_nhwc, topi.gpu.schedule_conv2d_nhwc),
    "cpu": (topi.nn.conv2d_nhwc, topi.x86.schedule_conv2d_nhwc),
    "arm_cpu": (
        topi.arm_cpu.conv2d_nhwc_spatial_pack,
        topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack,
    ),
    "mali": (
        topi.mali.conv2d_nhwc_spatial_pack,
        topi.mali.schedule_conv2d_nhwc_spatial_pack,
    ),
    "bifrost": (
        topi.mali.conv2d_nhwc_spatial_pack,
        topi.mali.schedule_conv2d_nhwc_spatial_pack,
    ),
    "hls": (topi.nn.conv2d_nhwc, topi.hls.schedule_conv2d_nhwc),
}
# Parameterized conv2d configurations: each tuple is
# (batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation).
dtype = tvm.testing.parameter("float32")
batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation = tvm.testing.parameters(
    (1, 256, 32, 256, 3, 1, "SAME", 1),
    (4, 128, 16, 128, 5, 2, "SAME", 1),
    (4, 128, 16, 256, 5, 2, "SAME", 1),
    (1, 256, 32, 256, 3, 1, "VALID", 1),
    (1, 256, 32, 256, 3, 1, "VALID", 1),
    (4, 128, 16, 128, 5, 2, "VALID", 1),
    (4, 128, 16, 256, 5, 2, "VALID", 1),
    (1, 128, 16, 256, 3, 2, (0, 0, 1, 1), 1),
    (1, 128, 16, 256, 3, 2, (1, 1, 2, 2), 1),
    (1, 128, 16, 128, 5, 2, (3, 3, 2, 2), 1),
    (1, 128, 16, 256, 3, 2, (0, 1, 2, 3), 1),
    (1, 256, 32, 256, 3, 1, "SAME", 2),
    (1, 256, 32, 256, 3, 1, (1, 1, 2, 2), 2),
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation):
    """Cached random NHWC input/HWIO weight arrays plus the numpy reference output."""
    data = np.random.uniform(size=(batch, in_size, in_size, in_channel)).astype(dtype)
    weight = np.random.uniform(size=(kernel, kernel, in_channel, num_filter)).astype(dtype)
    # Pre-dilate the weights so the plain reference conv matches the dilated op.
    dilated_weight = tvm.topi.testing.dilate_python(weight, (dilation, dilation, 1, 1))
    expected = tvm.topi.testing.conv2d_nhwc_python(data, dilated_weight, stride, padding)
    return data, weight, expected
def test_conv2d_nhwc_hwio(target, dev, ref_data, dtype, stride, padding, dilation):
    """Build conv2d_nhwc (HWIO weights) on each target and compare with numpy."""
    data_np, weight_np, expected_np = ref_data

    data_ph = te.placeholder(data_np.shape, name="A", dtype=dtype)
    weight_ph = te.placeholder(weight_np.shape, name="W", dtype=dtype)

    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_nhwc_implement)
        out = fcompute(data_ph, weight_ph, stride, padding, dilation, dtype)
        sched = fschedule([out])
    data_nd = tvm.nd.array(data_np, dev)
    weight_nd = tvm.nd.array(weight_np, dev)
    out_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
    built = tvm.build(sched, [data_ph, weight_ph, out], target)
    built(data_nd, weight_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), expected_np, rtol=1e-5)
def test_conv2d_nhwc_ohwi(ref_data, dtype, stride, padding, dilation):
    """Check conv2d with an OHWI kernel layout against the shared HWIO reference."""
    # only test on CPU target because topi doesn't have schedules for this layout
    target = "llvm"
    dev = tvm.device(target, 0)

    data_np, weight_hwio, expected_np = ref_data
    weight_ohwi = weight_hwio.transpose(3, 0, 1, 2)  # HWIO -> OHWI

    data_ph = te.placeholder(data_np.shape, name="A", dtype=dtype)
    weight_ph = te.placeholder(weight_ohwi.shape, name="W", dtype=dtype)
    conv = topi.nn.conv2d(
        data_ph,
        weight_ph,
        stride,
        padding,
        dilation,
        data_layout="NHWC",
        kernel_layout="OHWI",
        out_dtype="float32",
    )
    sched = tvm.te.create_schedule(conv.op)
    data_nd = tvm.nd.array(data_np, dev)
    weight_nd = tvm.nd.array(weight_ohwi, dev)
    out_nd = tvm.nd.array(np.zeros(get_const_tuple(conv.shape), dtype=conv.dtype), dev)
    built = tvm.build(sched, [data_ph, weight_ph, conv], target)
    built(data_nd, weight_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), expected_np, rtol=1e-5)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 4,674 | 35.24031 | 99 | py |
tvm | tvm-main/tests/python/topi/python/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Common utility for topi test"""
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
class Int8Fallback(autotvm.FallbackContext):
    """Fallback context whose configs report ``is_fallback = False``.

    Used by int8 tests so schedules do not take their fallback code path.
    """

    def _query_inside(self, target, workload):
        # Memoize one config per (target, workload) pair.
        key = (target, workload)
        if key not in self.memory:
            cfg = FallbackConfigEntity()
            cfg.is_fallback = False
            self.memory[key] = cfg
        return self.memory[key]
| 1,214 | 36.96875 | 62 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_batch_to_space_nd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch to space"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_batch_to_space_nd(input_shape, block_shape, crop_begin_list, crop_end_list):
    """Compare topi.nn.batch_to_space_nd against the numpy reference on all targets.

    Parameters
    ----------
    input_shape : list of int
        Shape of the input tensor, batch axis first.
    block_shape : list of int
        Block size folded out of the batch axis, one entry per spatial dim.
    crop_begin_list, crop_end_list : list of int
        Amount cropped from the start/end of each spatial dimension.
    """
    # Expected output shape: batch shrinks by prod(block_shape); each spatial
    # dim grows by its block factor minus the total crop; trailing dims unchanged.
    out_shape = []
    out_shape.append(int((input_shape[0] / np.prod(block_shape))))
    for i in range(1, len(block_shape) + 1):
        crop = crop_begin_list[i - 1] + crop_end_list[i - 1]
        out_shape.append(input_shape[i] * block_shape[i - 1] - crop)
    for i in range(len(block_shape) + 1, len(input_shape)):
        out_shape.append(input_shape[i])

    A = te.placeholder(input_shape, name="A", dtype="float32")
    dtype = A.dtype
    a_np = np.random.uniform(size=input_shape).astype(dtype)

    B = topi.nn.batch_to_space_nd(A, block_shape, crop_begin_list, crop_end_list)
    b_np = tvm.topi.testing.batch_to_space_nd_python(
        a_np, block_shape, crop_begin_list, crop_end_list
    )

    def check_device(target, dev):
        print("Running on target: %s" % target)
        # Fix: use tvm.target.Target; tvm.target.create is the deprecated alias
        # (the other test files in this directory already use Target).
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)

    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
@tvm.testing.uses_gpu
def test_batch_to_space():
    """Exercise batch_to_space_nd with and without cropping."""
    cases = [
        # (input_shape, block_shape, crop_begin, crop_end)
        ([4, 1, 1, 1], [2, 2], [0, 0], [0, 0]),  # without crops
        ([8, 1, 3, 1], [2, 2], [0, 2], [0, 0]),  # with crops
        ([18, 2, 1, 2], [2, 3], [1, 1], [0, 0]),
        ([20, 5, 8, 7], [2, 2], [1, 1], [1, 1]),
    ]
    for shape, blocks, begin, end in cases:
        verify_batch_to_space_nd(shape, blocks, begin, end)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_batch_to_space()
| 2,657 | 36.43662 | 87 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_space_to_depth.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for space to depth"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_space_to_depth(block_size, batch, in_channel, in_height, in_width, layout="NCHW"):
    """Compare topi.nn.space_to_depth with the numpy reference for one config."""
    channels_out = int(in_channel * (block_size * block_size))
    height_out = int(in_height / block_size)
    width_out = int(in_width / block_size)

    if layout == "NCHW":
        shape_in = [batch, in_channel, in_height, in_width]
        shape_out = [batch, channels_out, height_out, width_out]
    elif layout == "NHWC":
        shape_in = [batch, in_height, in_width, in_channel]
        shape_out = [batch, height_out, width_out, channels_out]
    else:
        raise NotImplementedError("Layout not supported {}".format(layout))

    A = te.placeholder(shape_in, name="A", dtype="float32")
    dtype = A.dtype
    data = np.random.uniform(size=shape_in).astype(dtype)

    B = topi.nn.space_to_depth(A, block_size=block_size, layout=layout)

    # The python reference only understands NCHW, so round-trip through it.
    if layout == "NHWC":
        data = np.transpose(data, axes=[0, 3, 1, 2])
    expected = tvm.topi.testing.space_to_depth_python(data, block_size)
    if layout == "NHWC":
        data = np.transpose(data, axes=[0, 2, 3, 1])
        expected = np.transpose(expected, axes=[0, 2, 3, 1])

    def check_device(device, dev):
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            sched = tvm.topi.testing.get_injective_schedule(device)(B)
        data_nd = tvm.nd.array(data, dev)
        out_nd = tvm.nd.array(np.zeros(shape_out, dtype=dtype), dev)
        built = tvm.build(sched, [A, B], device)
        built(data_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), expected, rtol=1e-3, atol=1e-3)

    for device, dev in tvm.testing.enabled_targets():
        check_device(device, dev)
@tvm.testing.uses_gpu
def test_space_to_depth():
    """Run space_to_depth over both layouts and a spread of shapes."""
    configs = [
        (2, 1, 1, 2, 2),  # simplest possible case
        (2, 1, 32, 32, 32),  # average input size
        (8, 1, 32, 64, 64),  # large block size
        (4, 8, 32, 32, 32),  # large batch size
        (4, 8, 32, 128, 128),  # large input size
    ]
    for layout in ("NCHW", "NHWC"):
        for block_size, batch, channel, height, width in configs:
            verify_space_to_depth(block_size, batch, channel, height, width, layout=layout)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_space_to_depth()
| 3,166 | 37.156627 | 93 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_math.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import scipy
from scipy import special
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi import utils
def test_util():
    """Sanity-check the constant-extraction helpers in topi.utils."""
    const_expr = tvm.tir.const(100, "int32")
    assert utils.get_const_int(const_expr) == 100
    assert utils.get_const_tuple((const_expr, const_expr)) == (100, 100)
# Table of elementwise ops under test.  Each entry maps an op name to:
#   topi               -- the TOPI compute to build
#   ref                -- numpy/scipy reference implementation
#   input_range        -- uniform sampling range for the input
#   shape              -- input shape (fixture default is (20, 3))
#   dtypes             -- dtypes to parametrize over (default ["float32"])
#   skip_name_check    -- don't require the compute body to be "tir.<name>"
#   replace_with_nan / replace_with_inf -- poison half the input values
#   cast_output        -- cast the reference output back to the input dtype
#   tolerance          -- per-dtype comparison tolerance (default 1e-5)
ewise_operations = {
    "floor": {"topi": topi.floor, "ref": np.floor, "input_range": (-100, 100)},
    "ceil": {"topi": topi.ceil, "ref": np.ceil, "input_range": (-100, 100)},
    "sign": {
        "topi": topi.sign,
        "ref": np.sign,
        "input_range": (-100, 100),
        "skip_name_check": True,
    },
    "trunc": {"topi": topi.trunc, "ref": np.trunc, "input_range": (-100, 100)},
    "fabs": {"topi": topi.abs, "ref": np.fabs, "input_range": (-100, 100)},
    # NOTE(review): "check_round" is not read anywhere in this file; the round
    # special-casing in ewise_ref_data keys off the op name instead.
    "round": {"topi": topi.round, "ref": np.round, "input_range": (-100, 100), "check_round": True},
    "exp": {"topi": topi.exp, "ref": np.exp, "input_range": (-1, 1)},
    "tanh": {
        "topi": topi.tanh,
        "ref": np.tanh,
        "input_range": (-10, 10),
        "shape": (128, 128),
        # NOTE(review): key is "dtype" but the parametrization below reads
        # "dtypes", so tanh only runs with the default float32 — confirm intent.
        "dtype": ["float32", "float64"],
    },
    "sigmoid": {
        "topi": topi.sigmoid,
        "ref": lambda x: 1 / (1 + np.exp(-x)),
        "input_range": (-1, 1),
    },
    "log": {"topi": topi.log, "ref": np.log, "input_range": (0, 100)},
    "sqrt": {"topi": topi.sqrt, "ref": np.sqrt, "input_range": (0, 100)},
    "rsqrt": {
        "topi": topi.rsqrt,
        "ref": lambda x: np.ones_like(x) / np.sqrt(x),
        "input_range": (0, 100),
        "skip_name_check": True,
    },
    "cos": {"topi": topi.cos, "ref": np.cos, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
    "tan": {
        "topi": topi.tan,
        "ref": np.tan,
        "input_range": (-2.0 * np.pi, 2.0 * np.pi),
        "dtypes": ["float32", "float64"],
    },
    "sin": {"topi": topi.sin, "ref": np.sin, "input_range": (-2.0 * np.pi, 2.0 * np.pi)},
    "erf": {"topi": topi.erf, "ref": scipy.special.erf, "input_range": (-0.1, 0.1)},
    "isnan": {
        "topi": topi.isnan,
        "ref": np.isnan,
        "input_range": (-1, 1),
        "replace_with_nan": True,
    },
    "isfinite": {
        "topi": topi.isfinite,
        "ref": np.isfinite,
        "input_range": (0, 1),
        "shape": (8, 8),
        "skip_name_check": True,
        "replace_with_nan": True,
        "replace_with_inf": True,
        "dtypes": ["float32", "float64", "int32", "int16"],
    },
    "isinf": {
        "topi": topi.isinf,
        "ref": np.isinf,
        "input_range": (0, 1),
        "shape": (8, 8),
        "skip_name_check": True,
        "replace_with_nan": True,
        "replace_with_inf": True,
        "dtypes": ["float32", "float64", "int32", "int16"],
    },
    "fast_exp": {
        "topi": topi.fast_exp,
        "ref": np.exp,
        "skip_name_check": True,
        "input_range": (-88, 88),
        "step": 0.01,
    },
    "fast_erf": {
        "topi": topi.fast_erf,
        "ref": scipy.special.erf,
        "skip_name_check": True,
        "input_range": (-10, 10),
        "step": 0.01,
        "dtypes": ["float32", "float16"],
        "cast_output": True,
        "tolerance": [1e-5, 1e-1],
    },
    "fast_tanh": {
        "topi": topi.fast_tanh,
        "ref": np.tanh,
        "skip_name_check": True,
        "input_range": (-10, 10),
        "step": 0.01,
    },
}
# One (op, dtype, tolerance) test case per declared op/dtype combination.
# NOTE(review): the tolerance default uses `len(dtype)` where `dtype` is the
# dtype *string* (e.g. len("float32") == 7), not the number of dtypes; it works
# because i is always smaller, but this looks accidental — confirm.
topi_name, dtype, tolerance = tvm.testing.parameters(
    *[
        (name, dtype, config.get("tolerance", [1e-5] * len(dtype))[i])
        for name, config in ewise_operations.items()
        for i, dtype in enumerate(config.get("dtypes", ["float32"]))
    ]
)
@tvm.testing.fixture(cache_return_value=True)
def ewise_ref_data(topi_name, dtype):
    """Build a random input and its numpy reference output for one ewise op."""
    cfg = ewise_operations[topi_name]
    low, high = cfg["input_range"]
    shape = cfg.get("shape", (20, 3))
    data = np.random.uniform(low, high, size=shape).astype(dtype)
    if dtype.startswith("float"):
        # Poison half of the elements with NaN/inf for the predicate ops.
        if cfg.get("replace_with_nan", False):
            picks = np.random.choice(data.size, int(data.size * 0.5), replace=False)
            data.ravel()[picks] = np.nan
        if cfg.get("replace_with_inf", False):
            picks = np.random.choice(data.size, int(data.size * 0.5), replace=False)
            data.ravel()[picks] = np.infty
    # avoid round check too close to boundary
    if topi_name == "round":
        data += ((np.abs(np.fmod(data, 1)) - 0.5) < 1e-6) * 1e-4
    expected = cfg["ref"](data)
    if cfg.get("cast_output", False):
        expected = expected.astype(dtype)
    return data, expected
def test_ewise(target, dev, topi_name, dtype, tolerance, ewise_ref_data):
    """Build one elementwise op, check its shape and intrinsic name, and compare to numpy."""
    target = tvm.target.Target(target)
    vulkan_unsupported = ["tan", "erf", "isnan", "isfinite", "isinf"]
    if target.kind.name == "vulkan" and topi_name in vulkan_unsupported:
        pytest.xfail(f"Vulkan runtime doesn't support {topi_name} yet")

    op_config = ewise_operations[topi_name]
    rows = te.var("m")
    cols = te.var("l")
    inp = te.placeholder((rows, cols), dtype=dtype, name="A")
    out = op_config["topi"](inp)

    assert tuple(out.shape) == tuple(inp.shape)
    if not op_config.get("skip_name_check", False):
        # The compute body should be the intrinsic named after the op.
        assert out.op.body[0].op.name == "tir." + topi_name

    data_np, expected_np = ewise_ref_data
    with tvm.target.Target(target):
        sched = tvm.topi.testing.get_injective_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name=topi_name)
    data_nd = tvm.nd.array(data_np, dev)
    out_nd = tvm.nd.array(np.zeros_like(expected_np), dev)
    built(data_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), expected_np, rtol=tolerance, atol=tolerance)
# (source dtype, destination dtype) pairs exercised by test_cast below.
from_dtype, to_dtype = tvm.testing.parameters(
    ("int32", "float32"),
    ("int32", "float64"),
    ("int32", "bool"),
    ("float16", "float32"),
    ("float16", "float64"),
    ("float32", "int32"),
    ("float32", "float64"),
    ("float32", "bool"),
    # disable this due to llvm5+ bug https://github.com/llvm/llvm-project/issues/56204
    # TODO (yongwww): pattern match f64->f16 to f64->f32->f16 as a workaround
    # ("float64", "float16"),
    ("float64", "float32"),
    ("bool", "float32"),
    ("bool", "int32"),
)
@tvm.testing.fixture(cache_return_value=True)
def cast_ref_data(from_dtype, to_dtype):
    """Random source array plus its numpy-cast counterpart."""
    shape = (5, 4)
    if from_dtype == "bool":
        src = np.random.choice([True, False], size=shape)
    else:
        src = np.random.uniform(-100, 100, size=shape).astype(from_dtype)
    if to_dtype == "bool":
        # Shift so element [2, 3] becomes exactly zero, guaranteeing at least
        # one False in the boolean result.
        src = src - src[2, 3]
    return src, src.astype(to_dtype)
def test_cast(target, dev, cast_ref_data, from_dtype, to_dtype):
    """Build topi.cast and verify it agrees with numpy's astype."""
    rows = te.var("m")
    cols = te.var("l")
    src = te.placeholder((rows, cols), dtype=from_dtype, name="A")
    dst = topi.cast(src, to_dtype)

    src_np, expected_np = cast_ref_data
    with tvm.target.Target(target):
        sched = tvm.topi.testing.get_injective_schedule(target)(dst)
        built = tvm.build(sched, [src, dst], target)
    src_nd = tvm.nd.array(src_np, dev)
    dst_nd = tvm.nd.empty(expected_np.shape, dtype=to_dtype, device=dev)
    built(src_nd, dst_nd)
    tvm.testing.assert_allclose(dst_nd.numpy(), expected_np)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 7,935 | 30.367589 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv1d_transpose_ncw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import itertools
import os
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
# Dispatch table: target kind -> (compute, schedule) for conv1d_transpose in
# NCW layout.
_conv1d_transpose_ncw_implement = {
    "generic": (topi.nn.conv1d_transpose_ncw, topi.generic.schedule_conv1d_transpose_ncw),
    "gpu": (topi.cuda.conv1d_transpose_ncw, topi.cuda.schedule_conv1d_transpose_ncw),
}
# Each tuple is one workload:
# (batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding).
# `padding` is an int or a (pad_left, pad_right) tuple; `output_padding` is a
# 1-tuple of extra size appended to the output.
(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    output_padding,
) = tvm.testing.parameters(
    (1, 3, 224, 32, 5, 1, 0, (0,)),
    (1, 3, 224, 32, 7, 1, 2, (0,)),
    (1, 3, 224, 32, 5, 2, 1, (0,)),
    (1, 3, 224, 32, 5, 2, 1, (1,)),
    (1, 3, 224, 32, 5, 2, 0, (0,)),
    (1, 32, 32, 128, 5, 1, 0, (0,)),
    (1, 32, 32, 128, 5, 2, 1, (0,)),
    (1, 1, 1024, 1, 512, 1, 256, (0,)),
    (1, 1, 1024, 1, 512, 2, 256, (0,)),
    (1, 1, 1024, 1, 512, 5, 256, (0,)),
    (1, 1, 1024, 1, 512, 5, 256, (3,)),
    (1, 2, 1024, 1, 128, 128, 0, (0,)),
    (1, 1, 1024, 2, 128, 128, 0, (0,)),
    (1, 1, 1024, 2, 2, 2, 0, (0,)),
    (1, 1, 10, 1, 5, 1, (0, 3), (0,)),
    (1, 1, 10, 1, 5, 1, (1, 3), (0,)),
    (1, 1, 10, 1, 5, 1, (2, 3), (0,)),
    (1, 257, 128, 1, 512, 128, 256, (0,)),
)
dtype = tvm.testing.parameter("float32")
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
    dtype, batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
    """Random NCW input/weights plus the reference transposed-conv and relu outputs.

    Fix: the body previously hard-coded ``dtype = "float32"``, silently
    discarding the ``dtype`` fixture parameter.  The parameter is now honored;
    behavior is unchanged for the current parametrization (only "float32").
    """
    a_shape = (batch, in_channel, in_size)
    w_shape = (in_channel, num_filter, kernel)
    a_np = np.random.uniform(size=a_shape).astype(dtype)
    w_np = np.random.uniform(size=w_shape).astype(dtype)
    b_np = tvm.topi.testing.conv1d_transpose_ncw_python(a_np, w_np, stride, padding, output_padding)
    # Reference for the fused relu variant built in the test below.
    c_np = np.maximum(b_np, 0)
    return a_np, w_np, b_np, c_np
@tvm.testing.known_failing_targets("vulkan")
def test_conv1d_transpose_ncw(
    target,
    dev,
    ref_data,
    dtype,
    stride,
    padding,
    output_padding,
):
    """Compile conv1d_transpose (plain and fused with relu) and compare both."""
    data_np, weight_np, expected_np, expected_relu_np = ref_data

    data_ph = te.placeholder(data_np.shape, name="A", dtype=dtype)
    weight_ph = te.placeholder(weight_np.shape, name="W", dtype=dtype)

    with tvm.target.Target(target):
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv1d_transpose_ncw_implement)
        conv = fcompute(data_ph, weight_ph, stride, padding, data_ph.dtype, output_padding)
        relu = topi.nn.relu(conv)
        conv_sched = fschedule([conv])
        relu_sched = fschedule([relu])
    data_nd = tvm.nd.array(data_np, dev)
    weight_nd = tvm.nd.array(weight_np, dev)
    conv_nd = tvm.nd.array(np.zeros(get_const_tuple(conv.shape), dtype=conv.dtype), dev)
    relu_nd = tvm.nd.array(np.zeros(get_const_tuple(relu.shape), dtype=relu.dtype), dev)
    conv_fn = tvm.build(conv_sched, [data_ph, weight_ph, conv], target)
    relu_fn = tvm.build(relu_sched, [data_ph, weight_ph, relu], target)
    conv_fn(data_nd, weight_nd, conv_nd)
    relu_fn(data_nd, weight_nd, relu_nd)
    tvm.testing.assert_allclose(conv_nd.numpy(), expected_np, rtol=1e-5)
    tvm.testing.assert_allclose(relu_nd.numpy(), expected_relu_np, rtol=1e-5)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 3,779 | 29.983607 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_tensordot_opts.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for functions in tvm.topi.arm_cpu.mprofile.dsp.micro_kernel.tensordot.
Contains a few unit tests, followed by integration tests for common use cases. Note that we do not
run the generated code - we just make sure the strings match exactly.
Note that a *lot* of instruction reordering happens during compilation from C to assembly (by GCC or
Clang). I've verified that this instruction reordering happens correctly for all the functions here.
For more details on why the generated code is the way it is, see `tensordot_int16_impl`."""
import textwrap
from tvm.topi.arm_cpu.mprofile.dsp.micro_kernel.tensordot import (
_get_tensor_halfwords,
_get_kernel_halfwords,
tensordot_int16_impl,
)
def test_get_tensor_halfwords():
    """Tests the _get_tensor_halfwords helper function in tensordot.py.

    This function loads the logical indices of the data that will be stored in memory at the tensor
    pointer. See the function docstring for more details.

    Call shape here is _get_tensor_halfwords(dimensions, offset, num_outputs,
    in_stride) — presumably matching tensordot.py's signature; TODO confirm.
    """
    # fmt: off
    # A simple 3x3 depthwise convolution computing one output and with in_stride = 1. Note that each
    # row is padded with None at the end to make the rows word-aligned.
    assert _get_tensor_halfwords((48, 3, 3), 0, 1, 1) == [
        (0, 0), (0, 1), (0, 2), None,
        (1, 0), (1, 1), (1, 2), None,
        (2, 0), (2, 1), (2, 2), None
    ]
    # If the tensor width is odd, padding alternates before/after every row.
    assert _get_tensor_halfwords((49, 3, 3), 0, 1, 1) == [
        (0, 0), (0, 1), (0, 2), None,
        None, (1, 0), (1, 1), (1, 2),
        (2, 0), (2, 1), (2, 2), None
    ]
    # If we are computing multiple outputs, more tensor data becomes relevant.
    assert _get_tensor_halfwords((48, 3, 3), 0, 2, 1) == [
        (0, 0), (0, 1), (0, 2), (0, 3),
        (1, 0), (1, 1), (1, 2), (1, 3),
        (2, 0), (2, 1), (2, 2), (2, 3)
    ]
    # If offset=1, relevant data starts one halfword after the tensor pointer.
    assert _get_tensor_halfwords((48, 3, 3), 1, 1, 1) == [
        None, (0, 0), (0, 1), (0, 2),
        None, (1, 0), (1, 1), (1, 2),
        None, (2, 0), (2, 1), (2, 2)
    ]
    # These adjustments can be (and often are) used together.
    assert _get_tensor_halfwords((49, 3, 3), 1, 2, 2) == [
        None, (0, 0), (0, 1), (0, 2), (0, 3), (0, 4),
        (1, 0), (1, 1), (1, 2), (1, 3), (1, 4), None,
        None, (2, 0), (2, 1), (2, 2), (2, 3), (2, 4)
    ]
    # fmt: on
def test_get_kernel_halfwords():
    """Tests the _get_kernel_halfwords helper function in tensordot.py.

    This function loads the logical indices of the data that will be stored in memory at the kernel
    pointer. See the function docstring for more details.
    """
    # fmt: off
    # Example of a kernel for a 3x3 depthwise convolution channel.  Nine
    # halfwords is odd, so a trailing None presumably pads the kernel out to a
    # word boundary (mirroring the tensor-row padding above).
    assert _get_kernel_halfwords((96, 3, 3), 0) == [
        (0, 0), (0, 1), (0, 2),
        (1, 0), (1, 1), (1, 2),
        (2, 0), (2, 1), (2, 2),
        None,
    ]
    # Example of a kernel for a 1x1 regular convolution with 4 channels.
    # With offset=1 the data starts one halfword in, so None appears on both
    # ends to keep the span word-aligned.
    assert _get_kernel_halfwords((48, 1, 4), 1) == [
        None, (0, 0), (0, 1), (0, 2), (0, 3), None,
    ]
    # fmt: on
def test_write_3x3_depthwise_code():
    """This is the function that would be generated for a 1x4x48x48 NCHW input tensor with "SAME"
    padding. We are only computing one sum at once, so we don't need stride or output. Note that
    this is pretty inefficient - it would be much better to compute a few sums concurrently.

    When inlined, this code compiles (with armv7-a clang 11) into:
    tensordot_opt_x1_int16_w48_3x3_000(int*, int*, int*, int*, int*):
      ldr.w lr, [r3]
      ldrd r11, r4, [r1]
      ldrd r5, r9, [r1, #96]
      ldrd r10, r8, [r1, #192]
      ldm.w r2, {r1, r6, r7}
      ldr.w r12, [sp, #36]
      smlad r1, r11, r1, lr
      smlabb r1, r4, r6, r1
      smlatb r1, r6, r5, r1
      ldrd r3, r2, [r2, #12]
      smlatb r1, r5, r7, r1
      smlatb r1, r7, r9, r1
      smlad r1, r10, r3, r1
      ldr.w r3, [r12]
      smlabb r1, r8, r2, r1
      smmul r1, r3, r1
      ssat r1, #8, r1, asr #8
      strh r1, [r0]
    """
    # Args: one output, tensor dims (width 48, 3x3 kernel), all offsets zero,
    # strides (1, 1).  The generated C must match character for character —
    # instruction pairing (smlad vs smlabb/smlatb variants) is the contract.
    _, code = tensordot_int16_impl(1, (48, 3, 3), (0, 0, 0), (1, 1))
    assert code == textwrap.dedent(
        """
        #ifndef TENSORDOT_OPT_X1_INT16_W48_3X3_000_EXISTS
        #define TENSORDOT_OPT_X1_INT16_W48_3X3_000_EXISTS
        #include <arm_acle.h>
        __attribute__((always_inline)) static inline int32_t tensordot_opt_x1_int16_w48_3x3_000(
          int32_t *output, int32_t *tensor, int32_t *kernel, int32_t *bias, int32_t *scale
        ) {
          int32_t sum_0 = *bias;
          int32_t tensor__y00_x00__y00_x01 = tensor[0];
          int32_t tensor__y00_x02__unknown = tensor[1];
          int32_t tensor__y01_x00__y01_x01 = tensor[24];
          int32_t tensor__y01_x02__unknown = tensor[25];
          int32_t tensor__y02_x00__y02_x01 = tensor[48];
          int32_t tensor__y02_x02__unknown = tensor[49];
          int32_t kernel__y00_x00__y00_x01 = kernel[0];
          int32_t kernel__y00_x02__y01_x00 = kernel[1];
          int32_t kernel__y01_x01__y01_x02 = kernel[2];
          int32_t kernel__y02_x00__y02_x01 = kernel[3];
          int32_t kernel__y02_x02__unknown = kernel[4];
          sum_0 = __smlad(tensor__y00_x00__y00_x01, kernel__y00_x00__y00_x01, sum_0);
          sum_0 = __smlabb(tensor__y00_x02__unknown, kernel__y00_x02__y01_x00, sum_0);
          sum_0 = __smlabt(tensor__y01_x00__y01_x01, kernel__y00_x02__y01_x00, sum_0);
          sum_0 = __smlatb(tensor__y01_x00__y01_x01, kernel__y01_x01__y01_x02, sum_0);
          sum_0 = __smlabt(tensor__y01_x02__unknown, kernel__y01_x01__y01_x02, sum_0);
          sum_0 = __smlad(tensor__y02_x00__y02_x01, kernel__y02_x00__y02_x01, sum_0);
          sum_0 = __smlabb(tensor__y02_x02__unknown, kernel__y02_x02__unknown, sum_0);
          int32_t scale_val = *scale;
          int32_t requant_0 = (sum_0 * (int64_t) scale_val) >> 32;
          requant_0 = (requant_0 + 1) >> 1;
          requant_0 = __ssat(requant_0 + -128, 8);
          ((int16_t*) output)[0] = (int16_t) requant_0;
          return 0;
        }
        #endif
        """
    )
def test_odd_width_3x3_depthwise_strides_code():
    """This is the function that would be generated for a 1x4x48x48 NCHW input tensor with "SAME"
    padding and (2, 2) strides, being written into NHWC layout. The layout change is encoded by
    out_stride = 4. This is a common use case seen in MobileNetV1, among others.

    Note that despite the rows not being word-aligned, the *tensor pointer will always be word
    aligned (satisfying this requirement) since y_stride = 2."""
    # Two sums per call (x2) over an odd-width (49) tensor; strides (2, 4):
    # tensor step 2 between the sums, output step 4 halfwords (the NHWC write).
    _, code = tensordot_int16_impl(2, (49, 3, 3), (0, 0, 0), (2, 4))
    assert code == textwrap.dedent(
        """
        #ifndef TENSORDOT_OPT_X2_INT16_W49_3X3_000_2_4_EXISTS
        #define TENSORDOT_OPT_X2_INT16_W49_3X3_000_2_4_EXISTS
        #include <arm_acle.h>
        __attribute__((always_inline)) static inline int32_t tensordot_opt_x2_int16_w49_3x3_000_2_4(
          int32_t *output, int32_t *tensor, int32_t *kernel, int32_t *bias, int32_t *scale
        ) {
          int32_t sum_0 = *bias, sum_1 = *bias;
          int32_t tensor__y00_x00__y00_x01 = tensor[0];
          int32_t tensor__y00_x02__y00_x03 = tensor[1];
          int32_t tensor__y00_x04__unknown = tensor[2];
          int32_t tensor__unknown__y01_x00 = tensor[24];
          int32_t tensor__y01_x01__y01_x02 = tensor[25];
          int32_t tensor__y01_x03__y01_x04 = tensor[26];
          int32_t tensor__y02_x00__y02_x01 = tensor[49];
          int32_t tensor__y02_x02__y02_x03 = tensor[50];
          int32_t tensor__y02_x04__unknown = tensor[51];
          int32_t kernel__y00_x00__y00_x01 = kernel[0];
          int32_t kernel__y00_x02__y01_x00 = kernel[1];
          int32_t kernel__y01_x01__y01_x02 = kernel[2];
          int32_t kernel__y02_x00__y02_x01 = kernel[3];
          int32_t kernel__y02_x02__unknown = kernel[4];
          sum_0 = __smlad(tensor__y00_x00__y00_x01, kernel__y00_x00__y00_x01, sum_0);
          sum_0 = __smlabb(tensor__y00_x02__y00_x03, kernel__y00_x02__y01_x00, sum_0);
          sum_0 = __smlatt(tensor__unknown__y01_x00, kernel__y00_x02__y01_x00, sum_0);
          sum_0 = __smlad(tensor__y01_x01__y01_x02, kernel__y01_x01__y01_x02, sum_0);
          sum_0 = __smlad(tensor__y02_x00__y02_x01, kernel__y02_x00__y02_x01, sum_0);
          sum_0 = __smlabb(tensor__y02_x02__y02_x03, kernel__y02_x02__unknown, sum_0);
          sum_1 = __smlad(tensor__y00_x02__y00_x03, kernel__y00_x00__y00_x01, sum_1);
          sum_1 = __smlabb(tensor__y00_x04__unknown, kernel__y00_x02__y01_x00, sum_1);
          sum_1 = __smlatt(tensor__y01_x01__y01_x02, kernel__y00_x02__y01_x00, sum_1);
          sum_1 = __smlad(tensor__y01_x03__y01_x04, kernel__y01_x01__y01_x02, sum_1);
          sum_1 = __smlad(tensor__y02_x02__y02_x03, kernel__y02_x00__y02_x01, sum_1);
          sum_1 = __smlabb(tensor__y02_x04__unknown, kernel__y02_x02__unknown, sum_1);
          int32_t scale_val = *scale;
          int32_t requant_0 = (sum_0 * (int64_t) scale_val) >> 32;
          requant_0 = (requant_0 + 1) >> 1;
          requant_0 = __ssat(requant_0 + -128, 8);
          int32_t requant_1 = (sum_1 * (int64_t) scale_val) >> 32;
          requant_1 = (requant_1 + 1) >> 1;
          requant_1 = __ssat(requant_1 + -128, 8);
          ((int16_t*) output)[0] = (int16_t) requant_0;
          ((int16_t*) output)[4] = (int16_t) requant_1;
          return 0;
        }
        #endif
        """
    )
def test_1x1x8_convolution_code():
    """This is the function that would be generated for a 1x48x48x8 NHWC input tensor under
    standard convolution with a 1x1 kernel. This is a common use case seen in MobileNetV1,
    among others. In this scenario, a very high amount of memory re-use means that summing
    four channels at once makes us faster."""
    # Four sums per call (x4); logical width 48*8 halfwords (row-major NHWC),
    # kernel is one row of 8 halfwords, strides (8, 1).  Results are packed
    # pairwise with pkhbt and written as two int32 words.
    _, code = tensordot_int16_impl(4, (48 * 8, 1, 8), (0, 0, 0), (8, 1))
    assert code == textwrap.dedent(
        """
        #ifndef TENSORDOT_OPT_X4_INT16_W384_1X8_000_8_1_EXISTS
        #define TENSORDOT_OPT_X4_INT16_W384_1X8_000_8_1_EXISTS
        #include <arm_acle.h>
        __attribute__((always_inline)) static inline int32_t tensordot_opt_x4_int16_w384_1x8_000_8_1(
          int32_t *output, int32_t *tensor, int32_t *kernel, int32_t *bias, int32_t *scale
        ) {
          int32_t sum_0 = *bias, sum_1 = *bias, sum_2 = *bias, sum_3 = *bias;
          int32_t tensor__y00_x00__y00_x01 = tensor[0];
          int32_t tensor__y00_x02__y00_x03 = tensor[1];
          int32_t tensor__y00_x04__y00_x05 = tensor[2];
          int32_t tensor__y00_x06__y00_x07 = tensor[3];
          int32_t tensor__y00_x08__y00_x09 = tensor[4];
          int32_t tensor__y00_x0a__y00_x0b = tensor[5];
          int32_t tensor__y00_x0c__y00_x0d = tensor[6];
          int32_t tensor__y00_x0e__y00_x0f = tensor[7];
          int32_t tensor__y00_x10__y00_x11 = tensor[8];
          int32_t tensor__y00_x12__y00_x13 = tensor[9];
          int32_t tensor__y00_x14__y00_x15 = tensor[10];
          int32_t tensor__y00_x16__y00_x17 = tensor[11];
          int32_t tensor__y00_x18__y00_x19 = tensor[12];
          int32_t tensor__y00_x1a__y00_x1b = tensor[13];
          int32_t tensor__y00_x1c__y00_x1d = tensor[14];
          int32_t tensor__y00_x1e__y00_x1f = tensor[15];
          int32_t kernel__y00_x00__y00_x01 = kernel[0];
          int32_t kernel__y00_x02__y00_x03 = kernel[1];
          int32_t kernel__y00_x04__y00_x05 = kernel[2];
          int32_t kernel__y00_x06__y00_x07 = kernel[3];
          sum_0 = __smlad(tensor__y00_x00__y00_x01, kernel__y00_x00__y00_x01, sum_0);
          sum_0 = __smlad(tensor__y00_x02__y00_x03, kernel__y00_x02__y00_x03, sum_0);
          sum_0 = __smlad(tensor__y00_x04__y00_x05, kernel__y00_x04__y00_x05, sum_0);
          sum_0 = __smlad(tensor__y00_x06__y00_x07, kernel__y00_x06__y00_x07, sum_0);
          sum_1 = __smlad(tensor__y00_x08__y00_x09, kernel__y00_x00__y00_x01, sum_1);
          sum_1 = __smlad(tensor__y00_x0a__y00_x0b, kernel__y00_x02__y00_x03, sum_1);
          sum_1 = __smlad(tensor__y00_x0c__y00_x0d, kernel__y00_x04__y00_x05, sum_1);
          sum_1 = __smlad(tensor__y00_x0e__y00_x0f, kernel__y00_x06__y00_x07, sum_1);
          sum_2 = __smlad(tensor__y00_x10__y00_x11, kernel__y00_x00__y00_x01, sum_2);
          sum_2 = __smlad(tensor__y00_x12__y00_x13, kernel__y00_x02__y00_x03, sum_2);
          sum_2 = __smlad(tensor__y00_x14__y00_x15, kernel__y00_x04__y00_x05, sum_2);
          sum_2 = __smlad(tensor__y00_x16__y00_x17, kernel__y00_x06__y00_x07, sum_2);
          sum_3 = __smlad(tensor__y00_x18__y00_x19, kernel__y00_x00__y00_x01, sum_3);
          sum_3 = __smlad(tensor__y00_x1a__y00_x1b, kernel__y00_x02__y00_x03, sum_3);
          sum_3 = __smlad(tensor__y00_x1c__y00_x1d, kernel__y00_x04__y00_x05, sum_3);
          sum_3 = __smlad(tensor__y00_x1e__y00_x1f, kernel__y00_x06__y00_x07, sum_3);
          int32_t scale_val = *scale;
          int32_t requant_0 = (sum_0 * (int64_t) scale_val) >> 32;
          requant_0 = (requant_0 + 1) >> 1;
          requant_0 = __ssat(requant_0 + -128, 8);
          int32_t requant_1 = (sum_1 * (int64_t) scale_val) >> 32;
          requant_1 = (requant_1 + 1) >> 1;
          requant_1 = __ssat(requant_1 + -128, 8);
          int32_t requant_2 = (sum_2 * (int64_t) scale_val) >> 32;
          requant_2 = (requant_2 + 1) >> 1;
          requant_2 = __ssat(requant_2 + -128, 8);
          int32_t requant_3 = (sum_3 * (int64_t) scale_val) >> 32;
          requant_3 = (requant_3 + 1) >> 1;
          requant_3 = __ssat(requant_3 + -128, 8);
          int packed_res_0;
          __asm__ ("pkhbt %0, %1, %2, lsl #16" : "=r" (packed_res_0) : "r" (requant_0), "r" (requant_1));
          int packed_res_1;
          __asm__ ("pkhbt %0, %1, %2, lsl #16" : "=r" (packed_res_1) : "r" (requant_2), "r" (requant_3));
          output[0] = packed_res_0;
          output[1] = packed_res_1;
          return 0;
        }
        #endif
        """
    )
def test_3x3x3_offset_convolution_code():
    """This is the function that would be generated for a 1x96x96x3 NHWC input tensor under
    standard convolution with a 3x3x3 kernel - the first layer of MobileNetV1. This is special, as
    it means that every other kernel channel will not start on an even numbered halfword. We won't
    have this issue for the input tensor, as we will always compute two positions at a time.
    To solve this 'every other' issue, we will need two different version of this function to
    alternate between. This alternation will be handled in TIR scheduling. Here, we just test the
    version where the kernel is not word aligned.
    Also tests the requantize_shift and output_zero_point keyword args. These might be needed for
    some ResNet models (like image classification from MLPerf Tiny).
    """
    # One output position (x1), int16 data, tensor row stride 96*3 halfwords,
    # 3x9 kernel, all-1 strides, kernel offset (3, 1) -> not word aligned.
    _, code = tensordot_int16_impl(
        1,
        (96 * 3, 3, 9),
        (1, 1, 1),
        (3, 1),
        requantize_shift=40,
        output_zero_point=4,
    )
    # NOTE(review): the expected-code literal below appears to have lost its original
    # relative leading whitespace during extraction; its token content is kept as-is.
    assert code == textwrap.dedent(
        """
#ifndef TENSORDOT_OPT_X1_INT16_W288_3X9_111_EXISTS
#define TENSORDOT_OPT_X1_INT16_W288_3X9_111_EXISTS
#include <arm_acle.h>
__attribute__((always_inline)) static inline int32_t tensordot_opt_x1_int16_w288_3x9_111(
int32_t *output, int32_t *tensor, int32_t *kernel, int32_t *bias, int32_t *scale
) {
int32_t sum_0 = *bias;
int32_t tensor__unknown__y00_x00 = tensor[0];
int32_t tensor__y00_x01__y00_x02 = tensor[1];
int32_t tensor__y00_x03__y00_x04 = tensor[2];
int32_t tensor__y00_x05__y00_x06 = tensor[3];
int32_t tensor__y00_x07__y00_x08 = tensor[4];
int32_t tensor__unknown__y01_x00 = tensor[144];
int32_t tensor__y01_x01__y01_x02 = tensor[145];
int32_t tensor__y01_x03__y01_x04 = tensor[146];
int32_t tensor__y01_x05__y01_x06 = tensor[147];
int32_t tensor__y01_x07__y01_x08 = tensor[148];
int32_t tensor__unknown__y02_x00 = tensor[288];
int32_t tensor__y02_x01__y02_x02 = tensor[289];
int32_t tensor__y02_x03__y02_x04 = tensor[290];
int32_t tensor__y02_x05__y02_x06 = tensor[291];
int32_t tensor__y02_x07__y02_x08 = tensor[292];
int32_t kernel__unknown__y00_x00 = kernel[0];
int32_t kernel__y00_x01__y00_x02 = kernel[1];
int32_t kernel__y00_x03__y00_x04 = kernel[2];
int32_t kernel__y00_x05__y00_x06 = kernel[3];
int32_t kernel__y00_x07__y00_x08 = kernel[4];
int32_t kernel__y01_x00__y01_x01 = kernel[5];
int32_t kernel__y01_x02__y01_x03 = kernel[6];
int32_t kernel__y01_x04__y01_x05 = kernel[7];
int32_t kernel__y01_x06__y01_x07 = kernel[8];
int32_t kernel__y01_x08__y02_x00 = kernel[9];
int32_t kernel__y02_x01__y02_x02 = kernel[10];
int32_t kernel__y02_x03__y02_x04 = kernel[11];
int32_t kernel__y02_x05__y02_x06 = kernel[12];
int32_t kernel__y02_x07__y02_x08 = kernel[13];
sum_0 = __smlatt(tensor__unknown__y00_x00, kernel__unknown__y00_x00, sum_0);
sum_0 = __smlad(tensor__y00_x01__y00_x02, kernel__y00_x01__y00_x02, sum_0);
sum_0 = __smlad(tensor__y00_x03__y00_x04, kernel__y00_x03__y00_x04, sum_0);
sum_0 = __smlad(tensor__y00_x05__y00_x06, kernel__y00_x05__y00_x06, sum_0);
sum_0 = __smlad(tensor__y00_x07__y00_x08, kernel__y00_x07__y00_x08, sum_0);
sum_0 = __smlatb(tensor__unknown__y01_x00, kernel__y01_x00__y01_x01, sum_0);
sum_0 = __smlabt(tensor__y01_x01__y01_x02, kernel__y01_x00__y01_x01, sum_0);
sum_0 = __smlatb(tensor__y01_x01__y01_x02, kernel__y01_x02__y01_x03, sum_0);
sum_0 = __smlabt(tensor__y01_x03__y01_x04, kernel__y01_x02__y01_x03, sum_0);
sum_0 = __smlatb(tensor__y01_x03__y01_x04, kernel__y01_x04__y01_x05, sum_0);
sum_0 = __smlabt(tensor__y01_x05__y01_x06, kernel__y01_x04__y01_x05, sum_0);
sum_0 = __smlatb(tensor__y01_x05__y01_x06, kernel__y01_x06__y01_x07, sum_0);
sum_0 = __smlabt(tensor__y01_x07__y01_x08, kernel__y01_x06__y01_x07, sum_0);
sum_0 = __smlatb(tensor__y01_x07__y01_x08, kernel__y01_x08__y02_x00, sum_0);
sum_0 = __smlatt(tensor__unknown__y02_x00, kernel__y01_x08__y02_x00, sum_0);
sum_0 = __smlad(tensor__y02_x01__y02_x02, kernel__y02_x01__y02_x02, sum_0);
sum_0 = __smlad(tensor__y02_x03__y02_x04, kernel__y02_x03__y02_x04, sum_0);
sum_0 = __smlad(tensor__y02_x05__y02_x06, kernel__y02_x05__y02_x06, sum_0);
sum_0 = __smlad(tensor__y02_x07__y02_x08, kernel__y02_x07__y02_x08, sum_0);
int32_t scale_val = *scale;
int32_t requant_0 = (sum_0 * (int64_t) scale_val) >> 39;
requant_0 = (requant_0 + 1) >> 1;
requant_0 = __ssat(requant_0 + 4, 8);
((int16_t*) output)[1] = (int16_t) requant_0;
return 0;
}
#endif
"""
    )
| 19,142 | 44.796651 | 101 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_pooling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements, unused-argument
"""Test code for pooling"""
import math
import pytest
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi, TVMError
from tvm.topi.utils import get_const_tuple
# Per-backend schedule dispatch tables, consumed via tvm.topi.testing.dispatch.
_pool_schedule = {
    "generic": topi.generic.schedule_pool,
    "cpu": topi.x86.schedule_pool,
    "gpu": topi.cuda.schedule_pool,
    "hls": topi.hls.schedule_pool,
}

# Schedules for global/adaptive pooling.
_adaptive_pool_schedule = {
    "generic": topi.generic.schedule_adaptive_pool,
    "cpu": topi.x86.schedule_adaptive_pool,
    "gpu": topi.cuda.schedule_adaptive_pool,
    "hls": topi.hls.schedule_adaptive_pool,
}

# Schedules for the pooling gradient op (only generic and CUDA entries here).
_pool_grad_schedule = {
    "generic": topi.generic.schedule_pool_grad,
    "gpu": topi.cuda.schedule_pool_grad,
}
def verify_pool_grad(
    n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, add_relu=False
):
    """verify function of pool_grad

    Builds a square NCHW pool2d and its gradient, then checks the TVM result
    against ``tvm.topi.testing.pool_grad_nchw`` on every enabled target.
    Width, kernel width and stride width are taken equal to their height
    counterparts (square workload).
    """
    iw = ih
    kw = kh
    sw = sh
    pt, pl, pb, pr = padding
    A = te.placeholder((n, ic, ih, iw), name="A")
    B = topi.nn.pool2d(
        A,
        kernel=[kh, kw],
        stride=[sh, sw],
        dilation=[1, 1],
        padding=padding,
        pool_type=pool_type,
        ceil_mode=ceil_mode,
        layout="NCHW",
        count_include_pad=count_include_pad,
    )
    dtype = A.dtype
    bshape = get_const_tuple(B.shape)
    ashape = get_const_tuple(A.shape)
    # Sanity-check the pooled output shape against the ceil/floor formulas.
    if ceil_mode:
        assert bshape[2] == int(math.ceil(float(ashape[2] - kh + pt + pb) / sh) + 1)
        assert bshape[3] == int(math.ceil(float(ashape[3] - kw + pl + pr) / sw) + 1)
    else:
        assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1)
        assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1)
    OutGrad = te.placeholder(bshape, name="OutGrad")
    PoolGrad = topi.nn.pool_grad(
        OutGrad,
        A,
        kernel=[kh, kw],
        stride=[sh, sw],
        padding=padding,
        pool_type=pool_type,
        ceil_mode=ceil_mode,
        layout="NCHW",
        count_include_pad=count_include_pad,
    )
    if add_relu:
        PoolGrad = topi.nn.relu(PoolGrad)

    a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype)
    out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype)
    # Numpy reference gradient.
    pool_grad_np = tvm.topi.testing.pool_grad_nchw(
        a_np,
        out_grad_np,
        pool_size=(kh, kw),
        strides=(sh, sw),
        padding=padding,
        pool_type=pool_type,
        ceil_mode=ceil_mode,
        count_include_pad=count_include_pad,
    )
    if add_relu:
        pool_grad_np = np.maximum(pool_grad_np, 0.0)

    def check_target(target, dev):
        # Build and run on a single target, comparing against the reference.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s_func = tvm.topi.testing.dispatch(target, _pool_grad_schedule)
            s = s_func(PoolGrad)

        a = tvm.nd.array(a_np, dev)
        out_grad = tvm.nd.array(out_grad_np, dev)
        pool_grad = tvm.nd.array(np.zeros(get_const_tuple(PoolGrad.shape), dtype=dtype), dev)
        f = tvm.build(s, [A, OutGrad, PoolGrad], target)
        f(a, out_grad, pool_grad)
        tvm.testing.assert_allclose(pool_grad.numpy(), pool_grad_np, rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_pool_grad():
    """test cases of pool_grad"""
    # (ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad, add_relu);
    # batch and channels are fixed at (1, 256) for every workload.
    workloads = [
        (32, 3, 2, [1, 1, 1, 1], "avg", False, False, False),
        (32, 2, 2, [0, 0, 0, 0], "avg", False, True, False),
        (31, 3, 3, [1, 2, 1, 2], "avg", False, True, False),
        (32, 2, 2, [1, 2, 1, 2], "avg", False, False, False),
        (31, 4, 4, [2, 2, 2, 2], "avg", False, False, False),
        (31, 4, 4, [0, 0, 0, 0], "avg", False, False, False),
        (32, 2, 2, [0, 0, 0, 0], "max", False, True, False),
        (31, 3, 3, [2, 1, 2, 1], "max", False, True, False),
        (31, 3, 3, [2, 1, 2, 1], "max", True, True, False),
        (31, 3, 3, [2, 1, 0, 3], "avg", False, True, False),
        (32, 2, 2, [0, 3, 2, 1], "avg", False, False, False),
        (31, 3, 3, [1, 0, 3, 2], "max", False, True, False),
        (31, 3, 3, [3, 2, 1, 0], "max", True, True, False),
        (32, 3, 2, [1, 1, 1, 1], "max", False, True, False),
        (32, 1, 2, [1, 1, 1, 1], "avg", False, False, False),
        (31, 4, 4, [0, 0, 0, 0], "avg", False, False, True),
        (32, 2, 2, [0, 0, 0, 0], "max", False, True, True),
    ]
    for ih, kh, sh, padding, pool_type, ceil_mode, include_pad, add_relu in workloads:
        verify_pool_grad(1, 256, ih, kh, sh, padding, pool_type, ceil_mode, include_pad, add_relu)
def verify_global_pool(dshape, pool_type, layout="NCHW"):
    """verify function of global_pool

    Builds global_pool followed by relu and compares against a numpy
    mean/max reduction over the spatial axes on every enabled target.
    """
    assert layout in ["NCHW", "NHWC"]
    A = te.placeholder(shape=dshape, name="A")
    B = topi.nn.global_pool(A, pool_type=pool_type, layout=layout)
    B = topi.nn.relu(B)

    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    # Spatial axis positions depend on the layout string.
    axis = (layout.find("H"), layout.find("W"))
    if pool_type == "avg":
        b_np = np.mean(a_np, axis=axis, keepdims=True)
    elif pool_type == "max":
        b_np = np.max(a_np, axis=axis, keepdims=True)
    b_np = np.maximum(b_np, 0.0)

    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
            # The CUDA adaptive-pool schedule also takes the layout.
            if target == "cuda":
                s = s_func(B, layout)
            else:
                s = s_func(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_global_pool():
    """test cases of global_pool"""
    # NCHW workloads: avg then max, batch 1 then 4.
    for pool_type in ("avg", "max"):
        for batch in (1, 4):
            verify_global_pool((batch, 1024, 7, 7), pool_type)
    # Channel-last equivalents.
    for pool_type in ("avg", "max"):
        for batch in (1, 4):
            verify_global_pool((batch, 7, 7, 1024), pool_type, "NHWC")
def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"):
    """verify function of adaptive_pool

    Handles both 2-D (``len(out_size) == 2``) and 3-D (``len(out_size) == 3``)
    adaptive pooling; the reference comes from ``tvm.topi.testing.adaptive_pool``.
    """
    np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype)
    np_out = tvm.topi.testing.adaptive_pool(np_data, out_size, pool_type, layout)
    oshape = np_out.shape

    data = te.placeholder(dshape, name="data", dtype=dtype)
    # Pick the 2-D or 3-D op based on the requested output rank.
    if len(out_size) == 2:
        out = topi.nn.adaptive_pool(data, out_size, pool_type, layout)
    else:
        assert len(out_size) == 3
        out = topi.nn.adaptive_pool3d(data, out_size, pool_type, layout)

    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s_func = tvm.topi.testing.dispatch(target, _adaptive_pool_schedule)
            # The CUDA adaptive-pool schedule also takes the layout.
            if target == "cuda":
                s = s_func(out, layout)
            else:
                s = s_func(out)
        a = tvm.nd.array(np_data, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(oshape), dtype=out.dtype), dev)
        f = tvm.build(s, [data, out], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), np_out, rtol=4e-5, atol=1e-6)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_adaptive_pool():
    """test cases of adaptive_pool"""
    # (input_shape, output_size, pool_type, layout)
    workloads = [
        ((1, 3, 224, 224), (1, 1), "max", "NCHW"),
        ((1, 3, 224, 224), (1, 1), "avg", "NCHW"),
        ((1, 14, 56, 78), (34, 13), "max", "NCHW"),
        ((1, 5, 46, 97), (4, 96), "avg", "NCHW"),
        ((1, 224, 224, 3), (1, 1), "max", "NHWC"),
        ((1, 5, 46, 97), (4, 96), "avg", "NHWC"),
        ((1, 16, 32, 32, 32), (1, 1, 1), "max", "NCDHW"),
        ((1, 16, 32, 32, 32), (1, 1, 1), "avg", "NCDHW"),
        ((1, 16, 32, 32, 32), (2, 2, 2), "avg", "NCDHW"),
        ((1, 16, 64, 32, 32), (7, 8, 9), "avg", "NCDHW"),
        ((1, 16, 64, 32, 32), (8, 16, 16), "avg", "NCDHW"),
        ((1, 16, 32, 32, 32), (1, 1, 1), "avg", "NDHWC"),
        ((1, 16, 32, 32, 32), (2, 2, 2), "max", "NDHWC"),
        ((1, 16, 32, 32, 32), (2, 4, 4), "max", "NDHWC"),
    ]
    for dshape, out_size, pool_type, layout in workloads:
        verify_adaptive_pool(dshape, out_size, pool_type, layout)
def verify_poolnd(
    n,
    input_shape,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode,
    layout,
    count_include_pad=True,
):
    """verify function of pool1d/pool2d/pool3d

    ``n`` selects the dimensionality (1, 2 or 3). ``padding`` holds the
    before-padding for each spatial axis followed by the after-padding
    (2*n entries total). The pooled output (with a fused relu) is compared
    against ``tvm.topi.testing.poolnd_python`` on every enabled target.
    """
    A = te.placeholder(input_shape, name="A")

    if n == 1:
        B = topi.nn.pool1d(
            A,
            kernel=kernel,
            stride=stride,
            dilation=dilation,
            padding=padding,
            pool_type=pool_type,
            ceil_mode=ceil_mode,
            layout=layout,
            count_include_pad=count_include_pad,
        )
    elif n == 2:
        B = topi.nn.pool2d(
            A,
            kernel=kernel,
            stride=stride,
            dilation=dilation,
            padding=padding,
            pool_type=pool_type,
            ceil_mode=ceil_mode,
            layout=layout,
            count_include_pad=count_include_pad,
        )
    elif n == 3:
        B = topi.nn.pool3d(
            A,
            kernel=kernel,
            stride=stride,
            dilation=dilation,
            padding=padding,
            pool_type=pool_type,
            ceil_mode=ceil_mode,
            layout=layout,
            count_include_pad=count_include_pad,
        )
    else:
        raise ValueError(f"PoolND only supports n=1, 2, 3 got n={n}")

    B = topi.nn.relu(B)
    dtype = A.dtype
    output_shape = [int(i) for i in B.shape]

    input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype)

    # Split the flat padding list into per-axis before/after halves.
    padding_before = padding[:n]
    padding_after = padding[n:]
    ref_np = tvm.topi.testing.poolnd_python(
        input_np,
        kernel,
        stride,
        dilation,
        padding_before,
        padding_after,
        pool_type,
        count_include_pad,
        ceil_mode,
        layout=layout,
    )
    # The TE-inferred output shape must match the reference implementation's.
    np.testing.assert_equal(tuple(output_shape), tuple(ref_np.shape))

    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s_func = tvm.topi.testing.dispatch(target, _pool_schedule)
            s = s_func(B, layout)

        a = tvm.nd.array(input_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), ref_np, rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def verify_pool3d(
    input_shape,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode,
    count_include_pad=True,
    layout="NCDHW",
):
    """Check a single 3-D pooling workload by delegating to verify_poolnd."""
    verify_poolnd(
        3, input_shape, kernel, stride, dilation, padding, pool_type, ceil_mode,
        layout=layout, count_include_pad=count_include_pad,
    )
@tvm.testing.uses_gpu
def test_pool3d():
    """test cases of pool3d"""
    # Each entry: (input_shape, kernel, stride, dilation, padding,
    #              pool_type, ceil_mode, count_include_pad)
    basic_ncdhw = [
        ([1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, True),
        ([1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, False),
        ([1, 16, 31, 31, 31], [4, 4, 4], [4, 4, 4], [1, 1, 1], [3, 3, 3, 3, 3, 3], "avg", False, False),
        ([1, 16, 31, 31, 31], [4, 4, 4], [4, 4, 4], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, False),
        ([1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "max", False, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", False, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", True, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 1, 0, 5, 4, 3], "avg", False, True),
        ([1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 5, 4, 3, 2, 1], "avg", False, False),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 0, 5, 4, 3, 2], "max", False, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [1, 1, 1], [3, 2, 1, 0, 5, 4], "max", True, True),
    ]
    # Non-1 dilation workloads.
    dilated_ncdhw = [
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [3, 3, 3], [2, 1, 0, 5, 4, 3], "avg", False, True),
        ([1, 16, 32, 32, 32], [2, 2, 2], [2, 2, 2], [2, 2, 2], [0, 5, 4, 3, 2, 1], "avg", False, False),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 1, 3], [1, 0, 5, 4, 3, 2], "max", False, True),
        ([1, 16, 31, 31, 31], [3, 3, 3], [3, 3, 3], [2, 2, 3], [3, 2, 1, 0, 5, 4], "max", True, True),
    ]
    # Channel-last (NDHWC) mirrors of the basic workloads.
    channel_last = [
        ([1, 32, 32, 32, 16], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, True),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, True),
        ([1, 32, 32, 32, 16], [2, 2, 2], [2, 2, 2], [1, 1, 1], [1, 1, 2, 2, 2, 1], "avg", False, False),
        ([1, 31, 31, 31, 16], [4, 4, 4], [4, 4, 4], [1, 1, 1], [3, 3, 3, 3, 3, 3], "avg", False, False),
        ([1, 31, 31, 31, 16], [4, 4, 4], [4, 4, 4], [1, 1, 1], [0, 0, 0, 0, 0, 0], "avg", False, False),
        ([1, 32, 32, 32, 16], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 0, 0, 0, 0, 0], "max", False, True),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", False, True),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 2, 1, 1, 1, 2], "max", True, True),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [2, 1, 0, 5, 4, 3], "avg", False, True),
        ([1, 32, 32, 32, 16], [2, 2, 2], [2, 2, 2], [1, 1, 1], [0, 5, 4, 3, 2, 1], "avg", False, False),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [1, 0, 5, 4, 3, 2], "max", False, True),
        ([1, 31, 31, 31, 16], [3, 3, 3], [3, 3, 3], [1, 1, 1], [3, 2, 1, 0, 5, 4], "max", True, True),
    ]

    for workload in basic_ncdhw + dilated_ncdhw:
        verify_pool3d(*workload)
    for workload in channel_last:
        verify_pool3d(*workload, layout="NDHWC")
    # The dilated NCDHW workloads are exercised a second time after the
    # channel-last runs (preserving the original call sequence).
    for workload in dilated_ncdhw:
        verify_pool3d(*workload)
def verify_pool2d(
    input_shape,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode,
    count_include_pad=True,
    layout="NCHW",
):
    """Check a single 2-D pooling workload by delegating to verify_poolnd."""
    verify_poolnd(
        2, input_shape, kernel, stride, dilation, padding, pool_type, ceil_mode,
        layout=layout, count_include_pad=count_include_pad,
    )
@tvm.testing.uses_gpu
def test_pool2d():
    """test cases of pool"""
    # Each entry: (input_shape, kernel, stride, dilation, padding,
    #              pool_type, ceil_mode, count_include_pad)
    nchw_workloads = [
        ([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True),
        ([1, 16, 32, 32], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False),
        ([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False),
        ([1, 16, 31, 31], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False),
        ([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True),
        ([1, 16, 32, 32], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, True),
        # Non-1 dilations.
        ([1, 16, 31, 31], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True),
        ([1, 16, 32, 32], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False),
        ([1, 16, 31, 31], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, True),
        ([1, 16, 31, 31], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, True),
    ]
    # Channel-last (NHWC) mirrors, including the dilated variants.
    nhwc_workloads = [
        ([1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [0, 0, 0, 0], "avg", False, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 2, 1, 2], "avg", False, True),
        ([1, 32, 32, 16], [2, 2], [2, 2], [1, 1], [1, 2, 1, 2], "avg", False, False),
        ([1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [3, 3, 3, 3], "avg", False, False),
        ([1, 31, 31, 16], [4, 4], [4, 4], [1, 1], [0, 0, 0, 0], "avg", False, False),
        ([1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 0, 0, 0], "max", False, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", False, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 2, 1], "max", True, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [2, 1, 0, 3], "avg", False, True),
        ([1, 32, 32, 16], [2, 3], [2, 2], [1, 1], [0, 3, 2, 1], "avg", False, False),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [1, 0, 3, 2], "max", False, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [1, 1], [3, 2, 1, 0], "max", True, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [2, 1], [2, 1, 0, 3], "avg", False, True),
        ([1, 32, 32, 16], [2, 3], [2, 2], [2, 3], [0, 3, 2, 1], "avg", False, False),
        ([1, 31, 31, 16], [3, 3], [3, 3], [3, 3], [1, 0, 3, 2], "max", False, True),
        ([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, True),
    ]

    for workload in nchw_workloads:
        verify_pool2d(*workload)
    for workload in nhwc_workloads:
        verify_pool2d(*workload, layout="NHWC")
def verify_pool1d(
    input_shape,
    kernel,
    stride,
    dilation,
    padding,
    pool_type,
    ceil_mode,
    count_include_pad=True,
    layout="NCW",
):
    """Check a single 1-D pooling workload by delegating to verify_poolnd."""
    verify_poolnd(
        1, input_shape, kernel, stride, dilation, padding, pool_type, ceil_mode,
        layout=layout, count_include_pad=count_include_pad,
    )
@tvm.testing.uses_gpu
def test_pool1d():
    """test cases of pool1d"""
    # Each entry: (input_shape, kernel, stride, dilation, padding,
    #              pool_type, ceil_mode, count_include_pad)
    ncw_workloads = [
        ([1, 16, 32], [2], [2], [1], [0, 0], "avg", False, True),
        ([1, 16, 31], [3], [3], [1], [1, 2], "avg", False, True),
        ([1, 16, 32], [2], [2], [1], [1, 2], "avg", False, False),
        ([1, 16, 31], [4], [4], [1], [3, 3], "avg", False, False),
        ([1, 16, 31], [4], [4], [1], [0, 0], "avg", False, False),
        ([1, 16, 32], [2], [2], [1], [0, 0], "max", False, True),
        ([1, 16, 31], [3], [3], [1], [2, 1], "max", False, True),
        ([1, 16, 31], [3], [3], [1], [2, 1], "max", True, True),
        ([1, 16, 31], [3], [3], [1], [2, 5], "avg", False, True),
        ([1, 16, 32], [2], [2], [1], [0, 3], "avg", False, False),
        ([1, 16, 31], [3], [3], [1], [1, 4], "max", False, True),
        ([1, 16, 31], [3], [3], [1], [3, 0], "max", True, True),
        # Non-1 dilations.
        ([1, 16, 31], [3], [3], [2], [2, 5], "avg", False, True),
        ([1, 16, 32], [2], [2], [3], [0, 3], "avg", False, False),
        ([1, 16, 31], [3], [3], [2], [1, 4], "max", False, True),
        ([1, 16, 31], [3], [3], [3], [3, 0], "max", True, True),
    ]
    # Channel-last (NWC) workloads, including the dilated variants.
    nwc_workloads = [
        ([1, 32, 16], [2], [2], [1], [0, 0], "avg", False, True),
        ([1, 31, 16], [3], [3], [1], [1, 2], "avg", False, True),
        ([1, 32, 16], [2], [2], [1], [1, 2], "avg", False, False),
        ([1, 31, 16], [4], [4], [1], [3, 3], "avg", False, False),
        ([1, 31, 16], [4], [4], [1], [0, 0], "avg", False, False),
        ([1, 32, 16], [2], [2], [1], [0, 0], "max", False, True),
        ([1, 31, 16], [3], [3], [1], [2, 1], "max", False, True),
        ([1, 31, 16], [3], [3], [1], [2, 1], "max", True, True),
        ([1, 31, 16], [3], [3], [1], [2, 5], "avg", False, True),
        ([1, 31, 16], [2], [2], [1], [0, 3], "avg", False, False),
        ([1, 31, 16], [3], [3], [1], [1, 4], "max", False, True),
        ([1, 31, 16], [3], [3], [1], [3, 0], "max", True, True),
        ([1, 31, 16], [3], [3], [2], [2, 5], "avg", False, True),
        ([1, 32, 16], [2], [2], [3], [0, 3], "avg", False, False),
        ([1, 31, 16], [3], [3], [2], [1, 4], "max", False, True),
        ([1, 31, 16], [3], [3], [3], [3, 0], "max", True, True),
    ]

    for workload in ncw_workloads:
        verify_pool1d(*workload)
    for workload in nwc_workloads:
        verify_pool1d(*workload, layout="NWC")
def test_pool_invalid_tiled_layout():
    """Tiled/packed layouts must be rejected with an 'Unsupported layout' error."""
    with pytest.raises(TVMError, match="Unsupported layout NCHWD4d"):
        topi.nn.pool3d(
            te.placeholder([1, 16, 32, 32, 32], name="A"),
            kernel=[2, 2, 2],
            stride=[2, 2, 2],
            dilation=[1, 1, 1],
            padding=[0, 0, 0, 0, 0, 0],
            pool_type="avg",
            ceil_mode=False,
            count_include_pad=True,
            layout="NCHWD4d",
        )

    with pytest.raises(TVMError, match="Unsupported layout NCHW4h4w"):
        topi.nn.pool2d(
            te.placeholder([1, 16, 32, 32], name="A"),
            kernel=[2, 2],
            stride=[2, 2],
            dilation=[1, 1],
            padding=[0, 0, 0, 0],
            pool_type="avg",
            ceil_mode=False,
            count_include_pad=True,
            layout="NCHW4h4w",
        )

    with pytest.raises(TVMError, match="Unsupported layout NCW4w"):
        topi.nn.pool1d(
            te.placeholder([1, 16, 32], name="A"),
            kernel=[2],
            stride=[2],
            dilation=[1],
            padding=[0, 0],
            pool_type="avg",
            ceil_mode=False,
            count_include_pad=True,
            layout="NCW4w",
        )
# Allow running this test file directly via the TVM test harness.
if __name__ == "__main__":
    tvm.testing.main()
| 26,034 | 31.02337 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_clip.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for clip operator"""
import numpy as np
import tvm
from tvm import te, tir
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
def verify_clip(N, a_min, a_max, dtype):
    """Check ``topi.clip`` against ``np.clip`` for an (N, N) tensor.

    Parameters
    ----------
    N : int
        Side length of the square input tensor.
    a_min, a_max : int, float, tir.IntImm or tir.FloatImm
        Clipping bounds; TIR immediates are unwrapped before computing the
        numpy reference data.
    dtype : str
        Element type of the input/output tensors.
    """
    A = te.placeholder((N, N), dtype=dtype, name="A")
    B = topi.clip(A, a_min, a_max)
    # The original dead `s = te.create_schedule([B.op])` was removed here:
    # every target builds its own schedule inside check_target below.

    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_clip")
    def get_ref_data(a_min, a_max):
        a_np = np.random.uniform(a_min * 2, a_max * 2, size=(N, N)).astype(dtype)
        b_np = np.clip(a_np, a_min, a_max)
        return a_np, b_np

    # Unwrap TIR immediates so numpy sees plain Python numbers.
    a_min = a_min.value if isinstance(a_min, (tir.FloatImm, tir.IntImm)) else a_min
    a_max = a_max.value if isinstance(a_max, (tir.FloatImm, tir.IntImm)) else a_max
    a_np, b_np = get_ref_data(a_min, a_max)

    def check_target(target, dev):
        # Build and run on one target, comparing against the numpy reference.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)

        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
        f = tvm.build(s, [A, B], target, name="clip")
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_clip():
    """Exercise clip with plain Python bounds for each supported dtype."""
    for dtype in ("float32", "int16", "int8"):
        verify_clip(1024, -127, 127, dtype)
@tvm.testing.uses_gpu
def test_clip_floaimm_intimm():
    """Exercise clip with TIR immediate (FloatImm/IntImm) bounds."""
    verify_clip(1024, tir.FloatImm("float32", -127), tir.FloatImm("float32", 127), "float32")
    for dtype in ("int16", "int8"):
        verify_clip(1024, tir.IntImm("int32", -127), tir.IntImm("int32", 127), dtype)
# Allow running this test file directly.
if __name__ == "__main__":
    test_clip()
    test_clip_floaimm_intimm()
| 2,788 | 35.697368 | 93 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_nhwc_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-arguments
# pylint: disable=bad-whitespace
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import topi
import tvm.topi.testing
from tvm import te
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Dispatch tables mapping target kind -> (compute, schedule) pairs for NHWC
# winograd conv2d, consumed via tvm.topi.testing.dispatch.
_conv2d_nhwc_winograd_tensorcore = {
    "cuda": (
        topi.cuda.conv2d_nhwc_winograd_tensorcore,
        topi.cuda.schedule_conv2d_nhwc_winograd_tensorcore,
    )
}

_conv2d_nhwc_winograd_direct = {
    "cuda": (topi.cuda.conv2d_nhwc_winograd_direct, topi.cuda.schedule_conv2d_nhwc_winograd_direct)
}
def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices="cuda",
    bgemm="direct",
):
    """Test the conv2d with winograd for nhwc layout.

    Builds the NHWC winograd conv2d (optionally fused with bias-add and relu),
    runs it on ``devices`` and compares against a numpy reference computed by
    ``tvm.topi.testing.conv2d_nhwc_python``. ``bgemm`` selects the batched-GEMM
    strategy: "direct" or "tensorcore".
    """
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )

    in_height = in_width = in_size
    A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")

    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_nhwc_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        # Draw the bias exactly once (it was previously drawn a second time
        # when add_bias was set, discarding the first draw).
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        # Compile with the selected bgemm strategy and compare on one device.
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            if bgemm == "direct":
                fcompute, fschedule = tvm.topi.testing.dispatch(
                    device, _conv2d_nhwc_winograd_direct
                )
            elif bgemm == "tensorcore":
                fcompute, fschedule = tvm.topi.testing.dispatch(
                    device, _conv2d_nhwc_winograd_tensorcore
                )
            C = fcompute(A, W, stride, padding, dilation, "float32")
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])

        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # The bias/no-bias paths differ only in the argument list, so build
        # the (previously duplicated) kernel name once.
        name = "relu_%d_%d_%d_%d_%d_%d_%d_%d" % (
            batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation,
        )
        if add_bias:
            func = tvm.build(s, [A, W, bias, C], device, name=name)
            func(a, w, b, c)
        else:
            func = tvm.build(s, [A, W, C], device, name=name)
            func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=2e-3)

    check_device(devices)
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_conv2d_nhwc_winograd_direct():
    """Run the direct (non-TensorCore) NHWC winograd conv2d over representative workloads."""
    # resnet 18 workloads
    print("test_winograd_direct...")
    verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1, bgemm="direct")
    verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
    verify_conv2d_nhwc(1, 48, 35, 64, 5, 1, 2)
    # weird workloads (tiny shapes, non-power-of-two channels)
    verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
    # Asymmetric padding, with optional fused bias/relu
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, "SAME")
    verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
    verify_conv2d_nhwc(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
    verify_conv2d_nhwc(1, 48, 35, 48, 5, 1, "VALID")
@tvm.testing.requires_cuda
@tvm.testing.requires_tensorcore
def test_conv2d_nhwc_winograd_tensorcore():
    """Run the TensorCore NHWC winograd conv2d over representative workloads."""
    verify_conv2d_nhwc(8, 64, 56, 64, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(8, 128, 28, 128, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(8, 256, 14, 256, 3, 1, 1, bgemm="tensorcore")
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, (1, 1), add_relu=True, bgemm="tensorcore")
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, "SAME", add_relu=True, bgemm="tensorcore")
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    test_conv2d_nhwc_winograd_direct()
    test_conv2d_nhwc_winograd_tensorcore()
| 6,371 | 34.597765 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_transpose_nchw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for transposed convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Dispatch table: target kind -> (compute, schedule) for transposed conv2d
# in NCHW layout.
_conv2d_transpose_nchw_implement = {
    "generic": (topi.nn.conv2d_transpose_nchw, topi.generic.schedule_conv2d_transpose_nchw),
    "cpu": (topi.x86.conv2d_transpose_nchw, topi.x86.schedule_conv2d_transpose_nchw),
    "arm_cpu": (topi.arm_cpu.conv2d_transpose_nchw, topi.arm_cpu.schedule_conv2d_transpose_nchw),
    "gpu": (topi.cuda.conv2d_transpose_nchw, topi.cuda.schedule_conv2d_transpose_nchw),
    "hls": (topi.nn.conv2d_transpose_nchw, topi.hls.schedule_conv2d_transpose_nchw),
}
def verify_conv2d_transpose_nchw(
    batch, in_channel, in_size, num_filter, kernel, stride, padding, output_padding
):
    """Build, run and check conv2d_transpose (NCHW) plus a fused relu.

    Both the raw transposed convolution (B) and relu(B) (C) are compared
    against the numpy reference, on the generic llvm implementation and on
    every enabled target.
    """
    in_height, in_width = in_size
    kernel_height, kernel_width = kernel
    stride_height, stride_width = stride
    pad_top, pad_left, pad_bottom, pad_right = padding
    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((in_channel, num_filter, kernel_height, kernel_width), name="W")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    dtype = A.dtype
    # Reference data is memoized (pickled) so repeated runs reuse it.
    @memoize("topi.tests.test_topi_conv2d_transpose.verify_conv2d_transpose_nchw")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = tvm.topi.testing.conv2d_transpose_nchw_python(
            a_np, w_np, stride, padding, output_padding
        )
        # Reference for the fused relu output.
        c_np = np.maximum(b_np, 0)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()
    def check(fcompute, fschedule, target, dev):
        # Declare both outputs, schedule them separately, build and run.
        B = fcompute(
            A,
            W,
            [stride_height, stride_width],
            [pad_top, pad_left, pad_bottom, pad_right],
            A.dtype,
            output_padding,
        )
        C = topi.nn.relu(B)
        s1 = fschedule([B])
        s2 = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        func1 = tvm.build(s1, [A, W, B], target)
        func2 = tvm.build(s2, [A, W, C], target)
        func1(a, w, b)
        func2(a, w, c)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    def check_generic(target, dev):
        # Always exercise the generic schedule on llvm, regardless of dispatch.
        print("Running generic on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = _conv2d_transpose_nchw_implement["generic"]
            check(fcompute, fschedule, target, dev)
    check_generic("llvm", tvm.cpu(0))
    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(
                target, _conv2d_transpose_nchw_implement
            )
            check(fcompute, fschedule, target, dev)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv2d_transpose_nchw():
    """Run conv2d_transpose_nchw over representative workloads.

    Covers unit kernels, strided/padded cases, output padding, and a few
    effectively-1D workloads (width == 1).
    """
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 1, (1, 1), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (3, 3), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (3, 3), (2, 2), (1, 1, 1, 1), (1, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 3, (224, 224), 32, (2, 2), (2, 2), (0, 0, 0, 0), (1, 1))
    verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (1, 1), (0, 0, 0, 0), (0, 0))
    verify_conv2d_transpose_nchw(1, 32, (32, 32), 128, (5, 5), (2, 2), (1, 1, 1, 1), (0, 0))
    verify_conv2d_transpose_nchw(16, 32, (8192, 1), 8, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
    verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (0, 0))
    verify_conv2d_transpose_nchw(16, 512, (8, 1), 128, (31, 1), (2, 1), (14, 0, 15, 0), (1, 0))
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    test_conv2d_transpose_nchw()
| 5,444 | 41.539063 | 97 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_tensor.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for tensor operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def verify_elemwise_sum(num_args, dtype):
    """Check topi.elemwise_sum of ``num_args`` tensors against numpy's sum."""
    shape = (3, 5, 4)
    placeholders = [
        te.placeholder(shape, name="data" + str(idx), dtype=dtype) for idx in range(num_args)
    ]
    esum = topi.elemwise_sum(placeholders)
    sched = te.create_schedule([esum.op])

    @memoize("topi.tests.test_topi_elemwise_sum")
    def get_ref_data():
        return [np.random.uniform(0, 10, size=shape).astype(dtype) for _ in range(num_args)]

    ref_inputs = get_ref_data()

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        dev = tvm.device(target, 0)
        result = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
        func = tvm.build(sched, placeholders + [esum], target, name="elemwise_sum")
        # Inputs first, output buffer last, matching the build signature.
        call_args = [tvm.nd.array(arr, dev) for arr in ref_inputs]
        call_args.append(result)
        func(*call_args)
        expected = np.sum(np.array(ref_inputs), axis=0)
        tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)

    for target in ["llvm"]:
        check_target(target)
def verify_full(shape, dtype, fill_value):
    """Check topi.full and topi.full_like against numpy.full."""
    src = te.placeholder(shape, dtype=dtype, name="A")
    filled_like = topi.full_like(src, fill_value=fill_value)
    filled = topi.full(shape=shape, dtype=dtype, fill_value=fill_value)
    sched_like = te.create_schedule([filled_like.op])
    sched_full = te.create_schedule([filled.op])

    @memoize("topi.tests.test_topi_full")
    def get_ref_data():
        return np.full(shape, fill_value, dtype)

    expected = get_ref_data()

    def check_target(target):
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        dev = tvm.device(target, 0)
        result = tvm.nd.array(np.zeros(shape, dtype=dtype), dev)
        # full_like takes an input tensor purely to mimic its shape/dtype.
        fn = tvm.build(sched_like, [src, filled_like], target, name="full_like")
        fn(tvm.nd.array(np.zeros(shape, dtype), dev), result)
        tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)
        # full has no inputs at all; it just writes the constant.
        fn = tvm.build(sched_full, [filled], target, name="full")
        fn(result)
        tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)

    for target in ["llvm"]:
        check_target(target)
def verify_vectorization(n, m, dtype):
    """Check a vectorized elementwise add-one kernel of shape (n, m)."""

    def check_target(target):
        # Skip quietly when the target (or fp16 on this GPU) is unavailable.
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        if dtype == "float16" and target == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
            print("Skip because gpu does not have fp16 support")
            return
        with tvm.target.Target(target):
            dev = tvm.device(target, 0)
            inp = te.placeholder((n, m), name="A", dtype=dtype)
            outp = te.compute(
                (n, m), lambda i, j: inp[i, j] + tvm.tir.const(1, inp.dtype), name="B"
            )
            sched = tvm.topi.testing.get_elemwise_schedule(target)(outp)
            kernel = tvm.build(sched, [inp, outp], target)
            a_nd = tvm.nd.empty((n, m), inp.dtype, dev).copyfrom(np.random.uniform(size=(n, m)))
            b_nd = tvm.nd.empty((n, m), outp.dtype, dev)
            kernel(a_nd, b_nd)
            tvm.testing.assert_allclose(b_nd.numpy(), a_nd.numpy() + 1, rtol=1e-5)

    for target in ["cuda"]:
        check_target(target)
@tvm.testing.requires_gpu
@tvm.testing.requires_cuda
def test_vectorization():
    """Check vectorized elementwise add with float16 data on CUDA."""
    verify_vectorization(128, 64, "float16")
def test_elemwise_sum():
    """Check topi.elemwise_sum for several arities and dtypes."""
    verify_elemwise_sum(1, "float32")
    verify_elemwise_sum(5, "float32")
    verify_elemwise_sum(4, "int32")
def test_full():
    """Check topi.full / topi.full_like for float and int fill values."""
    verify_full((3, 4, 5), "float32", 3.14)
    verify_full((10,), "int32", 7)
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    test_elemwise_sum()
    test_full()
    test_vectorization()
| 4,729 | 33.275362 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_winograd.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm.task.space import FallbackConfigEntity
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Dispatch table: target kind -> (compute, schedule) for winograd conv2d
# in NCHW layout.
_conv2d_nchw_winograd_implement = {
    "arm_cpu": (topi.arm_cpu.conv2d_nchw_winograd, topi.arm_cpu.schedule_conv2d_nchw_winograd),
    "cuda": (topi.cuda.conv2d_nchw_winograd, topi.cuda.schedule_conv2d_nchw_winograd),
    "mali": (topi.mali.conv2d_nchw_winograd, topi.mali.schedule_conv2d_nchw_winograd),
}
def verify_conv2d_nchw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    add_bias=False,
    add_relu=False,
    devices=("cuda", "llvm -device=arm_cpu", "opencl -device=mali"),
):
    """Verify winograd conv2d (NCHW layout) against the numpy reference.

    ``devices`` lists the targets to try; each one is skipped unless it is
    enabled in the current build.  Bias add and/or relu can be fused into
    both the TVM graph and the reference result.
    """
    # Note: `devices` default was a mutable list; a tuple avoids the
    # shared-mutable-default pitfall while iterating identically.
    pad_top, pad_left, pad_bottom, pad_right = get_pad_tuple(padding, (kernel, kernel))
    padding_sum = pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d)"
        % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation)
    )
    in_height = in_width = in_size
    A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
    W = te.placeholder((num_filter, in_channel, kernel, kernel), name="W")
    bias = te.placeholder((num_filter, 1, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    # NOTE(review): the key says "nhwc" although this is the nchw test; kept
    # unchanged for cache compatibility.
    @memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation))
        c_np = tvm.topi.testing.conv2d_nchw_python(a_np, dw_np, stride, padding)
        if add_bias:
            # Fix: reuse the bias drawn above.  The original sampled b_np a
            # second time here, discarding the first draw for no reason.
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np

    a_np, w_np, b_np, c_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nchw_winograd_implement)
            C = fcompute(A, W, stride, padding, dilation, dtype)
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                device,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
                % (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
            )
            func(a, w, c)
        # Winograd accumulations deviate slightly from direct convolution.
        rtol = 1e-3
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)

    for device in devices:
        check_device(device)
@tvm.testing.uses_gpu
def test_conv2d_nchw():
    """Run winograd conv2d (NCHW) over representative workloads."""
    # inception v3 workloads
    verify_conv2d_nchw(1, 128, 17, 192, 7, 1, 3, devices=["cuda"])
    verify_conv2d_nchw(1, 128, 17, 128, 7, 1, 3, devices=["cuda"])
    verify_conv2d_nchw(1, 160, 17, 160, 7, 1, 3, devices=["cuda"])
    # resnet 18 workloads
    verify_conv2d_nchw(1, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nchw(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nchw(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nchw(1, 512, 7, 512, 3, 1, 1)
    # batch size = 2
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1)
    # relu, bias
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_bias=True)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True)
    verify_conv2d_nchw(2, 64, 56, 64, 3, 1, 1, add_relu=True, add_bias=True)
    # weird workloads
    verify_conv2d_nchw(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nchw(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nchw(2, 13, 71, 59, 3, 1, 1)
    verify_conv2d_nchw(1, 48, 35, 64, 5, 1, 2, devices=["cuda"])
    # Asymmetric padding
    verify_conv2d_nchw(1, 48, 56, 48, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(1, 64, 28, 64, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(1, 128, 14, 128, 3, 1, (1, 1))
    verify_conv2d_nchw(1, 512, 7, 512, 3, 1, "SAME")
    verify_conv2d_nchw(2, 13, 71, 59, 3, 1, (1, 1, 1, 1))
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1, 1, 1), add_bias=True)
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, (1, 1), add_relu=True)
    verify_conv2d_nchw(2, 48, 56, 48, 3, 1, "SAME", add_relu=True, add_bias=True)
    verify_conv2d_nchw(1, 64, 17, 192, 7, 1, (3, 1), devices=["cuda"])
    verify_conv2d_nchw(1, 64, 17, 64, 7, 1, (3, 3, 2, 2), devices=["cuda"])
    verify_conv2d_nchw(1, 160, 17, 160, 7, 1, "SAME", devices=["cuda"])
    verify_conv2d_nchw(1, 48, 35, 48, 5, 1, "VALID", devices=["cuda"])
def verify_conv2d_nhwc(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
):
    """Check the winograd NHWC compute declaration on CPU.

    This version is intended to be used by the auto-scheduler, so only the
    correctness of the compute declaration is tested, with the default naive
    schedule on llvm.
    """
    A = te.placeholder((batch, in_size, in_size, in_channel), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    bias = te.placeholder((1, 1, 1, num_filter), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype

    @memoize("topi.tests.test_topi_conv2d_winograd.verify_conv2d_nhwc")
    def get_ref_data():
        # b_np stays in the memoized tuple for cache compatibility even
        # though this compute-only check never applies a bias.
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c_np = tvm.topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
        return a_np, w_np, b_np, c_np

    a_np, w_np, _b_np, c_np = get_ref_data()

    target = "llvm"
    dev = tvm.device(target)
    C = topi.nn.conv2d_winograd_nhwc(A, W, stride, padding, dilation, dtype)
    s = te.create_schedule([C.op])
    a = tvm.nd.array(a_np, device=dev)
    w = tvm.nd.array(w_np, device=dev)
    # Fix: dropped the unused `b = tvm.nd.array(b_np, ...)` device array —
    # `bias` is never part of the built function's signature below.
    c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), device=dev)
    func = tvm.build(s, [A, W, C], target=target)
    func(a, w, c)
    # Winograd accumulations deviate slightly from the direct reference.
    rtol = 1e-3
    tvm.testing.assert_allclose(c.numpy(), c_np, rtol=rtol)
def test_conv2d_nhwc():
    """Check the winograd NHWC compute declaration over several workloads.

    This version is intended to be used by the auto-scheduler, so only the
    correctness of the compute declaration is tested, with the default naive
    schedule on cpu.
    """
    # resnet 18 workloads
    verify_conv2d_nhwc(1, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nhwc(1, 128, 28, 128, 3, 1, 1)
    verify_conv2d_nhwc(1, 256, 14, 256, 3, 1, 1)
    verify_conv2d_nhwc(1, 512, 7, 512, 3, 1, 1)
    # more shapes
    verify_conv2d_nhwc(2, 64, 56, 64, 3, 1, 1)
    verify_conv2d_nhwc(1, 1, 1, 1, 3, 1, 1)
    verify_conv2d_nhwc(3, 3, 3, 3, 3, 1, 1)
    verify_conv2d_nhwc(2, 13, 71, 59, 3, 1, 1)
    # Asymmetric padding
    verify_conv2d_nhwc(1, 3, 7, 3, 3, 1, "SAME")
    verify_conv2d_nhwc(1, 48, 35, 48, 3, 1, "VALID")
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    test_conv2d_nchw()
    test_conv2d_nhwc()
| 9,025 | 35.248996 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_softmax.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for softmax"""
import logging
import os
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import te, topi
from tvm.topi.utils import get_const_tuple
# Dispatch table: target kind -> softmax schedule function.
_softmax_schedule = {
    "generic": topi.generic.schedule_softmax,
    "cpu": topi.x86.schedule_softmax,
    "gpu": topi.cuda.schedule_softmax,
    "hls": topi.hls.schedule_softmax,
}
# Input dtypes exercised by every parameterized test below.
dtype = tvm.testing.parameter("float32", "float64")
# Per-operation configuration: the topi op under test, its numpy reference,
# the tensor ranks it supports, and the axes to exercise.
configs = {
    "softmax": {
        "topi": topi.nn.softmax,
        "ref": tvm.topi.testing.softmax_python,
        "dimensions": [1, 2, 4],
        "axis": [0, 1, 2, 3],
    },
    "log_softmax": {
        "topi": topi.nn.log_softmax,
        "ref": tvm.topi.testing.log_softmax_python,
        "dimensions": [2, 3],
        "axis": [1],
    },
}
shapes = [(32, 10), (3, 4), (1, 16, 256, 256), (32,)]
# Cartesian product of operation x shape x axis, filtered to supported
# rank/axis combinations for each operation.
softmax_operation, shape, axis = tvm.testing.parameters(
    *[
        (name, shape, axis)
        for name, config in configs.items()
        for shape in shapes
        if len(shape) in config["dimensions"]
        for axis in range(len(shape))
        if axis in config["axis"]
    ]
)
@tvm.testing.fixture(cache_return_value=True)
def ref_data(shape, dtype, softmax_operation, axis):
    """Generate a random input tensor and its reference softmax/log_softmax output."""
    reference = configs[softmax_operation]["ref"]
    data = np.random.uniform(size=shape).astype(dtype)
    # Swap `axis` with the last axis so the 2-D reference (which reduces over
    # its last dimension) can be applied; the same swap maps the result back.
    order = list(range(data.ndim))
    order[-1], order[axis] = order[axis], order[-1]
    moved_shape = [data.shape[i] for i in order]
    flat_input = data.transpose(order).reshape(-1, moved_shape[-1])
    flat_output = reference(flat_input)
    expected = flat_output.reshape(*moved_shape).transpose(order)
    return data, expected
def test_softmax(target, dev, shape, dtype, ref_data, softmax_operation, axis):
    """Build and run softmax/log_softmax on the target; compare with numpy."""
    target = tvm.target.Target(target)
    if target.kind.name == "vulkan" and dtype == "float64":
        # https://www.khronos.org/registry/SPIR-V/specs/1.0/GLSL.std.450.html
        pytest.xfail("Vulkan GLSL.std.450 does not support 64-bit floats")
    A = te.placeholder(shape, dtype=dtype, name="A")
    topi_op = configs[softmax_operation]["topi"]
    B = topi_op(A, axis=axis)
    with tvm.target.Target(target):
        fschedule = tvm.topi.testing.dispatch(target, _softmax_schedule)
        s = fschedule(B)
    a_np, b_np = ref_data
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    f = tvm.build(s, [A, B], target)
    f(a, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 3,356 | 29.243243 | 79 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_batch_matmul_tensorcore.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for batch_matmul operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.pickle_memoize import memoize
import tvm.testing
# Dispatch table: target kind -> (compute, schedule) for TensorCore
# batch_matmul.
_batch_matmul_implement = {
    "gpu": (topi.cuda.batch_matmul_tensorcore, topi.cuda.schedule_batch_matmul_tensorcore),
}
def convert_int32_into_int4(a_int32):
    """Pack each group of eight 4-bit values into one int32 word.

    Parameters
    ----------
    a_int32 : numpy.ndarray
        Integer tensor of shape (B, K, L); only the low nibble (``& 0xF``)
        of each element is kept.  L must be a multiple of 8.

    Return
    ------
    a_int4 : numpy.ndarray
        int32 tensor of shape (B, K, L // 8).  Element ``[b, k, l]`` holds
        inputs ``[b, k, 8*l : 8*l + 8]`` packed big-endian: the first value
        occupies the most-significant nibble.
    """
    B, K, L = a_int32.shape
    assert L % 8 == 0
    # Vectorized replacement for the original quadruple Python loop (the
    # inner `min(8, L - l * 8)` was always 8 given the assertion above).
    # Accumulate in int64 and cast at the end so a set top nibble wraps
    # into int32 exactly as the element-wise OR-assignment did.
    nibbles = (a_int32.astype(np.int64) & 0xF).reshape(B, K, L // 8, 8)
    shifts = np.arange(7, -1, -1, dtype=np.int64) * 4  # [28, 24, ..., 0]
    packed = np.bitwise_or.reduce(nibbles << shifts, axis=-1)
    return packed.astype(np.int32)
def verify_batch_matmul(x_batch, y_batch, M, N, K, dtype):
    """Build and run TensorCore batch_matmul; compare with the numpy reference."""
    x = te.placeholder((x_batch, M, K), name="x", dtype=dtype)
    y = te.placeholder((y_batch, N, K), name="y", dtype=dtype)
    assert dtype in ["int4", "int8", "float16"]
    # Integer inputs accumulate into int32; float16 accumulates into float32.
    out_dtype = "float32"
    if dtype in ["int8", "int4"]:
        out_dtype = "int32"
    # use memoize to pickle the test data for next time use
    @memoize("topi.tests.test_topi_batch_matmul_tensorcore")
    def get_ref_data():
        if dtype == "int4":
            a_np = np.random.randint(low=-8, high=7, size=(x_batch, M, K))
            b_np = np.random.randint(low=-8, high=7, size=(y_batch, N, K))
        elif dtype == "int8":
            a_np = np.random.randint(low=-128, high=127, size=(x_batch, M, K)).astype(dtype)
            b_np = np.random.randint(low=-128, high=127, size=(y_batch, N, K)).astype(dtype)
        else:
            a_np = np.random.uniform(size=(x_batch, M, K)).astype(dtype)
            b_np = np.random.uniform(size=(y_batch, N, K)).astype(dtype)
        c_np = tvm.topi.testing.batch_matmul(a_np, b_np, out_dtype)
        return (a_np, b_np, c_np)
    # get the test data
    a_np, b_np, c_np = get_ref_data()
    if dtype == "int4":
        # int4 data is handed to the kernel packed: eight 4-bit values per
        # int32 word (the reference c_np was computed on the unpacked data).
        a_np = convert_int32_into_int4(a_np)
        b_np = convert_int32_into_int4(b_np)
    def check_device(device):
        dev = tvm.device(device, 0)
        print("Running on target: %s" % device)
        with tvm.target.Target(device):
            fcompute, fschedule = tvm.topi.testing.dispatch(device, _batch_matmul_implement)
            out = fcompute(x, y, None, out_dtype)
            s = fschedule([out])
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out_dtype), dev)
        f = tvm.build(s, [x, y, out], device, name="batch_matmul")
        f(a, b, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)
    check_device("cuda")
@tvm.testing.requires_tensorcore
def test_batch_matmul():
    """Run TensorCore batch_matmul for each supported dtype over a few shapes."""
    for dtype in ["float16", "int8", "int4"]:
        verify_batch_matmul(1, 1, 16, 16, 32, dtype)
        verify_batch_matmul(5, 5, 16, 16, 32, dtype)
        verify_batch_matmul(5, 5, 16, 32, 32, dtype)
        verify_batch_matmul(30, 30, 16, 32, 32, dtype)
# Allow running this file directly, outside of pytest.
if __name__ == "__main__":
    test_batch_matmul()
| 4,088 | 34.556522 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_fifo_buffer.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for FIFO buffer"""
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import numpy as np
from tvm.contrib.pickle_memoize import memoize
def verify_fifo_buffer(buffer_shape, data_shape, axis, dtype="float32"):
    """Check topi.nn.fifo_buffer against a numpy FIFO-queue reference.

    The reference concatenates buffer and data along ``axis`` and keeps the
    trailing ``buffer_shape[axis]`` slice — i.e. the oldest entries are
    shifted out and the new data shifted in.
    """
    buffer = te.placeholder(buffer_shape, name="buffer", dtype=dtype)
    data = te.placeholder(data_shape, name="data", dtype=dtype)
    # Use memoize, pickle the test data for next time use
    @memoize("topi.tests.test_fifo_buffer")
    def get_ref_data():
        buffer_np = np.random.uniform(size=buffer_shape).astype(dtype)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        # Reference implementation of FIFO queue
        begin = data_np.shape[axis]
        end = buffer_np.shape[axis] + data_np.shape[axis]
        ndim = len(buffer_np.shape)
        # Slice only along `axis`; take everything on the other dimensions.
        ss = tuple((slice(begin, end, 1) if x == axis else slice(None)) for x in range(ndim))
        out_np = np.concatenate((buffer_np, data_np), axis=axis)[ss]
        return (buffer_np, data_np, out_np)
    # Get the test data
    buffer_np, data_np, out_np = get_ref_data()
    def check_device(target, dev):
        print("  Running on target: {}".format(target))
        with tvm.target.Target(target):
            out = topi.nn.fifo_buffer(data, buffer, axis=axis)
            s = tvm.topi.testing.get_injective_schedule(target)([out])
        buffer_tvm = tvm.nd.array(buffer_np, device=dev)
        data_tvm = tvm.nd.array(data_np, device=dev)
        out_tvm = tvm.nd.empty(shape=buffer_shape, device=dev, dtype=dtype)
        f = tvm.build(s, [data, buffer, out], target, name="fifo")
        f(data_tvm, buffer_tvm, out_tvm)
        tvm.testing.assert_allclose(out_tvm.numpy(), out_np)
    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
def verify_conv1d_integration():
batch_size = 1
num_channel = 1
num_filter = 1
# Note: TVM doesn't have a separate op for 1D convolution, so we use conv2d instead.
# We set height=1 to indicate that convolution is really 1D.
stride = (1, 1)
dilate = (1, 1)
padding = (0, 0)
kernel_size = (1, 3)
input_window_size = (1, 10)
inc_input_size = (1, 2)
context_size = (1, 4)
inc_output_size = (1, 2)
output_window_size = (1, 8)
num_iteration = 20
buffer_axis = 3
kernel_shape = (num_filter, num_channel, kernel_size[0], kernel_size[1])
input_window_shape = (batch_size, num_channel, input_window_size[0], input_window_size[1])
inc_input_shape = (batch_size, num_channel, inc_input_size[0], inc_input_size[1])
inc_output_shape = (batch_size, num_filter, inc_output_size[0], inc_output_size[1])
context_shape = (batch_size, num_channel, context_size[0], context_size[1])
output_window_shape = (batch_size, num_filter, output_window_size[0], output_window_size[1])
# Rule: Convolution of Tensor[context_shape] and Tensor[kernel_shape]
# produces Tensor[inc_input_shape]
dtype = "float32"
inc_input = te.placeholder(inc_input_shape, name="inc_input", dtype=dtype)
input_window = te.placeholder(input_window_shape, name="input_window", dtype=dtype)
context = te.placeholder(context_shape, name="context", dtype=dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=dtype)
inc_output = te.placeholder(inc_input_shape, name="inc_output", dtype=dtype)
output_window = te.placeholder(output_window_shape, name="output_window", dtype=dtype)
# Use memoize, pickle the test data for next time use
@memoize("topi.tests.test_fifo_buffer_conv1d_integration")
def get_data():
# Generate [num_iteration] slices of input
inc_input_np = np.random.uniform(
size=tuple([num_iteration] + list(inc_input_shape))
).astype(dtype)
input_window_np = np.zeros(input_window_shape, dtype=dtype)
kernel_np = np.random.uniform(size=kernel_shape).astype(dtype)
context_np = np.zeros(context_shape, dtype=dtype)
output_window_np = np.zeros(output_window_shape, dtype=dtype)
return (inc_input_np, input_window_np, kernel_np, context_np, output_window_np)
# Get the test data
inc_input_np, input_window_np, kernel_np, context_np, output_window_np = get_data()
def check_device(target, dev):
print(" Running on target: {}".format(target))
conv2d_nchw, schedule_conv2d_nchw = tvm.topi.testing.get_conv2d_nchw_implement(target)
with tvm.target.Target(target):
out = topi.nn.fifo_buffer(inc_input, context, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_context = tvm.build(s, [inc_input, context, out], target, name="update_context")
out = conv2d_nchw(context, kernel, stride, padding, dilate, dtype)
s = schedule_conv2d_nchw([out])
conv2d_inc = tvm.build(s, [context, kernel, out], target, name="conv2d_inc")
out = topi.nn.fifo_buffer(inc_output, output_window, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_output_window = tvm.build(
s, [inc_output, output_window, out], target, name="update_output_window"
)
out = topi.nn.fifo_buffer(inc_input, input_window, axis=buffer_axis)
s = tvm.topi.testing.get_injective_schedule(target)([out])
update_input_window = tvm.build(
s, [inc_input, input_window, out], target, name="update_input_window"
)
out = conv2d_nchw(input_window, kernel, stride, padding, dilate, dtype)
s = schedule_conv2d_nchw([out])
conv2d = tvm.build(s, [input_window, kernel, out], target, name="conv2d")
input_window_tvm = tvm.nd.array(input_window_np, device=dev)
new_input_window_tvm = tvm.nd.empty(shape=input_window_shape, device=dev, dtype=dtype)
kernel_tvm = tvm.nd.array(kernel_np, device=dev)
context_tvm = tvm.nd.array(context_np, device=dev)
new_context_tvm = tvm.nd.empty(shape=context_shape, device=dev, dtype=dtype)
inc_output_tvm = tvm.nd.empty(shape=inc_output_shape, device=dev, dtype=dtype)
output_window_tvm = tvm.nd.array(output_window_np, device=dev)
new_output_window_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)
output_window_ref_tvm = tvm.nd.empty(shape=output_window_shape, device=dev, dtype=dtype)
for i in range(num_iteration):
# Take i-th slice of inc_input_np
inc_input_tvm = tvm.nd.array(inc_input_np[i], device=dev)
# Compute new output window incrementally, using the FIFO buffer op
update_context(inc_input_tvm, context_tvm, new_context_tvm)
conv2d_inc(new_context_tvm, kernel_tvm, inc_output_tvm)
update_output_window(inc_output_tvm, output_window_tvm, new_output_window_tvm)
context_tvm = new_context_tvm
output_window_tvm = new_output_window_tvm
# Compute full input window, so that we have a baseline
update_input_window(inc_input_tvm, input_window_tvm, new_input_window_tvm)
input_window_tvm = new_input_window_tvm
conv2d(input_window_tvm, kernel_tvm, output_window_ref_tvm)
# Incrementally updating the output window should be equivalent to computing it from
# scratch using the input window
tvm.testing.assert_allclose(output_window_tvm.numpy(), output_window_ref_tvm.numpy())
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
@tvm.testing.uses_gpu
def test_fifo_buffer():
    """Exercise the FIFO buffer op over ranks 1-6 and every axis of each rank."""
    for rank in range(1, 7):
        # The buffer is a hypercube of extent 7; the pushed data slab has
        # extent 2 along the buffered axis and 7 everywhere else.
        full_shape = (7,) * rank
        for axis in range(rank):
            slab_shape = tuple(2 if d == axis else 7 for d in range(rank))
            msg = "Testing FIFO buffer op: buffer_shape = {}, data_shape = {}, axis = {}".format(
                full_shape, slab_shape, axis
            )
            print(msg)
            verify_fifo_buffer(full_shape, slab_shape, axis)
@tvm.testing.uses_gpu
def test_conv1d_integration():
    # End-to-end check: the FIFO-buffered incremental 1D convolution must match
    # a from-scratch convolution over the full input window.
    print("Testing FIFO buffer with 1D convolution")
    verify_conv1d_integration()
if __name__ == "__main__":
    # Allow running this test file directly, outside of pytest.
    test_fifo_buffer()
    test_conv1d_integration()
| 9,222 | 43.129187 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for depth to space"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
def verify_depth_to_space(
    block_size, batch, in_channel, in_height, in_width, layout="NCHW", mode="DCR"
):
    """Compare topi.nn.depth_to_space with the python reference on every enabled target.

    ``layout`` selects NCHW or NHWC tensor ordering; ``mode`` is the DCR/CDR
    pixel-shuffle variant forwarded to both the op and the reference.
    """
    channels_out = int(in_channel / (block_size * block_size))
    height_out = int(in_height * block_size)
    width_out = int(in_width * block_size)
    if layout == "NCHW":
        in_shape = [batch, in_channel, in_height, in_width]
        out_shape = [batch, channels_out, height_out, width_out]
    elif layout == "NHWC":
        in_shape = [batch, in_height, in_width, in_channel]
        out_shape = [batch, height_out, width_out, channels_out]
    else:
        raise NotImplementedError("Layout not supported {}".format(layout))
    A = te.placeholder(in_shape, name="A", dtype="float32")
    dtype = A.dtype
    a_np = np.random.uniform(size=in_shape).astype(dtype)
    B = topi.nn.depth_to_space(A, block_size=block_size, layout=layout, mode=mode)
    # The python reference only understands NCHW, so round-trip NHWC through it.
    if layout == "NHWC":
        a_np = np.transpose(a_np, axes=[0, 3, 1, 2])
    b_np = tvm.topi.testing.depth_to_space_python(a_np, block_size, mode=mode)
    if layout == "NHWC":
        a_np = np.transpose(a_np, axes=[0, 2, 3, 1])
        b_np = np.transpose(b_np, axes=[0, 2, 3, 1])
    def _run_on(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(B)
        a_nd = tvm.nd.array(a_np, dev)
        b_nd = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        built = tvm.build(sched, [A, B], target)
        built(a_nd, b_nd)
        tvm.testing.assert_allclose(b_nd.numpy(), b_np, rtol=1e-3, atol=1e-3)
    for target, dev in tvm.testing.enabled_targets():
        _run_on(target, dev)
@tvm.testing.uses_gpu
def test_depth_to_space():
    """Run depth_to_space over both layouts and both modes on a spread of sizes."""
    # (block_size, batch, channels, height, width), smallest to largest.
    cases = [
        (2, 1, 4, 1, 1),  # simplest possible case
        (2, 1, 32, 32, 32),  # average input size
        (8, 1, 256, 32, 32),  # large block size
        (4, 8, 32, 32, 32),  # large batch size
        (4, 8, 32, 128, 128),  # large input size
    ]
    for layout in ["NCHW", "NHWC"]:
        for mode in ["DCR", "CDR"]:
            for block, nbatch, channels, height, width in cases:
                verify_depth_to_space(
                    block, nbatch, channels, height, width, layout=layout, mode=mode
                )
if __name__ == "__main__":
    test_depth_to_space()
| 3,338 | 37.825581 | 82 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_conv2d_hwcn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing
# Map from target kind to the (compute, schedule) pair implementing conv2d in
# HWCN layout; "gpu" and "opencl" share the CUDA-style implementation.
_conv2d_hwcn_implement = {
    "generic": (topi.nn.conv2d_hwcn, topi.generic.schedule_conv2d_hwcn),
    "gpu": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
    "opencl": (topi.cuda.conv2d_hwcn, topi.cuda.schedule_conv2d_hwcn),
}
def verify_conv2d_hwcn(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
    """Check conv2d in HWCN layout, plus bias-add and relu fusions, against numpy.

    Three schedules are built -- conv only, conv+bias, conv+bias+relu -- and each
    is compared to a memoized numpy reference on every GPU-style target.
    """
    in_height = in_width = in_size
    A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    B = te.placeholder((1, num_filter, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    b_shape = get_const_tuple(B.shape)
    dtype = A.dtype
    # Reference results are cached on disk, so repeated runs skip the numpy conv.
    @memoize("topi.tests.test_topi_conv2d_hwcn.verify_hwcn")
    def get_ref_data():
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=b_shape).astype(dtype)
        # Dilation is modeled by pre-dilating the kernel before the reference conv.
        dw_np = tvm.topi.testing.dilate_python(w_np, (dilation, dilation, 1, 1))
        c1_np = tvm.topi.testing.conv2d_hwcn_python(a_np, dw_np, stride, padding)
        c2_np = c1_np + b_np
        c3_np = np.maximum(c2_np, 0)
        return a_np, w_np, b_np, c1_np, c2_np, c3_np
    a_np, w_np, b_np, c1_np, c2_np, c3_np = get_ref_data()
    def check_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_hwcn_implement)
            t_conv = fcompute(A, W, stride, padding, dilation)
            t_bias = topi.add(t_conv, B)
            t_relu = topi.nn.relu(t_bias)
            s1 = fschedule([t_conv])
            s2 = fschedule([t_bias])
            s3 = fschedule([t_relu])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        # One output buffer per fused variant, zero-initialized on the device.
        conv_out = tvm.nd.array(np.zeros(get_const_tuple(t_conv.shape), dtype=t_conv.dtype), dev)
        bias_out = tvm.nd.array(np.zeros(get_const_tuple(t_bias.shape), dtype=t_bias.dtype), dev)
        relu_out = tvm.nd.array(np.zeros(get_const_tuple(t_relu.shape), dtype=t_relu.dtype), dev)
        func1 = tvm.build(s1, [A, W, t_conv], target)
        func2 = tvm.build(s2, [A, W, B, t_bias], target)
        func3 = tvm.build(s3, [A, W, B, t_relu], target)
        func1(a, w, conv_out)
        func2(a, w, b, bias_out)
        func3(a, w, b, relu_out)
        tvm.testing.assert_allclose(conv_out.numpy(), c1_np, rtol=1e-5)
        tvm.testing.assert_allclose(bias_out.numpy(), c2_np, rtol=1e-5)
        tvm.testing.assert_allclose(relu_out.numpy(), c3_np, rtol=1e-5)
    for target in ["cuda", "opencl", "metal", "rocm", "vulkan", "nvptx"]:
        check_target(target)
@tvm.testing.requires_gpu
def test_conv2d_hwcn():
    """Sweep conv2d_hwcn over both paddings, int and tuple strides, and dilation."""
    # (batch, in_channel, in_size, num_filter, kernel, stride)
    base_cases = [
        (1, 256, 32, 128, 3, 1),
        (1, 256, 32, 256, 3, 1),
        (4, 128, 16, 128, 5, 2),
        (4, 128, 16, 256, 5, 2),
    ]
    for padding in ["SAME", "VALID"]:
        for nbatch, in_c, in_size, filters, kernel, stride in base_cases:
            verify_conv2d_hwcn(nbatch, in_c, in_size, filters, kernel, stride, padding)
    # dilation = 2
    verify_conv2d_hwcn(1, 256, 32, 256, 3, 1, "SAME", dilation=2)
    # Same sweep again, but passing the stride as a tuple.
    for padding in ["SAME", "VALID"]:
        for nbatch, in_c, in_size, filters, kernel, stride in base_cases:
            verify_conv2d_hwcn(nbatch, in_c, in_size, filters, kernel, (stride, stride), padding)
if __name__ == "__main__":
    test_conv2d_hwcn()
| 5,123 | 41.347107 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_scan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import topi
# Per-op lookup of the generic (extern) and CUDA implementations of each scan op.
topi_funcs = {
    "cumsum": {"generic": topi.cumsum, "cuda": topi.cuda.cumsum},
    "cumprod": {"generic": topi.cumprod, "cuda": topi.cuda.cumprod},
}
# Identity element used as the leading value of an exclusive scan.
identity_value = {"cumsum": 0, "cumprod": 1}
def get_implementations(name, axis, dtype, exclusive):
    """Return the target -> (compute, schedule) table for the scan op ``name``.

    The single CUDA implementation serves every GPU-style backend; ``axis``,
    ``dtype`` and ``exclusive`` are baked into the returned compute closures.
    """
    generic_fn = topi_funcs[name]["generic"]
    cuda_fn = topi_funcs[name]["cuda"]
    impls = {
        "generic": (
            lambda x: generic_fn(x, axis, dtype, exclusive=exclusive),
            topi.generic.schedule_extern,
        ),
    }
    cuda_pair = (
        lambda x: cuda_fn(x, axis, dtype, exclusive=exclusive),
        topi.cuda.schedule_scan,
    )
    for gpu_target in ("cuda", "nvptx", "vulkan", "metal"):
        impls[gpu_target] = cuda_pair
    return impls
def _run_tests(
    dev,
    target,
    op_name: str = "cumsum",
    gt_func: Callable[..., np.array] = np.cumsum,
):
    """Exercise the scan op ``op_name`` against the numpy reference ``gt_func``.

    Covers bool/int/float inputs, explicit axes, dtype promotion, values near
    the int32 overflow boundary, and exclusive scans along each tested axis.
    """
    def check_scan(np_ref, data, axis=None, dtype=None, exclusive=False):
        implementations = get_implementations(op_name, axis, dtype, exclusive)
        fcompute, fschedule = tvm.topi.testing.dispatch(target, implementations)
        tvm.topi.testing.compare_numpy_tvm([data], np_ref, target, dev, fcompute, fschedule)
    data = np.array([2, 3, 0])
    check_scan(gt_func(data), data)
    # Boolean inputs must be accumulated as integers.
    data = np.random.rand(10) > 0.5
    data = data.astype(np.int32)
    check_scan(gt_func(data, dtype=np.int32), data)
    check_scan(gt_func(data), data, dtype="int64")
    data = np.random.rand(10) > 0.5
    check_scan(gt_func(data, dtype=np.int32), data, dtype="int32")
    for in_dtype in ["float32", "float64"]:
        if target == "metal" and in_dtype == "float64":
            # float64 is not supported in metal
            continue
        data = np.random.randn(10, 10).astype(in_dtype)
        check_scan(gt_func(data), data)
        check_scan(gt_func(data, axis=0), data, axis=0)
        check_scan(gt_func(data, axis=1), data, axis=1)
        data = np.random.randn(10, 5, 10).astype(in_dtype)
        check_scan(gt_func(data), data)
        check_scan(gt_func(data, axis=0), data, axis=0)
        check_scan(gt_func(data, axis=1), data, axis=1)
        check_scan(gt_func(data, axis=-1), data, axis=-1)
    for in_dtype in ["int32", "int64"]:
        data = np.random.randint(-100, 100, size=(100, 100)).astype(in_dtype)
        check_scan(gt_func(data, dtype=in_dtype), data)
        check_scan(gt_func(data), data, dtype="int64")
        check_scan(gt_func(data, axis=0, dtype=in_dtype), data, axis=0)
        check_scan(gt_func(data, axis=1, dtype=in_dtype), data, axis=1)
        # Values near 2**31 force accumulation in int64 to avoid overflow.
        data = np.random.randint(1 << 30, (1 << 31) - 1, size=(100)).astype(in_dtype)
        check_scan(gt_func(data), data, dtype="int64")
    # Exclusive scans: shift the inclusive result one step along the scan axis
    # and seed with the op's identity element (0 for cumsum, 1 for cumprod).
    data = np.random.randint(-100, 100, size=(100, 100)).astype("int64")
    expected_result = np.roll(gt_func(data), 1)
    expected_result[0] = identity_value[op_name]
    check_scan(expected_result, data, dtype="int64", exclusive=True)
    expected_result = np.roll(gt_func(data, axis=0, dtype=in_dtype), 1, axis=0)
    expected_result[0, :] = identity_value[op_name]
    check_scan(expected_result, data, axis=0, exclusive=True)
    expected_result = np.roll(gt_func(data, axis=1, dtype=in_dtype), 1, axis=1)
    expected_result[:, 0] = identity_value[op_name]
    # Fix: the exclusive axis=1 expected_result was previously unused -- the old
    # call re-checked the inclusive scan, so exclusive axis=1 was never tested.
    check_scan(expected_result, data, axis=1, exclusive=True)
@tvm.testing.parametrize_targets
def test_cumsum(dev, target):
    # Prefix-sum scan checks; np.cumsum is the ground truth.
    _run_tests(dev, target, op_name="cumsum", gt_func=np.cumsum)
@tvm.testing.parametrize_targets
def test_cumprod(dev, target):
    # Prefix-product scan checks; np.cumprod is the ground truth.
    _run_tests(dev, target, op_name="cumprod", gt_func=np.cumprod)
if __name__ == "__main__":
    # Manual entry point: run both scan ops on every backend without pytest.
    test_cumsum(tvm.device("cpu"), tvm.target.Target("llvm"))
    test_cumsum(tvm.device("cuda"), tvm.target.Target("cuda"))
    test_cumsum(tvm.device("nvptx"), tvm.target.Target("nvptx"))
    test_cumsum(tvm.device("vulkan"), tvm.target.Target("vulkan"))
    test_cumsum(tvm.device("metal"), tvm.target.Target("metal"))
    test_cumprod(tvm.device("cpu"), tvm.target.Target("llvm"))
    test_cumprod(tvm.device("cuda"), tvm.target.Target("cuda"))
    test_cumprod(tvm.device("nvptx"), tvm.target.Target("nvptx"))
    test_cumprod(tvm.device("vulkan"), tvm.target.Target("vulkan"))
    test_cumprod(tvm.device("metal"), tvm.target.Target("metal"))
| 5,538 | 37.2 | 92 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_upsampling.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for upsampling"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
import math
from tvm.topi.utils import nchw_pack_layout
def verify_upsampling(
    batch,
    in_channel,
    in_height,
    in_width,
    scale_h,
    scale_w,
    layout="NCHW",
    method="nearest_neighbor",
    in_batch_block=0,
    in_channel_block=0,
):
    """Compare topi.nn.upsampling with the python resize reference on all targets.

    ``in_batch_block``/``in_channel_block`` are only consulted when ``layout``
    is a packed NCHW[x]n[y]c layout (the ``nchw_pack_layout`` branch below);
    for plain NCHW/NHWC they are ignored.
    """
    if layout == "NCHW":
        A = te.placeholder((batch, in_channel, in_height, in_width), name="A")
        dtype = A.dtype
        out_shape = (
            batch,
            in_channel,
            int(round(in_height * scale_h)),
            int(round(in_width * scale_w)),
        )
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
    elif nchw_pack_layout(layout):
        A = te.placeholder(
            (batch, in_channel, in_height, in_width, in_batch_block, in_channel_block), name="A"
        )
        dtype = A.dtype
        out_shape = (
            batch,
            in_channel,
            int(round(in_height * scale_h)),
            int(round(in_width * scale_w)),
            in_batch_block,
            in_channel_block,
        )
        a_np = np.random.uniform(
            size=(batch, in_channel, in_height, in_width, in_batch_block, in_channel_block)
        ).astype(dtype)
    elif layout == "NHWC":
        A = te.placeholder((batch, in_height, in_width, in_channel), name="A")
        dtype = A.dtype
        out_shape = (
            batch,
            int(round(in_height * scale_h)),
            int(round(in_width * scale_w)),
            in_channel,
        )
        a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    B = topi.nn.upsampling(A, scale_h, scale_w, layout=layout, method=method, align_corners=False)
    # Reference drops the "bi" prefix ("bilinear" -> "linear") expected by resize2d.
    b_np = tvm.topi.testing.resize2d_python(
        a_np,
        (scale_h, scale_w),
        layout,
        method[2:] if method[0:2] == "bi" else method,
        "asymmetric",
    )
    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
            a = tvm.nd.array(a_np, dev)
            b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
            f = tvm.build(s, [A, B], target)
            f(a, b)
            tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
def test_int_div_upsampling():
    """Test whether upsampling op is tilable when scale_h and scale_w is integer.
    Compute_at cannot work correctly in the original floating-point multiplication.
    After using integer division,compute_at can work correctly and reduce the
    capacity of cache buffer.
    In this test case, scale_h and scale_w are set to integers, the size
    of cache buffer should be equal to (h_i/scale_h * w_i/scale_w * c_i).
    """
    dtype = "int8"
    scale_h = 2
    scale_w = 2
    x = te.placeholder([1, 32, 64, 64], dtype, "x")
    y = topi.nn.upsampling(x, scale_h, scale_w)
    func = te.create_prim_func([x, y])
    s = tvm.tir.Schedule(func)
    block = s.get_block("resize")
    cache = s.cache_read(block, 0, "local")
    n, c, h, w = s.get_loops(block)
    s_factor = 8
    # Tile every spatial/channel loop by 8 and attach the cached read at the
    # outer width loop; with integer division the cache stays small.
    c_o, c_i = s.split(c, factors=[None, s_factor])
    h_o, h_i = s.split(h, factors=[None, s_factor])
    w_o, w_i = s.split(w, factors=[None, s_factor])
    s.reorder(n, c_o, h_o, w_o, h_i, w_i, c_i)
    s.compute_at(cache, w_o)
    # Expected cache extent: one 8x8x8 tile shrunk by the scale factors.
    wanted_rt = s_factor**3 / (scale_h * scale_w)
    def analyze_upsampling_allocate(stmt):
        if isinstance(stmt, tvm.tir.stmt.Allocate):
            tvm.testing.assert_allclose(stmt.extents[0].value, wanted_rt)
    lowerd_irmodule = tvm.lower(s.mod["main"])
    # Walk the lowered TIR and check the cache allocation size.
    tvm.tir.stmt_functor.post_order_visit(
        lowerd_irmodule.functions.items()[0][1].body, analyze_upsampling_allocate
    )
@tvm.testing.uses_gpu
def test_upsampling():
    # Sweep 2D upsampling over layouts, methods, and integral/fractional scales.
    # nearest_neighbor - NCHW
    verify_upsampling(8, 16, 32, 32, 2.0, 2.0)
    verify_upsampling(2, 32, 64, 64, 3.0, 3.0)
    verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0)
    ## nearest_neighbor - NHWC
    verify_upsampling(8, 16, 32, 32, 2.0, 2.0, layout="NHWC")
    verify_upsampling(2, 32, 64, 64, 3.0, 3.0, layout="NHWC")
    verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, layout="NHWC")
    # bilinear - NCHW
    verify_upsampling(2, 2, 32, 32, 2.0, 2.0, method="bilinear")
    verify_upsampling(2, 2, 32, 32, 3.0, 3.0, method="bilinear")
    verify_upsampling(1, 64, 22, 32, 1.954545497894287, 2.0, method="bilinear")
    # nearest_neighbor - NCHWinic
    # NOTE(review): these calls pass in_batch_block/in_channel_block but no packed
    # layout string, so verify_upsampling takes its plain NCHW branch and the
    # block sizes are ignored (compare the call below that passes "NCHW1n16c").
    # Confirm whether layouts like "NCHW4n8c" were intended here.
    verify_upsampling(2, 2, 32, 32, in_batch_block=4, in_channel_block=8, scale_h=2.0, scale_w=2.0)
    verify_upsampling(2, 2, 64, 64, in_batch_block=1, in_channel_block=16, scale_h=3.0, scale_w=3.0)
    verify_upsampling(
        1, 4, 22, 32, in_batch_block=1, in_channel_block=16, scale_h=1.954545497894287, scale_w=2.0
    )
    # bilinear - NCHWinic
    verify_upsampling(
        2,
        2,
        32,
        32,
        in_batch_block=1,
        in_channel_block=1,
        scale_h=2.0,
        scale_w=2.0,
        method="bilinear",
    )
    verify_upsampling(
        2,
        2,
        32,
        32,
        in_batch_block=1,
        in_channel_block=1,
        scale_h=3.0,
        scale_w=3.0,
        method="bilinear",
    )
    verify_upsampling(
        2,
        4,
        22,
        32,
        in_batch_block=1,
        in_channel_block=16,
        scale_h=1.954545497894287,
        scale_w=2.0,
        layout="NCHW1n16c",
        method="bilinear",
    )
    # bilinear - NHWC
    verify_upsampling(2, 2, 32, 32, 2.0, 2.0, layout="NHWC", method="bilinear")
    verify_upsampling(2, 2, 32, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
    verify_upsampling(1, 64, 22, 32, 3.0, 3.0, layout="NHWC", method="bilinear")
def verify_upsampling3d(
    batch,
    in_channel,
    in_depth,
    in_height,
    in_width,
    scale_d,
    scale_h,
    scale_w,
    layout="NCDHW",
    method="nearest_neighbor",
):
    """Compare topi.nn.upsampling3d with the python resize3d reference on all targets."""
    if layout == "NCDHW":
        A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
        dtype = A.dtype
        out_shape = (
            batch,
            in_channel,
            int(round(in_depth * scale_d)),
            int(round(in_height * scale_h)),
            int(round(in_width * scale_w)),
        )
        a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
            dtype
        )
    elif layout == "NDHWC":
        A = te.placeholder((batch, in_depth, in_height, in_width, in_channel), name="A")
        dtype = A.dtype
        out_shape = (
            batch,
            int(round(in_depth * scale_d)),
            int(round(in_height * scale_h)),
            int(round(in_width * scale_w)),
            in_channel,
        )
        a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
            dtype
        )
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    B = topi.nn.upsampling3d(
        A,
        scale_d,
        scale_h,
        scale_w,
        layout=layout,
        method=method,
        coordinate_transformation_mode="asymmetric",
    )
    # Reference drops the "tri" prefix ("trilinear" -> "linear") expected by resize3d.
    b_np = tvm.topi.testing.resize3d_python(
        a_np,
        (scale_d, scale_h, scale_w),
        layout,
        method[3:] if method[0:3] == "tri" else method,
        "asymmetric",
    )
    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
            a = tvm.nd.array(a_np, dev)
            b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
            f = tvm.build(s, [A, B], target)
            f(a, b)
            tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5, atol=1e-5)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_upsampling3d():
    """Run 3D upsampling in both layouts with nearest and trilinear resampling."""
    # (batch, channels, depth, height, width, scale_d, scale_h, scale_w)
    nearest_cases = [
        (8, 8, 16, 16, 16, 2.0, 2.0, 2.0),
        (2, 16, 32, 32, 32, 3.0, 3.0, 3.0),
        (1, 8, 11, 16, 6, 1.954545497894287, 2.0, 1.5),
    ]
    trilinear_cases = [
        (2, 2, 16, 16, 16, 2.0, 2.0, 2.0),
        (2, 2, 32, 32, 32, 3.0, 3.0, 3.0),
        (1, 2, 11, 16, 6, 1.954545497894287, 2.0, 1.5),
    ]
    for method, cases in [("nearest_neighbor", nearest_cases), ("trilinear", trilinear_cases)]:
        for layout in ["NCDHW", "NDHWC"]:
            for args in cases:
                verify_upsampling3d(*args, layout=layout, method=method)
if __name__ == "__main__":
    test_upsampling()
    test_upsampling3d()
    test_int_div_upsampling()
| 10,226 | 31.674121 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_relu.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for relu activation"""
import sys
import os
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
from tvm.contrib.nvcc import have_fp16
import pytest
import tvm.testing
# Parametrized (rows, cols, dtype) cases shared by test_relu.
m, n, dtype = tvm.testing.parameters(
    (10, 128, "float32"),
    (128, 64, "float16"),
    # Commented due to weird killed
    # (1024 * 100, 512, "float32"),
)
def test_relu(target, dev, m, n, dtype):
    """Check topi.nn.relu against numpy on an (m, n) input of ``dtype``."""
    # Fix: skip unsupported fp16 targets up front, before any placeholders or
    # random data are built (previously the skip ran after that work).
    if dtype == "float16" and target == "cuda" and not have_fp16(tvm.cuda(0).compute_version):
        pytest.skip("Skip because %s does not have fp16 support" % target)
    A = te.placeholder((m, n), name="A", dtype=dtype)
    B = topi.nn.relu(A)
    a_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(A.shape)).astype(A.dtype)
    # Reference: identity for positive entries, zero elsewhere.
    b_np = a_np * (a_np > 0)
    print("Running on target: %s" % target)
    with tvm.target.Target(target):
        s = tvm.topi.testing.get_elemwise_schedule(target)(B)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
    # Building with the CSE pass disabled
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        foo = tvm.build(s, [A, B], target, name="relu")
    foo(a, b)
    tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
# Single parametrized case: a 100-element vector with negative slope 0.1.
size, alpha = tvm.testing.parameters((100, 0.1))
def test_leaky_relu(size, alpha):
    """Compile leaky_relu for llvm and compare against an elementwise numpy reference."""
    placeholder = te.placeholder((size,), name="A")
    out = topi.nn.leaky_relu(placeholder, alpha)
    sched = te.create_schedule([out.op])
    x_np = np.random.uniform(size=get_const_tuple(placeholder.shape)).astype(placeholder.dtype)
    # Reference: pass positives through, scale negatives by alpha.
    ref_np = np.where(x_np > 0, x_np, x_np * alpha)
    dev = tvm.cpu(0)
    x_nd = tvm.nd.array(x_np, dev)
    y_nd = tvm.nd.array(np.zeros(get_const_tuple(out.shape), dtype=out.dtype), dev)
    # Building with the CSE pass disabled
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        fn = tvm.build(sched, [placeholder, out], "llvm", name="leaky_relu")
    fn(x_nd, y_nd)
    tvm.testing.assert_allclose(y_nd.numpy(), ref_np, rtol=1e-5)
# (input shape, weight shape, channel axis, shape the weight is reshaped to for
# broadcasting in the numpy reference) -- cases consumed by test_prelu.
x, w, axis, weight_reshape = tvm.testing.parameters(
    ((1, 3, 2, 2), (3,), 1, (3, 1, 1)),
    ((1, 3, 2, 2), (2,), 2, (2, 1)),
    ((1, 3), (3,), 1, (3,)),
)
def test_prelu(x, w, axis, weight_reshape):
    """Check topi.nn.prelu against a numpy reference on the parametrized shapes."""
    X = te.placeholder((x), name="X")
    W = te.placeholder((w), name="W")
    x_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(X.shape)).astype(X.dtype)
    w_np = np.random.uniform(low=-1.0, high=1.0, size=get_const_tuple(W.shape)).astype(W.dtype)
    # Reference: negatives are scaled by the (broadcast) per-channel weight.
    def _prelu_numpy(x, W):
        return (x < 0) * (x * W.reshape(weight_reshape)) + (x >= 0) * x
    B = topi.nn.prelu(X, W, axis)
    s = te.create_schedule([B.op])
    dev = tvm.cpu(0)
    x_tvm = tvm.nd.array(x_np, dev)
    w_tvm = tvm.nd.array(w_np, dev)
    b = tvm.nd.array(np.zeros(get_const_tuple(X.shape), dtype=B.dtype), dev)
    # Building with the CSE pass disabled
    with tvm.transform.PassContext(opt_level=3, disabled_pass=["tir.CommonSubexprElimTIR"]):
        foo = tvm.build(s, [X, W, B], "llvm", name="prelu")
    foo(x_tvm, w_tvm, b)
    out_np = _prelu_numpy(x_np, w_np)
    tvm.testing.assert_allclose(b.numpy(), out_np, rtol=1e-5)
if __name__ == "__main__":
    tvm.testing.main()
| 4,082 | 33.897436 | 95 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.