repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
tvm | tvm-main/tests/python/topi/python/test_topi_conv3d_ncdhw.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.utils import get_pad_tuple3d
from tvm.topi.utils import get_const_tuple
# Target-key -> (compute, schedule) pairs for conv3d in NCDHW layout.
# Consumed by tvm.topi.testing.dispatch inside verify_conv3d_ncdhw to pick
# the implementation matching the target under test.
_conv3d_ncdhw_implement = {
    "generic": (topi.nn.conv3d_ncdhw, topi.generic.schedule_conv3d_ncdhw),
    "cpu": (topi.x86.conv3d_ncdhw, topi.x86.schedule_conv3d_ncdhw),
    "gpu": (topi.cuda.conv3d_ncdhw, topi.cuda.schedule_conv3d_ncdhw),
}
def verify_conv3d_ncdhw(
    batch,
    in_channel,
    in_size,
    num_filter,
    kernel,
    stride,
    padding,
    dilation=1,
    groups=1,
    add_bias=False,
    add_relu=False,
):
    """Compile and run a conv3d (NCDHW) workload on every enabled target and
    compare the result against the numpy reference implementation.

    Parameters
    ----------
    batch, in_channel, in_size, num_filter : int
        Workload dimensions; ``in_size`` is used for depth, height and width.
    kernel : int or tuple/list of 3 ints
        Kernel size: a single int (cubic kernel) or ``(kd, kh, kw)``.
    stride : int
        Stride applied equally to all three spatial dimensions.
    padding : int, str or tuple
        Padding spec accepted by ``get_pad_tuple3d`` (e.g. "VALID", 3- or 6-tuple).
    dilation : int, optional
        Dilation applied equally to all three spatial dimensions.
    groups : int, optional
        Number of convolution groups.
    add_bias, add_relu : bool, optional
        Optionally fuse a bias add and/or ReLU after the convolution.

    Raises
    ------
    ValueError
        If ``kernel`` is a sequence whose length is not 3, or not int/sequence.
    """
    if isinstance(kernel, (tuple, list)):
        if len(kernel) == 3:
            kernel_d = kernel[0]
            kernel_h = kernel[1]
            kernel_w = kernel[2]
        else:
            raise ValueError("Size of kernel can only be 3")
    elif isinstance(kernel, int):
        kernel_d = kernel_h = kernel_w = kernel
    else:
        raise ValueError("Unknown kernel option %s" % kernel)
    # Normalize the padding spec into the six per-face pad amounts; the sum is
    # only used to label the workload in log output and kernel names.
    pad_front, pad_top, pad_left, pad_back, pad_bottom, pad_right = get_pad_tuple3d(
        padding, (kernel_d, kernel_h, kernel_w)
    )
    padding_sum = pad_front + pad_back + pad_top + pad_left + pad_bottom + pad_right
    print(
        "Workload: (%d, %d, %d, %d, %d, %d, %d, %d, %d, %d)"
        % (
            batch,
            in_channel,
            in_size,
            num_filter,
            kernel_d,
            kernel_h,
            kernel_w,
            stride,
            padding_sum,
            dilation,
        )
    )
    in_depth = in_height = in_width = in_size
    A = te.placeholder((batch, in_channel, in_depth, in_height, in_width), name="A")
    # Weight layout: (out_channels, in_channels_per_group, kd, kh, kw).
    W = te.placeholder((num_filter, in_channel // groups, kernel_d, kernel_h, kernel_w), name="W")
    bias = te.placeholder((num_filter, 1, 1, 1), name="bias")
    a_shape = get_const_tuple(A.shape)
    w_shape = get_const_tuple(W.shape)
    bias_shape = get_const_tuple(bias.shape)
    dtype = A.dtype
    # NOTE(review): get_ref_data takes no arguments, so the on-disk memoize
    # cache is not keyed by the workload parameters -- presumably relies on
    # the memoize implementation's keying; verify before reusing this pattern.
    @memoize("topi.tests.test_topi_conv3d_ncdhw.verify_conv3d_ncdhw")
    def get_ref_data():
        # Reference data: random inputs/weights/bias plus the numpy conv3d
        # result (with bias/relu applied when requested).
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        w_np = np.random.uniform(size=w_shape).astype(dtype)
        b_np = np.random.uniform(size=bias_shape).astype(dtype)
        # Pre-dilate the weights so the plain conv3d reference models dilation.
        dw_np = tvm.topi.testing.dilate_python(w_np, (1, 1, dilation, dilation, dilation))
        c_np = tvm.topi.testing.conv3d_ncdhw_python(a_np, dw_np, stride, padding, groups)
        if add_bias:
            c_np += b_np
        if add_relu:
            c_np = np.maximum(c_np, 0)
        return a_np, w_np, b_np, c_np
    a_np, w_np, b_np, c_np = get_ref_data()
    def check_target(target, dev):
        # Build the conv3d (plus optional fused bias/relu) for `target`, run
        # it on `dev`, and compare with the numpy reference.
        print("Running on target: %s" % target)
        fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv3d_ncdhw_implement)
        with tvm.target.Target(target):
            C = fcompute(
                A,
                W,
                (stride, stride, stride),
                padding,
                (dilation, dilation, dilation),
                groups,
                dtype,
            )
            if add_bias:
                C = topi.add(C, bias)
            if add_relu:
                C = topi.nn.relu(C)
            s = fschedule([C])
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        # The bias case needs an extra input buffer, so the build/run call
        # differs only in the argument list; the kernel name encodes the
        # workload for easier profiling.
        if add_bias:
            func = tvm.build(
                s,
                [A, W, bias, C],
                target,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel_d,
                    kernel_h,
                    kernel_w,
                    stride,
                    padding_sum,
                    dilation,
                    groups,
                ),
            )
            func(a, w, b, c)
        else:
            func = tvm.build(
                s,
                [A, W, C],
                target,
                name="relu_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d_%d"
                % (
                    batch,
                    in_channel,
                    in_size,
                    num_filter,
                    kernel_d,
                    kernel_h,
                    kernel_w,
                    stride,
                    padding_sum,
                    dilation,
                    groups,
                ),
            )
            func(a, w, c)
        tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-4, atol=1e-6)
    for target, dev in tvm.testing.enabled_targets():
        with autotvm.tophub.context(target):  # load tophub pre-tuned parameters
            check_target(target, dev)
@tvm.testing.uses_gpu
def test_conv3d_ncdhw():
    """Exercise conv3d NCDHW across a grid of workloads: plain 1x1x1 convs,
    fused bias/relu, dilation, batching, asymmetric padding specs (3- and
    6-tuples, "VALID"), non-cubic kernels, and grouped convolution."""
    # 3DCNN workloads
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 0)
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 0)
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, 1)
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, 1)
    # bias, relu
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_relu=True)
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True)
    verify_conv3d_ncdhw(1, 64, 56, 3, 1, 1, 1, add_bias=True, add_relu=True)
    # dilation = 2
    verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, 1, dilation=2)
    # batch size
    verify_conv3d_ncdhw(4, 64, 56, 5, 3, 1, 1)
    # weird workloads
    verify_conv3d_ncdhw(2, 2, 2, 2, 2, 2, 2)
    verify_conv3d_ncdhw(3, 3, 3, 3, 3, 3, 3)
    # Asymmetric padding
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 0, 0, 1, 1, 1))
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 2, 1, 2, 1))
    verify_conv3d_ncdhw(1, 64, 56, 3, 3, 1, (2, 2, 2, 1, 1, 1), dilation=2)
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, (0, 1, 1))
    verify_conv3d_ncdhw(1, 32, 32, 1, 1, 1, (2, 1, 0))
    verify_conv3d_ncdhw(1, 32, 32, 1, 3, 1, "VALID")
    verify_conv3d_ncdhw(1, 32, 32, 5, 1, 1, "VALID")
    # DHW kernel layout
    verify_conv3d_ncdhw(1, 32, 56, 16, (3, 5, 7), 2, (1, 2, 3))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 7), 2, (1, 2, 3, 0, 3, 2))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 3, 7), 2, (1, 2, 3))
    verify_conv3d_ncdhw(1, 3, 56, 16, (3, 7, 3), 2, (1, 3, 1))
    # grouped workloads
    verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 0, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 0, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 8, 1, 1, 1, groups=4)
    verify_conv3d_ncdhw(1, 32, 32, 4, 1, 1, 1, groups=4)
| 7,571 | 32.504425 | 98 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for broadcasting operators."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.contrib.nvcc import have_fp16
import tvm.testing
def verify_expand_dims(in_shape, out_shape, axis, num_newaxis):
    """Check topi.expand_dims against a numpy reshape on all enabled targets."""
    placeholder = te.placeholder(shape=in_shape, name="A")
    expanded = topi.expand_dims(placeholder, axis, num_newaxis)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_broadcast_schedule(target)(expanded)
        built = tvm.build(sched, [placeholder, expanded], target, name="expand_dims")
        src = np.random.uniform(size=in_shape).astype(placeholder.dtype)
        # expand_dims only inserts size-1 axes, so a reshape is the reference.
        expected = src.reshape(out_shape)
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.array(np.empty(out_shape).astype(expanded.dtype), dev)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_reinterpret(in_shape, in_dtype, out_dtype, generator):
    """Check topi.reinterpret against numpy's view() on all enabled targets."""
    src = te.placeholder(shape=in_shape, name="A", dtype=in_dtype)
    dst = topi.reinterpret(src, out_dtype)
    def run_target(target, dev):
        # fp16 requires hardware support on CUDA; skip when it is absent.
        if in_dtype == "float16" and target == "cuda" and not have_fp16(dev.compute_version):
            print("Skip because %s does not have fp16 support" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_elemwise_schedule(target)(dst)
        built = tvm.build(sched, [src, dst], target, name="reinterpret")
        data = generator(in_shape).astype(in_dtype)
        # Bit-level reinterpretation: numpy's view() is the exact reference.
        expected = data.view(dst.dtype)
        data_nd = tvm.nd.array(data, dev)
        dst_nd = tvm.nd.array(np.empty(in_shape).astype(dst.dtype), dev)
        built(data_nd, dst_nd)
        np.testing.assert_equal(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_transpose(in_shape, axes):
    """Check topi.transpose against numpy's transpose on all enabled targets."""
    inp = te.placeholder(shape=in_shape, name="A")
    out = topi.transpose(inp, axes)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="transpose")
        # Distinct arange values make any permutation mistake visible.
        src = np.arange(np.prod(in_shape)).reshape(in_shape).astype(inp.dtype)
        expected = src.transpose(axes)
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=out.dtype)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_reshape(src_shape, dst_shape):
    """Check topi.reshape against np.reshape on all enabled targets."""
    inp = te.placeholder(shape=src_shape, name="A")
    out = topi.reshape(inp, dst_shape)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="reshape")
        src = np.random.normal(size=src_shape).astype(inp.dtype)
        expected = np.reshape(src, newshape=dst_shape)
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.empty(dst_shape, device=dev, dtype=out.dtype)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_squeeze(src_shape, axis):
    """Check topi.squeeze against np.squeeze on all enabled targets."""
    inp = te.placeholder(shape=src_shape, name="A")
    out = topi.squeeze(inp, axis=axis)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="squeeze")
        src = np.random.normal(size=src_shape).astype(inp.dtype)
        expected = np.squeeze(src, axis=axis)
        src_nd = tvm.nd.array(src, dev)
        # The reference result's shape doubles as the output buffer shape.
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=out.dtype)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_concatenate(shapes, axis):
    """Check topi.concatenate against np.concatenate on all enabled targets.

    ``shapes`` is a list of input shapes (one placeholder per entry) and
    ``axis`` is the concatenation axis.
    """
    def get_concat_schedule(target):
        # x86 and arm_cpu ship dedicated concatenate schedules; every other
        # target falls back to the generic injective schedule.
        schedule_map = {
            "cpu": topi.x86.schedule_concatenate,
            "arm_cpu": topi.arm_cpu.schedule_concatenate,
        }
        if isinstance(target, str):
            target = tvm.target.Target(target)
        for key in target.keys:
            if key in schedule_map:
                return schedule_map[key]
        return tvm.topi.testing.get_injective_schedule(target)
    tensor_l = []
    for i, shape in enumerate(shapes):
        tensor_l.append(te.placeholder(shape, name="A" + str(i)))
    out_tensor = topi.concatenate(a_tuple=tensor_l, axis=axis)
    def check_device(target, dev):
        # Build, run, and compare against the numpy reference on one target.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = get_concat_schedule(target)(out_tensor)
        foo = tvm.build(s, tensor_l + [out_tensor], target, name="concatenate")
        data_npys = [np.random.normal(size=shape).astype(tensor_l[0].dtype) for shape in shapes]
        out_npy = np.concatenate(data_npys, axis=axis)
        data_nds = [tvm.nd.array(data_npy, dev) for data_npy in data_npys]
        out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=out_tensor.dtype)
        foo(*(data_nds + [out_nd]))
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
def verify_stack(shapes, axis):
    """Check topi.stack against np.stack on all enabled targets."""
    placeholders = [te.placeholder(shape, name="A" + str(i)) for i, shape in enumerate(shapes)]
    stacked = topi.stack(placeholders, axis)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_broadcast_schedule(target)(stacked)
        built = tvm.build(sched, placeholders + [stacked], target, name="stack")
        srcs = [np.random.normal(size=shape).astype(placeholders[0].dtype) for shape in shapes]
        expected = np.stack(srcs, axis=axis)
        src_nds = [tvm.nd.array(arr, dev) for arr in srcs]
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=stacked.dtype)
        built(*src_nds, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_split(src_shape, indices_or_sections, axis):
    """Check topi.split against np.split on all enabled targets."""
    inp = te.placeholder(shape=src_shape, name="A")
    pieces = topi.split(inp, indices_or_sections, axis=axis)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(pieces)
        built = tvm.build(sched, [inp] + list(pieces), target, name="split")
        src = np.random.normal(size=src_shape).astype(inp.dtype)
        expected_parts = np.split(src, indices_or_sections, axis=axis)
        src_nd = tvm.nd.array(src, dev)
        # One output buffer per split piece, shaped by the numpy reference.
        dst_nds = [
            tvm.nd.empty(part.shape, device=dev, dtype=pieces[0].dtype) for part in expected_parts
        ]
        built(src_nd, *dst_nds)
        for dst_nd, part in zip(dst_nds, expected_parts):
            tvm.testing.assert_allclose(dst_nd.numpy(), part)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_expand_like(in_shape, out_shape, axis):
    """Check topi.expand_like: broadcast A to B's shape along ``axis``.

    The numpy reference is built in place by inserting the requested axes
    with expand_dims and then replicating (via concatenate) up to the output
    extents. Runs on llvm only.
    """
    A = te.placeholder(shape=in_shape, name="A")
    B = te.placeholder(shape=out_shape, name="B")
    C = topi.expand_like(A, B, axis)
    s = te.create_schedule([C.op])
    def check_device(target):
        print("Running on target: %s" % target)
        dev = tvm.device(target, 0)
        f = tvm.build(s, [A, B, C], target, name="expand_like")
        input = np.random.uniform(size=in_shape).astype(A.dtype)
        tvm_input = tvm.nd.array(input, dev)
        odim = len(out_shape)
        # Normalize negative axes against the output rank, then process them
        # in ascending order so earlier insertions don't shift later ones.
        real_axis = [x if x >= 0 else x + odim for x in axis]
        real_axis = sorted(real_axis)
        for x in real_axis:
            input = np.expand_dims(input, x).astype(A.dtype)
        for x in real_axis:
            # Replicate each freshly inserted size-1 axis to the full extent.
            input = np.concatenate([input] * out_shape[x], axis=x).astype(A.dtype)
        assert input.shape == out_shape
        tvm_shape_like = tvm.nd.array(np.zeros(out_shape).astype(B.dtype), dev)
        out = tvm.nd.array(np.zeros(out_shape).astype(A.dtype), dev)
        f(tvm_input, tvm_shape_like, out)
        tvm.testing.assert_allclose(out.numpy(), input)
    for target in ["llvm"]:
        check_device(target)
def verify_flip(in_shape, axis):
    """Check topi.flip (with a fused +1) against np.flip on a fixed target list."""
    inp = te.placeholder(shape=in_shape, name="A")
    # The +1 forces an elementwise stage after the flip.
    out = topi.flip(inp, axis) + 1
    def run_target(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="reverse")
        src = np.random.uniform(size=in_shape).astype(inp.dtype)
        expected = np.flip(src, axis) + 1
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=inp.dtype)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target in ["llvm", "cuda", "opencl", "sdaccel", "aocl_sw_emu"]:
        run_target(target)
@tvm.testing.uses_gpu
def test_reverse_sequence():
    """Check topi.reverse_sequence against hand-computed expected outputs,
    covering positive/negative batch and sequence axes, int/float dtypes,
    4-D inputs, out-of-range sequence lengths, and the error path where
    seq_lengths does not match the batch dimension."""
    def verify_reverse_sequence(in_data, seq_lengths, batch_axis, seq_axis, ref_res):
        seq_lengths = np.array(seq_lengths).astype("int32")
        A = te.placeholder(shape=in_data.shape, name="A", dtype=str(in_data.dtype))
        B = te.placeholder(shape=seq_lengths.shape, name="B", dtype=str(seq_lengths.dtype))
        C = topi.reverse_sequence(A, B, seq_axis, batch_axis)
        def check_device(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_injective_schedule(target)(C)
            foo = tvm.build(s, [A, B, C], target, name="reverse_sequence")
            data_nd = tvm.nd.array(in_data, dev)
            seq_lengths_nd = tvm.nd.array(seq_lengths, dev)
            out_nd = tvm.nd.empty(in_data.shape, device=dev, dtype=A.dtype)
            foo(data_nd, seq_lengths_nd, out_nd)
            tvm.testing.assert_allclose(out_nd.numpy(), ref_res)
        for target, dev in tvm.testing.enabled_targets():
            check_device(target, dev)
    # Reverse along axis 0, batched along axis 1 (and the -1 alias of it).
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]]
    verify_reverse_sequence(indata, [1, 2, 3, 4], 1, 0, np.array(result))
    verify_reverse_sequence(indata, [1, 2, 3, 4], -1, 0, np.array(result))
    verify_reverse_sequence(
        indata.astype("float32"), [1, 2, 3, 4], 1, 0, np.array(result).astype("float32")
    )
    # Reverse along axis 1, batched along axis 0 (and the -1 alias of it).
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 1, 2, 3], [5, 4, 6, 7], [10, 9, 8, 11], [15, 14, 13, 12]]
    verify_reverse_sequence(indata, [1, 2, 3, 4], 0, 1, np.array(result))
    verify_reverse_sequence(indata, [1, 2, 3, 4], 0, -1, np.array(result))
    verify_reverse_sequence(
        indata.astype("float32"), [1, 2, 3, 4], 0, 1, np.array(result).astype("float32")
    )
    # Sequence lengths <= 1 leave the row untouched; lengths beyond the axis
    # extent reverse the whole row.
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [15, 14, 13, 12]]
    verify_reverse_sequence(indata, [-1, 0, 1, 5], 0, 1, np.array(result))
    # 4-D input, reversing along axis 1 with batch on axis 0.
    indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
    result = [
        [
            [[18, 19, 20], [21, 22, 23], [24, 25, 26]],
            [[9, 10, 11], [12, 13, 14], [15, 16, 17]],
            [[0, 1, 2], [3, 4, 5], [6, 7, 8]],
        ],
        [
            [[45, 46, 47], [48, 49, 50], [51, 52, 53]],
            [[36, 37, 38], [39, 40, 41], [42, 43, 44]],
            [[27, 28, 29], [30, 31, 32], [33, 34, 35]],
        ],
    ]
    verify_reverse_sequence(indata, [3, 3], 0, 1, np.array(result))
    # 4-D input with batch on axis 2 and per-batch sequence lengths.
    indata = np.array(np.arange(0, 54)).reshape([2, 3, 3, 3]).astype("int32")
    result = [
        [
            [[9, 10, 11], [21, 22, 23], [15, 16, 17]],
            [[0, 1, 2], [12, 13, 14], [6, 7, 8]],
            [[18, 19, 20], [3, 4, 5], [24, 25, 26]],
        ],
        [
            [[36, 37, 38], [48, 49, 50], [42, 43, 44]],
            [[27, 28, 29], [39, 40, 41], [33, 34, 35]],
            [[45, 46, 47], [30, 31, 32], [51, 52, 53]],
        ],
    ]
    verify_reverse_sequence(indata, [2, 3, 2], 2, 1, np.array(result))
    # seq_lengths longer than the batch axis must raise; the asserted text
    # (including the "reverse_sequnece" typo) matches TVM's actual message.
    indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32")
    result = []
    with pytest.raises(Exception) as execinfo:
        verify_reverse_sequence(indata, [2, 3, 2, 4, 5], 1, 0, np.array(result))
    assert (
        "For reverse_sequnece seq_lengths size should match with dimension of batch axis,"
        " but got dimension of batch_axis = 4, and seq_length size = 5" in execinfo.value.args[0]
    )
def verify_take(src_shape, indices_src, axis=None, mode="clip", indices_dtype="int32"):
    """Check topi.take against np.take on a fixed target list.

    Parameters
    ----------
    src_shape : tuple of int
        Shape of the data tensor (filled with arange values so any wrong
        index is visible).
    indices_src : array-like
        Indices to gather.
    axis : int, optional
        Axis to take along; flattened take when None.
    mode : str
        Out-of-bounds handling mode ("clip", "wrap", or "fast").
    indices_dtype : str
        dtype of the indices tensor.
    """
    src_dtype = "float32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    A = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
    indices = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
    if axis is None:
        out_tensor = topi.take(a=A, indices=indices, mode=mode)
    else:
        out_tensor = topi.take(a=A, indices=indices, axis=axis, mode=mode)
    def check_device(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(out_tensor)
        foo = tvm.build(s, [A] + [indices] + [out_tensor], target, name="take")
        # Total element count of the data tensor.
        shape_size = 1
        for dim in src_shape:
            shape_size *= dim
        data_npy = np.arange(shape_size, dtype=src_dtype).reshape(src_shape)
        # numpy has no "fast" mode; map it to "raise" for the reference
        # (hoisted out of the previously duplicated if/else branches).
        np_mode = "raise" if mode == "fast" else mode
        if axis is None:
            out_npys = np.take(data_npy, indices_src, mode=np_mode)
        else:
            out_npys = np.take(data_npy, indices_src, axis=axis, mode=np_mode)
        data_nd = tvm.nd.array(data_npy, dev)
        indices_nd = tvm.nd.array(indices_src, dev)
        out_nd = tvm.nd.empty(out_npys.shape, device=dev, dtype=src_dtype)
        foo(data_nd, indices_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npys)
    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_strided_slice(in_shape, begin, end, strides=None, axes=None):
    """Check topi.strided_slice (with a fused +1) against the python reference.

    Parameters
    ----------
    in_shape : tuple of int
        Shape of the input placeholder.
    begin, end : list of int
        Per-axis slice bounds.
    strides : list of int, optional
        Per-axis strides; defaults to all ones.
    axes : list of int, optional
        Axes the slice spec applies to; when given, the stride for each
        listed axis is selected out of the full-rank stride list.
    """
    A = te.placeholder(shape=in_shape, name="A")
    # Default to unit strides for every input axis. This was hard-coded to
    # [1, 1, 1], which only worked for rank-3 inputs.
    strides = [1] * len(in_shape) if strides is None else strides
    if axes:
        strides = [strides[axis] for axis in axes]
    B = topi.strided_slice(A, begin, end, strides, axes) + 1
    def check_device(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        foo = tvm.build(s, [A, B], target, name="stride_slice")
        x_np = np.random.uniform(size=in_shape).astype(A.dtype)
        out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides, axes=axes) + 1
        data_nd = tvm.nd.array(x_np, dev)
        out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
        foo(data_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_dynamic_strided_slice(in_shape, begin, end, strides=None):
    """Check topi.strided_slice (with a fused +1) when begin/end/strides are
    runtime tensors, against the python reference.

    Parameters
    ----------
    in_shape : tuple of int
        Shape of the input placeholder.
    begin, end : list of int
        Per-axis slice bounds, fed through int64 placeholders at runtime.
    strides : list of int, optional
        Per-axis strides; defaults to all ones.
    """
    A = te.placeholder(shape=in_shape, name="A")
    # One begin/end/stride entry per input axis, supplied at runtime.
    Begin = te.placeholder(shape=[len(in_shape)], name="begin", dtype="int64")
    End = te.placeholder(shape=[len(in_shape)], name="end", dtype="int64")
    Strides = te.placeholder(shape=[len(in_shape)], name="strides", dtype="int64")
    # Default to unit strides for every input axis. This was hard-coded to
    # [1, 1, 1], which only matched the rank-generic placeholders above for
    # rank-3 inputs.
    strides = [1] * len(in_shape) if strides is None else strides
    B = topi.strided_slice(A, Begin, End, Strides) + 1
    def check_device(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        foo = tvm.build(s, [A, Begin, End, Strides, B], target, name="stride_slice")
        x_np = np.random.uniform(size=in_shape).astype(A.dtype)
        out_npy = tvm.topi.testing.strided_slice_python(x_np, begin, end, strides) + 1
        data_nd = tvm.nd.array(x_np, dev)
        out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
        begin_nd = tvm.nd.array(np.array(begin).astype("int64"), dev)
        end_nd = tvm.nd.array(np.array(end).astype("int64"), dev)
        strides_nd = tvm.nd.array(np.array(strides).astype("int64"), dev)
        foo(data_nd, begin_nd, end_nd, strides_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_strided_set(in_shape, v_shape, begin, end, strides=None):
    """Check topi.strided_set (with a fused +1) against the python reference.

    Writes tensor V into A at the strided region described by begin/end (and
    optional strides), passing the bounds as runtime int32 tensors. The
    strided and non-strided variants differ in the build argument list, so
    both paths are kept explicit.
    """
    A = te.placeholder(shape=in_shape, name="A")
    V = te.placeholder(shape=v_shape, name="V")
    b = te.placeholder(shape=(len(begin),), name="b", dtype="int32")
    e = te.placeholder(shape=(len(end),), name="e", dtype="int32")
    if strides is not None:
        st = te.placeholder(shape=(len(strides),), name="st", dtype="int32")
        B = topi.strided_set(A, V, b, e, st) + 1
    else:
        B = topi.strided_set(A, V, b, e) + 1
    def check_device(target):
        dev = tvm.device(target, 0)
        if not tvm.testing.device_enabled(target):
            print("Skip because %s is not enabled" % target)
            return
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        # The strided variant takes the stride tensor as an extra input.
        if strides is not None:
            foo = tvm.build(s, [A, V, b, e, st, B], target, name="stride_set")
            s_np = np.asarray(strides).astype("int32")
            s_nd = tvm.nd.array(s_np, dev)
        else:
            foo = tvm.build(s, [A, V, b, e, B], target, name="stride_set")
        x_np = np.random.uniform(size=in_shape).astype(A.dtype)
        v_np = np.random.uniform(size=v_shape).astype(V.dtype)
        b_np = np.asarray(begin).astype("int32")
        e_np = np.asarray(end).astype("int32")
        out_npy = tvm.topi.testing.strided_set_python(x_np, v_np, begin, end, strides) + 1
        data_nd = tvm.nd.array(x_np, dev)
        v_nd = tvm.nd.array(v_np, dev)
        b_nd = tvm.nd.array(b_np, dev)
        e_nd = tvm.nd.array(e_np, dev)
        out_nd = tvm.nd.empty(out_npy.shape, device=dev, dtype=A.dtype)
        if strides is not None:
            foo(data_nd, v_nd, b_nd, e_nd, s_nd, out_nd)
        else:
            foo(data_nd, v_nd, b_nd, e_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
    for target in ["llvm", "opencl", "sdaccel", "aocl_sw_emu"]:
        check_device(target)
def verify_gather(data, axis, indices):
    """Check topi.gather against the python reference on all enabled targets."""
    data = np.asarray(data)
    indices = np.asarray(indices)
    data_ph = te.placeholder(shape=data.shape, dtype=data.dtype.name, name="data")
    indices_ph = te.placeholder(shape=indices.shape, dtype=indices.dtype.name, name="indices")
    gathered = topi.gather(data_ph, axis, indices_ph)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(gathered)
        built = tvm.build(sched, [data_ph, indices_ph, gathered], target, name="gather")
        expected = tvm.topi.testing.gather_python(data, axis, indices)
        data_nd = tvm.nd.array(data, dev)
        indices_nd = tvm.nd.array(indices, dev)
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=data.dtype.name)
        built(data_nd, indices_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_gather_nd(src_shape, indices_src, indices_dtype):
    """Check topi.gather_nd against the python reference on all enabled targets."""
    src_dtype = "float32"
    indices_src = np.array(indices_src, dtype=indices_dtype)
    data_ph = te.placeholder(shape=src_shape, dtype=src_dtype, name="A")
    indices_ph = te.placeholder(shape=indices_src.shape, dtype=indices_dtype, name="indices")
    gathered = topi.gather_nd(a=data_ph, indices=indices_ph)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(gathered)
        built = tvm.build(sched, [data_ph, indices_ph, gathered], target, name="take")
        # Fill the data tensor with distinct arange values.
        total = 1
        for dim in src_shape:
            total = total * dim
        data = np.arange(total, dtype=src_dtype).reshape(src_shape)
        expected = tvm.topi.testing.gather_nd_python(data, indices_src)
        data_nd = tvm.nd.array(data, dev)
        indices_nd = tvm.nd.array(indices_src, dev)
        dst_nd = tvm.nd.empty(expected.shape, device=dev, dtype=src_dtype)
        built(data_nd, indices_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_arange(start, stop, step):
    """Check topi.arange against np.arange on all enabled targets.

    start and/or step may be None, in which case they are omitted from the
    call so both topi and numpy apply their own defaults.
    """
    # Assemble positional/keyword arguments once so topi and numpy receive
    # exactly the same call in all four None/non-None combinations.
    pos_args = (stop,) if start is None else (start, stop)
    kw_args = {} if step is None else {"step": step}
    A = topi.arange(*pos_args, **kw_args)
    a_np = np.arange(*pos_args, **kw_args)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(A)
        built = tvm.build(sched, [A], target, name="arange")
        out_nd = tvm.nd.empty(a_np.shape, dtype="float32", device=dev)
        built(out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), a_np)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_repeat(in_shape, repeats, axis):
    """Check topi.repeat against np.repeat on all enabled targets."""
    inp = te.placeholder(shape=in_shape, name="A")
    out = topi.repeat(inp, repeats, axis)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_broadcast_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="repeat")
        src = np.random.uniform(size=in_shape).astype(inp.dtype)
        expected = np.repeat(src, repeats, axis)
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.array(np.empty(expected.shape).astype(out.dtype), dev)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_tile(in_shape, reps):
    """Check topi.tile against np.tile on all enabled targets."""
    inp = te.placeholder(shape=in_shape, name="A")
    out = topi.tile(inp, reps)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_broadcast_schedule(target)(out)
        built = tvm.build(sched, [inp, out], target, name="tile")
        src = np.random.uniform(size=in_shape).astype(inp.dtype)
        expected = np.tile(src, reps)
        src_nd = tvm.nd.array(src, dev)
        dst_nd = tvm.nd.array(np.empty(expected.shape).astype(out.dtype), dev)
        built(src_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_where(in_shape):
    """Check topi.where against np.where on all enabled targets."""
    cond_ph = te.placeholder(shape=in_shape, name="cond")
    dtype = cond_ph.dtype
    x_ph = te.placeholder(shape=in_shape, name="A")
    y_ph = te.placeholder(shape=in_shape, name="B")
    out = topi.where(cond_ph, x_ph, y_ph)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_broadcast_schedule(target)(out)
        built = tvm.build(sched, [cond_ph, x_ph, y_ph, out], target, name="where")
        # Condition drawn from [-1, 1) so both truthy and falsy values occur.
        cond_np = np.random.uniform(low=-1, high=1, size=in_shape).astype(dtype)
        x_np = np.random.uniform(size=in_shape).astype(dtype)
        y_np = np.random.uniform(size=in_shape).astype(dtype)
        expected = np.where(cond_np, x_np, y_np)
        cond_nd = tvm.nd.array(cond_np, dev)
        x_nd = tvm.nd.array(x_np, dev)
        y_nd = tvm.nd.array(y_np, dev)
        dst_nd = tvm.nd.array(np.empty(expected.shape).astype(out.dtype), dev)
        built(cond_nd, x_nd, y_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_one_hot(indices_shape, depth, on_value, off_value, axis, dtype):
    """Check topi.transform.one_hot against the python reference on all targets."""
    indices_ph = te.placeholder(shape=indices_shape, name="indices", dtype="int32")
    on_const = tvm.tir.const(on_value, dtype)
    off_const = tvm.tir.const(off_value, dtype)
    result = topi.transform.one_hot(indices_ph, on_const, off_const, depth, axis, dtype)
    def run_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(result)
        built = tvm.build(sched, [indices_ph, result], target, name="one_hot")
        indices_np = np.random.randint(0, depth, size=indices_shape).astype(indices_ph.dtype)
        expected = tvm.topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype)
        indices_nd = tvm.nd.array(indices_np, dev)
        dst_nd = tvm.nd.array(np.empty(expected.shape).astype(result.dtype), dev)
        built(indices_nd, dst_nd)
        tvm.testing.assert_allclose(dst_nd.numpy(), expected)
    for target, dev in tvm.testing.enabled_targets():
        run_target(target, dev)
def verify_unravel_index(indices, shape, dtype, indice_dtype="int64"):
    """Check topi.unravel_index against np.unravel_index on all enabled targets.

    ``indices`` may be a scalar or a 1-D list of flat indices; ``shape`` is
    the target shape to unravel into.
    """
    x_data = np.array(indices).astype(indice_dtype)
    y_data = np.array(shape).astype(dtype)
    # Output shape: (rank, num_indices) for a vector of indices, (rank,)
    # for a single scalar index.
    if len(x_data.shape) == 1:
        dst_shape = [y_data.shape[0], x_data.shape[0]]
    else:
        dst_shape = [y_data.shape[0]]
    X = te.placeholder(shape=x_data.shape, dtype=indice_dtype, name="X")
    Y = te.placeholder(shape=y_data.shape, dtype=dtype, name="Y")
    Z = topi.unravel_index(X, Y)
    def check_device(target, dev):
        # Build, run, and compare against numpy on one target.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(Z)
        foo = tvm.build(s, [X, Y, Z], target, name="unravel_index")
        out_npy = np.unravel_index(x_data, y_data)
        datax_nd = tvm.nd.array(x_data, dev)
        datay_nd = tvm.nd.array(y_data, dev)
        out_nd = tvm.nd.empty(dst_shape, device=dev, dtype=Z.dtype)
        foo(datax_nd, datay_nd, out_nd)
        tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected):
    """Check topi.sparse_to_dense against an explicitly supplied dense result.

    When ``default_value`` is None, the variant without a default-value
    placeholder is exercised.
    """
    indices_np = np.array(sparse_indices)
    values_np = np.array(sparse_values)
    out_shape_np = np.array(output_shape)
    default_np = np.array(default_value)

    A = te.placeholder(shape=indices_np.shape, name="sparse_indices", dtype=str(indices_np.dtype))
    B = te.placeholder(shape=values_np.shape, name="sparse_values", dtype=str(values_np.dtype))
    if default_value is None:
        args = [A, B]
        D = topi.sparse_to_dense(A, output_shape, B)
    else:
        C = te.placeholder(shape=(), name="default_value", dtype=str(default_np.dtype))
        args = [A, B, C]
        D = topi.sparse_to_dense(A, output_shape, B, C)

    def run_on(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(D)
        func = tvm.build(sched, args + [D], target, name="sparse_to_dense")
        indices_arr = tvm.nd.array(indices_np, dev)
        values_arr = tvm.nd.array(values_np, dev)
        out_arr = tvm.nd.empty(out_shape_np, device=dev, dtype=B.dtype)
        if default_value is None:
            func(indices_arr, values_arr, out_arr)
        else:
            func(indices_arr, values_arr, tvm.nd.array(default_np, dev), out_arr)
        tvm.testing.assert_allclose(out_arr.numpy(), np.array(xpected))

    for target, dev in tvm.testing.enabled_targets():
        run_on(target, dev)
def verify_matrix_set_diag(input_shape, diagonal_shape, dtype, k=0, align="RIGHT_LEFT"):
    """Compare topi.matrix_set_diag with its python reference on all enabled targets.

    Parameters
    ----------
    input_shape : tuple of int
        Shape of the (possibly batched) input matrix.
    diagonal_shape : tuple of int
        Shape of the replacement diagonal(s).
    dtype : str
        Element type for both input and diagonal.
    k : int or tuple of int, optional
        Diagonal offset(s), forwarded to matrix_set_diag.
    align : str, optional
        Diagonal alignment mode, forwarded to matrix_set_diag.
    """
    # Named input_t to avoid shadowing the builtin `input`.
    input_t = te.placeholder(shape=input_shape, name="input", dtype=dtype)
    diagonal = te.placeholder(shape=diagonal_shape, name="diagonal", dtype=dtype)
    matrix_set_diag_result = topi.transform.matrix_set_diag(input_t, diagonal, k, align)

    def check_device(target, dev):
        # `dev` already comes from enabled_targets(); the previous
        # `dev = tvm.device(target, 0)` re-resolution was redundant.
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(matrix_set_diag_result)
        fn = tvm.build(
            s, [input_t, diagonal, matrix_set_diag_result], target, name="matrix_set_diag"
        )
        input_npy = np.random.randint(-100, 100, size=input_shape).astype(dtype)
        diagonal_npy = np.random.randint(-100, 100, size=diagonal_shape).astype(dtype)
        out_npy = tvm.topi.testing.matrix_set_diag(input_npy, diagonal_npy, k, align)
        input_nd = tvm.nd.array(input_npy, dev)
        diagonal_nd = tvm.nd.array(diagonal_npy, dev)
        out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(matrix_set_diag_result.dtype), dev)
        fn(input_nd, diagonal_nd, out_nd)
        out_topi = out_nd.numpy()
        tvm.testing.assert_allclose(out_topi, out_npy)

    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
def verify_adv_index(data_shape, index_shapes, indice_dtype="int64"):
    """Compare topi.adv_index (numpy-style advanced indexing) with numpy fancy indexing.

    Parameters
    ----------
    data_shape : tuple of int
        Shape of the float32 data tensor being indexed.
    index_shapes : list of tuple of int
        One index-tensor shape per leading data dimension.
    indice_dtype : str, optional
        Integer dtype of the index tensors.
    """
    dtype = "float32"
    data = te.placeholder(shape=data_shape, name="data", dtype=dtype)
    indices = []
    np_data = np.random.uniform(size=data_shape).astype(dtype)
    np_indices = []
    for i, index_shape in enumerate(index_shapes):
        # Keep each index within bounds of the dimension it addresses.
        limit = data_shape[i]
        np_indices.append(np.random.uniform(0, limit - 1, size=index_shape).astype(indice_dtype))
        indices.append(
            te.placeholder(shape=index_shape, name="index_{}".format(i), dtype=indice_dtype)
        )
    np_out = np_data[tuple(np_indices)]
    out = topi.adv_index(data, indices)

    def check_device(target, dev):
        # enabled_targets() only yields usable (target, device) pairs, so the
        # old `tvm.device(...)` / `dev.exist` guard was dead weight.
        print("Running on target: %s" % target)
        # tvm.target.create() is deprecated; construct a Target directly.
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(out)
        func = tvm.build(s, [data] + indices + [out], target, name="adv_index")
        nd_list = [tvm.nd.array(np_data, dev)]
        for np_index in np_indices:
            nd_list.append(tvm.nd.array(np_index, dev))
        nd_list.append(tvm.nd.empty(out.shape, device=dev, dtype=data.dtype))
        func(*nd_list)
        tvm.testing.assert_allclose(nd_list[-1].numpy(), np.array(np_out))

    for target, dev in tvm.testing.enabled_targets():
        check_device(target, dev)
def verify_trilu(input_shape, upper, k=0):
    """Compare topi.trilu with numpy.triu/numpy.tril on every enabled target."""
    x = te.placeholder(shape=input_shape, name="x", dtype="float32")
    k_tir = tvm.tir.const(k, dtype="int32")
    trilu_result = topi.transform.trilu(x, k_tir, upper)

    def run_on(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            sched = tvm.topi.testing.get_injective_schedule(target)(trilu_result)
        func = tvm.build(sched, [x, trilu_result], target, name="trilu")
        x_np = np.random.normal(size=input_shape).astype(x.dtype)
        # numpy provides the oracle for both triangle directions.
        expected = np.triu(x_np, k) if upper else np.tril(x_np, k)
        x_arr = tvm.nd.array(x_np, dev)
        out_arr = tvm.nd.array(np.empty(x_np.shape).astype(trilu_result.dtype), dev)
        func(x_arr, out_arr)
        tvm.testing.assert_allclose(out_arr.numpy(), expected)

    for target, dev in tvm.testing.enabled_targets():
        run_on(target, dev)
@tvm.testing.uses_gpu
def test_strided_slice():
    """strided_slice over negative bounds/strides, implicit strides, None ends, and axes."""
    verify_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
    verify_strided_slice((3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
    verify_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
    verify_strided_slice((3, 4, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
    verify_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
    verify_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
    verify_strided_slice((3, 4, 3), [0, 0, 0], [None, None, None])
    verify_strided_slice((3, 4, 3), [0], [2], None, axes=[1])
@tvm.testing.uses_gpu
def test_dynamic_strided_slice():
    """strided_slice with begin/end/stride supplied as runtime tensors."""
    verify_dynamic_strided_slice((3, 4, 3), [0, 0, 0], [4, -5, 4], [1, -1, 2])
    verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
    verify_dynamic_strided_slice((3, 4, 3), [1, 0, 0], [2, 2, 3], [1, 1, 2])
    verify_dynamic_strided_slice((3, 4, 3), [1, 1, 0], [4, 4, 3])
    verify_dynamic_strided_slice((3, 4, 3), [0, 2, 0], [1, 2, 3])
@tvm.testing.uses_gpu
def test_strided_set():
    """strided_set with various value shapes, negative bounds/strides, and short slice specs."""
    verify_strided_set((3, 4, 3), (3, 2, 2), [0, 3, 0], [4, 1, 4], [1, -1, 2])
    verify_strided_set((3, 4, 3), (3, 1, 2), [0, 0, 0], [4, -5, 4], [1, -1, 2])
    verify_strided_set((3, 4, 3), (1, 3, 3), [1, 1, 0], [4, 4, 3], [2, 1, 1])
    verify_strided_set((3, 4, 3), (1, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1])
    verify_strided_set((3, 4, 3), (1, 2, 2), [1, 0, 0], [2, 2, 3], [1, 1, 2])
    verify_strided_set((3, 4, 3), (1, 2, 3), [1, -1, 0], [2, -3, 3], [1, -1, 1])
    verify_strided_set((3, 4, 3), (1, 2, 3), [1, 1, 0], [2, 3, 3], [1])
    verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1, 0], [4, 4, 3])
    verify_strided_set((3, 4, 3), (2, 3, 3), [1, 1], [4, 4, 3])
@tvm.testing.uses_gpu
def test_expand_dims():
    """expand_dims with multi-axis insertion and a negative axis."""
    verify_expand_dims((3, 10), (3, 10, 1, 1), 2, 2)
    verify_expand_dims((3, 10), (1, 3, 10), -3, 1)
@tvm.testing.uses_gpu
def test_reinterpret():
    """Bit-level reinterpretation between same-width float/int/unsigned types."""
    verify_reinterpret((1000,), "float32", "int32", lambda shape: np.random.randn(*shape) * 1000)
    verify_reinterpret((1000,), "float16", "int16", lambda shape: np.random.randn(*shape) * 100)
    verify_reinterpret(
        (1000,), "int16", "uint16", lambda shape: np.random.randint(-1000, 1000, size=shape)
    )
    verify_reinterpret(
        (1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
    )
    # NOTE(review): this uint32->int32 case duplicates the previous one (only the
    # random draw differs) — confirm whether a different dtype pair was intended.
    verify_reinterpret(
        (1000,), "uint32", "int32", lambda shape: np.random.randint(0, 2**32 - 1, size=shape)
    )
@tvm.testing.uses_gpu
def test_transpose():
    """transpose with explicit axis permutations and the default (None) reversal."""
    verify_transpose((3, 10, 2), (1, 0, 2))
    verify_transpose((3, 10, 5), (2, 0, 1))
    verify_transpose((3, 10), None)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_transpose_unfused_schedule(target, dev):
    """Run transpose through the relay graph executor, alone and fused with an add.

    The width is warp_size + 3, presumably to exercise a non-warp-aligned last
    axis — TODO confirm intent.
    """
    shape = (100, tvm.target.Target(target).thread_warp_size + 3)
    x = relay.var("x", relay.TensorType(shape, "float32"))
    f = relay.transpose(x)
    r = np.random.rand(*shape)
    func = relay.create_executor(
        kind="graph", mod=tvm.IRModule.from_expr(relay.Function([x], f)), device=dev, target=target
    ).evaluate()
    tvm.testing.assert_allclose(func(r).numpy(), np.transpose(r))

    # We want to make sure schedule does not fire here, but there is no way of
    # inspecting which schedules were used.
    x = relay.var("x", relay.TensorType(shape, "float32"))
    y = relay.var("y", relay.TensorType(shape, "float32"))
    f = relay.transpose(x + y)
    func = relay.create_executor(
        kind="graph",
        mod=tvm.IRModule.from_expr(relay.Function([x, y], f)),
        device=dev,
        target=target,
    ).evaluate()
    tvm.testing.assert_allclose(func(r, r).numpy(), np.transpose(r + r))
@tvm.testing.uses_gpu
def test_reshape():
    """reshape across rank changes, flattening, and a zero-sized dimension."""
    verify_reshape((1, 2, 3, 4), (2, 3, 4))
    verify_reshape((4, 2, 3, 4), (2, 4, 12))
    verify_reshape((4, 2, 3, 4), (2, 48))
    verify_reshape((16,), (2, 2, 2, 2))
    verify_reshape((4, 0), (2, 0, 2))
@tvm.testing.uses_gpu
def test_where():
    """where on a scalar (0-d) condition and a 4-d tensor."""
    verify_where(())
    verify_where((1, 2, 3, 4))
@tvm.testing.uses_gpu
def test_squeeze():
    """squeeze with explicit axes, None (all unit dims), and empty axis tuple,
    plus a hand-built compute that triggers inline let expressions."""
    verify_squeeze((1, 2, 3, 4), 0)
    verify_squeeze((1, 2, 1, 4), None)
    verify_squeeze((1, 1, 1, 4), (1, 2))
    verify_squeeze((1, 1, 1, 1), None)
    verify_squeeze((1, 1, 1, 1), ())

    # a special case to trigger inline let expression
    A = te.placeholder((2,), "float32", "A")
    E = topi.squeeze(A)
    C = te.compute((1,), lambda i: E[(2 * A[0] - 1).astype("int32")])
    for target in ["llvm", "cuda", "opencl"]:
        dev = tvm.device(target, 0)
        if tvm.testing.device_enabled(target):
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_injective_schedule(target)(C)
                func = tvm.build(s, [A, C])
            # With A = [1, 2], index is 2*1-1 = 1, so C[0] must read A[1] == 2.
            a = tvm.nd.array(np.array((1, 2)).astype("float32"), device=dev)
            c = tvm.nd.empty((1,), dtype="float32", device=dev)
            func(a, c)
            assert c.numpy()[0] == 2
@tvm.testing.uses_gpu
def test_concatenate():
    """concatenate with mixed shapes, many inputs, and positive/negative axes."""
    verify_concatenate([(2,), (2,), (2,)], -1)
    verify_concatenate([(2, 3, 4), (2, 2, 4), (2, 5, 4)], 1)
    verify_concatenate([(1, 2, 4), (1, 2, 3), (1, 2, 7), (1, 2, 8), (1, 2, 1)], -1)
    verify_concatenate([(5, 6, 7, 3), (16, 6, 7, 3), (12, 6, 7, 3), (8, 6, 7, 3), (2, 6, 7, 3)], 0)
    verify_concatenate([(1, 14400), (1, 2400), (1, 640), (1, 240)], 1)
@tvm.testing.uses_gpu
def test_stack():
    """stack along leading, middle, and trailing (negative) axes."""
    verify_stack([(2,), (2,), (2,)], -1)
    verify_stack([(2,), (2,), (2,)], 1)
    verify_stack([(2,), (2,), (2,)], 0)
    verify_stack([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1)
    verify_stack([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1)
@tvm.testing.uses_gpu
def test_split():
    """split by section count and by explicit indices, including a negative axis."""
    verify_split((2, 12, 3), 3, 1)
    verify_split((2, 12, 3), [2, 4], 1)
    verify_split((10, 12, 24), [5, 7, 9], -1)
@tvm.testing.uses_gpu
def test_flip():
    """flip along every axis, using both positive and negative axis numbering."""
    verify_flip((3, 4, 3), 1)
    verify_flip((3, 4, 3), 0)
    verify_flip((3, 4, 3), 2)
    verify_flip((3, 4, 3), -1)
    verify_flip((3, 4, 3), -3)
    verify_flip((3, 4, 3), -2)
@tvm.testing.requires_llvm
def test_expand_like():
    """expand_like broadcasting over one and two inserted axes."""
    verify_expand_like((3,), (2, 3), [0])
    verify_expand_like((2,), (2, 3), [1])
    verify_expand_like((3, 4), (3, 5, 4), [1])
    verify_expand_like((5, 7), (5, 6, 7, 8), [1, 3])
@tvm.testing.uses_gpu
def test_take():
    """take with flat and per-axis indices, out-of-bound indices under the
    default (clip), "wrap", and "fast" modes, and unsigned index dtypes."""
    verify_take((4,), [1])
    verify_take((4,), [[0, 1, 2, 3]])
    verify_take((3, 3, 3), [[11, 25]])
    verify_take((4,), [[0, 1], [2, 3]])
    verify_take((4,), [1], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 0)
    verify_take((2, 2), [[[1, 0], [0, 1]]], 1)
    verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2)
    verify_take((3, 4), [-5, 20])
    verify_take((3, 4), [-5, 20], mode="wrap")
    verify_take((3, 4), [-1, 2], axis=0)
    verify_take((3, 4), [-1, 2], axis=0, mode="wrap")
    verify_take((3, 4), [-1, 2], axis=1)
    verify_take((3, 4), [-1, 2], axis=1, mode="wrap")
    verify_take((3, 3, 3), [[11, 25]], mode="fast")
    verify_take((3, 4), [0, 2], axis=0, mode="fast")
    verify_take((3, 4), [0, 2], axis=1, mode="fast")
    verify_take((3, 4), [1, 2], axis=1, indices_dtype="uint32")
    verify_take((3, 4), [1, 2], axis=1, mode="wrap", indices_dtype="uint16")
    verify_take((3, 3, 3), [[11, 20]], mode="fast", indices_dtype="uint8")
@tvm.testing.uses_gpu
def test_gather():
    """gather along each axis with index tensors both smaller and larger than the data."""
    verify_gather([[1, 2], [3, 4]], 1, [[0, 0], [1, 0]])
    verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(1, 7, 5)))
    verify_gather(np.random.randn(4, 7, 5), 0, np.random.randint(low=0, high=4, size=(4, 7, 5)))
    verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
    # NOTE(review): the next line repeats the previous case with a fresh random
    # draw — confirm whether a different axis/shape was intended.
    verify_gather(np.random.randn(4, 7, 5), 1, np.random.randint(low=0, high=7, size=(4, 10, 5)))
    verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 2)))
    verify_gather(np.random.randn(4, 7, 5), 2, np.random.randint(low=0, high=5, size=(4, 7, 10)))
    verify_gather(np.random.randn(4, 7, 2), 0, np.random.randint(low=0, high=4, size=(4, 7, 2)))
@tvm.testing.uses_gpu
def test_gather_nd():
    """gather_nd over 1-d to 4-d data with int, float, and unsigned index dtypes."""
    for indices_dtype in ["int32", "float32", "uint8"]:
        verify_gather_nd((4,), [[1.8]], indices_dtype)
        verify_gather_nd((4,), [[1, 3, 2]], indices_dtype)
        verify_gather_nd((2, 3), [[1]], indices_dtype)
        verify_gather_nd((2, 3), [[1], [0]], indices_dtype)
        verify_gather_nd((2, 3), [[1, 0], [0, 2]], indices_dtype)
        verify_gather_nd((2, 3, 4), [[1, 0], [0, 2]], indices_dtype)
        verify_gather_nd((2, 3, 4), [[1, 0], [0, 2], [3, 1]], indices_dtype)
        verify_gather_nd(
            (2, 3, 4), [[[1, 0], [0, 1]], [[0, 2], [1, 2]], [[3, 1], [0, 2]]], indices_dtype
        )
        verify_gather_nd((2, 3, 4, 5), [[1, 0], [0, 2]], indices_dtype)
        verify_gather_nd((2, 3, 4, 5), [[1, 0], [2, 1], [3, 2], [4, 2]], indices_dtype)
@tvm.testing.uses_gpu
def test_arange():
    """arange with default start/step, fractional steps, and descending ranges."""
    verify_arange(None, 20, None)
    verify_arange(None, 20, 2)
    verify_arange(1, 20, None)
    verify_arange(1, 20, 2)
    verify_arange(1, 20, 1.5)
    verify_arange(1, 20.5, None)
    verify_arange(1, 20, 3)
    verify_arange(20, 1, -1)
    verify_arange(20, 1, -1.5)
@tvm.testing.uses_gpu
def test_repeat():
    """repeat with varying repeat counts and a negative axis."""
    verify_repeat((2,), 1, 0)
    verify_repeat((3, 2), 2, 0)
    verify_repeat((3, 2, 4), 3, 1)
    verify_repeat((1, 3, 2, 4), 4, -1)
@tvm.testing.uses_gpu
def test_tile():
    """tile with reps shorter/longer than the data rank and a zero-sized dim."""
    verify_tile((3, 2), (2, 3))
    verify_tile((3, 2, 5), (2,))
    verify_tile((3,), (2, 3, 3))
    verify_tile((4, 0), (5,))
@tvm.testing.uses_gpu
def test_layout_transform():
    """Check NCHW -> NCHW16c layout_transform against a numpy transpose/reshape reference."""
    in_shape = (1, 32, 8, 8)
    A = te.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.layout_transform(A, "NCHW", "NCHW16c")

    # Renamed from `input` to avoid shadowing the builtin.
    input_np = np.random.uniform(size=in_shape).astype(A.dtype)
    # Reference: NCHW -> NHWC, split C=32 into (2, 16), reorder to N,2,H,W,16.
    output = np.transpose(input_np, axes=(0, 2, 3, 1))
    output = np.reshape(output, newshape=(1, 8, 8, 2, 16))
    output = np.transpose(output, axes=(0, 3, 1, 2, 4))

    def check_device(target, dev):
        tvm_input = tvm.nd.array(input_np, dev)
        tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=B.dtype)
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        f = tvm.build(s, [A, B], target, name="layout_transform")
        f(tvm_input, tvm_output)
        tvm.testing.assert_allclose(tvm_output.numpy(), output)

    for backend, dev in tvm.testing.enabled_targets():
        check_device(backend, dev)
@tvm.testing.uses_gpu
def test_shape():
    """topi.shape must return the static input shape as an int32 tensor."""
    in_shape = (8, 7, 13)
    dtype = "int32"
    A = te.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.shape(A, dtype)

    # Renamed from `input` to avoid shadowing the builtin.
    input_np = np.random.uniform(size=in_shape).astype(A.dtype)
    output = np.asarray(in_shape).astype(dtype)

    def check_device(target, dev):
        tvm_input = tvm.nd.array(input_np, dev)
        tvm_output = tvm.nd.empty(output.shape, device=dev, dtype=dtype)
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        f = tvm.build(s, [A, B], target, name="shape")
        f(tvm_input, tvm_output)
        tvm.testing.assert_allclose(tvm_output.numpy(), output)

    for backend, dev in tvm.testing.enabled_targets():
        check_device(backend, dev)
@tvm.testing.uses_gpu
def test_sequence_mask():
    """sequence_mask over both mask axes, two mask values, and 2-d/4-d inputs.

    For each configuration the python sequence_mask implementation provides the
    expected output; check_device closes over the current iteration's tensors.
    """
    for in_shape in (5, 10), (3, 4, 5, 4):
        for axis in [0, 1]:
            for mask_value in [0.0, 1.0]:
                max_length = in_shape[axis]
                batch_size = in_shape[1 - axis]
                A = te.placeholder(shape=in_shape, dtype="float32", name="A")
                B = te.placeholder(shape=(batch_size,), dtype="int32", name="B")
                C = topi.sequence_mask(A, B, axis=axis, mask_value=mask_value)
                A_data = np.random.normal(0, 1, in_shape).astype(np.float32)
                # Valid lengths are at least 1 and less than the masked axis extent.
                B_data = np.random.randint(1, max_length, (batch_size,)).astype(np.int32)
                C_gt_data = tvm.topi.testing.sequence_mask(A_data, B_data, mask_value, axis)

                def check_device(target, dev):
                    tvm_A = tvm.nd.array(A_data, dev)
                    tvm_B = tvm.nd.array(B_data, dev)
                    tvm_C = tvm.nd.empty(in_shape, device=dev, dtype="float32")
                    print("Running on target: %s" % target)
                    with tvm.target.Target(target):
                        s = tvm.topi.testing.get_injective_schedule(target)(C)
                    f = tvm.build(s, [A, B, C], target, name="SequenceMask")
                    f(tvm_A, tvm_B, tvm_C)
                    tvm.testing.assert_allclose(tvm_C.numpy(), C_gt_data)

                for backend, dev in tvm.testing.enabled_targets():
                    check_device(backend, dev)
@tvm.testing.uses_gpu
def test_ndarray_size():
    """topi.ndarray_size must return the total element count as a 0-d tensor."""
    in_shape = (5, 11, 7)
    dtype = "int32"
    A = te.placeholder(shape=in_shape, dtype="float32", name="A")
    B = topi.ndarray_size(A, dtype)

    # Renamed from `input` to avoid shadowing the builtin.
    input_np = np.random.uniform(size=in_shape).astype(A.dtype)
    output = np.asarray(np.size(input_np)).astype(dtype)

    def check_device(target, dev):
        tvm_input = tvm.nd.array(input_np, device=dev)
        tvm_output = tvm.nd.empty((), device=dev, dtype=B.dtype)
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        f = tvm.build(s, [A, B], target, name="ndarray_size")
        f(tvm_input, tvm_output)
        tvm.testing.assert_allclose(tvm_output.numpy(), output)

    for backend, dev in tvm.testing.enabled_targets():
        check_device(backend, dev)
@tvm.testing.uses_gpu
def test_where_fusion():
    """integration test that where and zeros should be properly inlined"""

    def check_device(target, dev):
        with tvm.target.Target(target):
            print("Running on target: %s" % target)
            conv2d_compute, conv2d_schedule = tvm.topi.testing.get_conv2d_nchw_implement(target)
            data = te.placeholder((2, 1, 2, 4), "int8", "data")
            w = te.placeholder((3, 1, 2, 2), "int8", "w")
            conv1 = conv2d_compute(data, w, 1, 0, 1, "int32")
            zeros = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(0, dtype="int32"))
            gt = topi.greater_equal(conv1, zeros)
            one = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(1, dtype="int32"))
            two = topi.full((2, 3, 1, 3), "int32", tvm.tir.const(2, dtype="int32"))
            where = topi.where(gt, one, two)
            add = topi.add(conv1, where)
            outs = [add]
            s = conv2d_schedule(outs)
            # Fix: build for the function's `target` parameter. The original
            # passed `target=backend`, which only worked because the enclosing
            # loop variable leaked in through the closure.
            tvm.build(s, [data, w, add], target=target)

    for backend, dev in tvm.testing.enabled_targets():
        check_device(backend, dev)
@tvm.testing.uses_gpu
def test_one_hot():
    """one_hot over int32 and float32 payloads with various depths and axes."""
    verify_one_hot((3,), 3, 1, 0, -1, "int32")
    verify_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
    verify_one_hot((2, 2), 5, 2, -2, 0, "int32")
    verify_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
    verify_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
    verify_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
@tvm.testing.uses_gpu
def test_unravel_index():
    """unravel_index with vector and scalar flat indices over several dtypes."""
    for dtype in ["int32", "int64"]:
        for indice_dtype in ["int64", "uint8", "uint16", "uint32"]:
            verify_unravel_index([0, 1, 2, 3], [2, 2], dtype, indice_dtype)
            verify_unravel_index([144], [5, 5, 5, 2], dtype, indice_dtype)
            verify_unravel_index(144, [5, 5, 5, 2], dtype, indice_dtype)
            verify_unravel_index([100, 13, 5], [5, 5, 5, 2], dtype, indice_dtype)
@tvm.testing.uses_gpu
def test_sparse_to_dense():
    """sparse_to_dense with scalar/vector/nXd indices, floats, and a None default."""
    verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0])  # scalar
    verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3])  # vector
    verify_sparse_to_dense(
        [[0, 0], [1, 2]], [1, 2], 0, [3, 4], [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]]
    )  # nXd
    verify_sparse_to_dense(
        [[0, 0, 0], [1, 2, 3]],
        [1, 2],
        4,
        [2, 3, 4],
        [[[1, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 4]], [[4, 4, 4, 4], [4, 4, 4, 4], [4, 4, 4, 2]]],
    )  # nXd
    verify_sparse_to_dense(
        [0, 1, 4], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]
    )  # floats
    verify_sparse_to_dense(1, 3, None, [5], [0, 3, 0, 0, 0])  # default value not specified
    # negative test cases
    # sparse indices should be ints
    # verify_sparse_to_dense([[0.1, 1.1, 4.1], [0,2,4]], [3.1, 3.1, 3.1], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_values should be 0d or 1d only
    # verify_sparse_to_dense([[0, 1, 4], [0, 2, 4]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
    # sparse_indices should not be > 2d tensor
    # verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[3.1, 3.1, 3.1]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1])
@tvm.testing.uses_gpu
def test_matrix_set_diag():
    """matrix_set_diag with single/banded diagonals and all alignment modes."""
    for dtype in ["float32", "int32"]:
        verify_matrix_set_diag((2, 2), (2,), dtype)
        verify_matrix_set_diag((4, 3, 3), (4, 3), dtype)
        verify_matrix_set_diag((2, 3, 4), (2, 3), dtype, 1)
        verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_RIGHT")
        verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "LEFT_LEFT")
        verify_matrix_set_diag((2, 3, 4), (2, 4, 3), dtype, (-1, 2), "RIGHT_RIGHT")
@tvm.testing.uses_gpu
def test_adv_index():
    """adv_index with broadcastable index tensors over several index dtypes."""
    for indice_dtype in ["int32", "int64", "uint8", "uint16", "uint32"]:
        verify_adv_index((3, 4, 5), [(2,), (2,), (1,)], indice_dtype=indice_dtype)
        verify_adv_index((10, 15, 5), [(4, 1), (1, 7)], indice_dtype=indice_dtype)
        verify_adv_index((10, 5, 15), [(1, 2, 1), (1, 2, 7)], indice_dtype=indice_dtype)
@tvm.testing.uses_gpu
def test_trilu():
    """trilu in both directions, with diagonal offsets and a batched input."""
    # Test upper and lower triangle
    verify_trilu((3, 3), True, 0)
    verify_trilu((3, 3), False, 0)
    # Test larger matrices with offset.
    verify_trilu((6, 6), True, 1)
    verify_trilu((6, 6), False, 2)
    verify_trilu((6, 6), False, -2)
    # Test batch size
    verify_trilu((8, 6, 6), False, -2)
if __name__ == "__main__":
    # Run every parameterless test when executed as a script. The previously
    # missing calls (dynamic_strided_slice, strided_set, reinterpret, gather)
    # are added; test_transpose_unfused_schedule is omitted because it needs
    # pytest-injected (target, dev) parameters.
    test_strided_slice()
    test_dynamic_strided_slice()
    test_strided_set()
    test_concatenate()
    test_stack()
    test_transpose()
    test_expand_dims()
    test_reinterpret()
    test_reshape()
    test_where()
    test_squeeze()
    test_split()
    test_flip()
    test_expand_like()
    test_take()
    test_gather()
    test_gather_nd()
    test_arange()
    test_layout_transform()
    test_repeat()
    test_tile()
    test_shape()
    test_sequence_mask()
    test_ndarray_size()
    test_where_fusion()
    test_one_hot()
    test_unravel_index()
    test_sparse_to_dense()
    test_matrix_set_diag()
    test_adv_index()
    test_trilu()
| 53,819 | 39.557649 | 114 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_group_norm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for group_norm."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import topi
from tvm.topi.utils import get_const_tuple
import tvm.topi.testing
import tvm.testing
# Dispatch table mapping a target kind to the schedule used for group_norm;
# only the generic injective schedule is registered here.
_group_norm_schedule = {
    "generic": topi.generic.schedule_injective,
}
# only test on llvm because schedule is missing
@tvm.testing.parametrize_targets("llvm")
@pytest.mark.parametrize("shape, axis", [([2, 4, 16], (2,)), ([2, 4, 4, 16], (2, 3))])
def test_group_norm(target, dev, shape, axis, epsilon=1e-5, dtype="float32", rtol=1e-5, atol=1e-5):
    """Check topi.nn.group_norm against the python reference implementation."""
    num_groups = 2
    channel_axis = 1
    channels = shape[channel_axis]

    data = te.placeholder(shape, dtype=dtype, name="data")
    gamma = te.placeholder((channels,), dtype=dtype, name="gamma")
    beta = te.placeholder((channels,), dtype=dtype, name="beta")
    B = topi.nn.group_norm(data, gamma, beta, num_groups, channel_axis, axis, epsilon)

    # Fixed seed keeps the comparison deterministic across runs.
    np.random.seed(0)
    data_np = np.random.uniform(size=shape).astype(dtype)
    gamma_np = np.random.uniform(size=(channels,)).astype(dtype)
    beta_np = np.random.uniform(size=(channels,)).astype(dtype)
    b_np = tvm.topi.testing.group_norm_python(
        data_np, gamma_np, beta_np, num_groups, channel_axis, axis, epsilon
    )

    with tvm.target.Target(target):
        schedule_fn = tvm.topi.testing.dispatch(target, _group_norm_schedule)
        s = schedule_fn([B])
    data_tvm = tvm.nd.array(data_np, dev)
    gamma_tvm = tvm.nd.array(gamma_np, dev)
    beta_tvm = tvm.nd.array(beta_np, dev)
    b_tvm = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=dtype), dev)
    f = tvm.build(s, [data, gamma, beta, B], target)
    f(data_tvm, gamma_tvm, beta_tvm, b_tvm)
    tvm.testing.assert_allclose(b_tvm.numpy(), b_np, rtol=rtol, atol=atol)
if __name__ == "__main__":
    # Delegate to tvm.testing's pytest-based entry point.
    tvm.testing.main()
| 2,609 | 37.955224 | 99 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_sparse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for sparse operator"""
import numpy as np
import tvm
from tvm import te
from tvm import topi
from tvm import relay
import tvm.topi.testing
from tvm.topi.utils import get_const_tuple
import tvm.contrib.sparse as tvmsp
from collections import namedtuple
import time
import scipy.sparse as sp
import tvm.testing
# Dispatch table mapping a target kind to its (compute, schedule) pair for
# sparse_dense.
_sparse_dense_implement = {
    "generic": (topi.nn.sparse_dense, topi.generic.schedule_sparse_dense),
    "cuda": (topi.cuda.sparse_dense, topi.cuda.schedule_sparse_dense),
    "x86": (topi.nn.sparse_dense, topi.x86.schedule_sparse_dense),
}
def verify_dynamic_csrmv(batch, in_dim, out_dim, dtype, use_bias=True):
    """CSR matrix-vector product with a dynamic row count, checked against numpy.

    NOTE(review): `out_dim` is accepted but never used — the output is always a
    (rows, 1) column vector; confirm whether it should shape B/D.
    """
    # nr/nc/n are symbolic: row count, column count, and number of non-zeros.
    nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
    B = te.placeholder((in_dim, 1), dtype=dtype, name="B")
    C = te.placeholder((nr,), dtype=dtype, name="C")
    D = topi.sparse.csrmv(A, B, C if use_bias else None)
    s = te.create_schedule(D.op)
    dtype = A.dtype

    # get the test data
    def get_ref_data():
        a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
        b_np = np.random.uniform(size=(in_dim, 1), high=100).astype(dtype)
        c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
        else:
            d_np = np.dot(a_np, b_np)
        return (a_np, b_np, c_np, d_np)

    a_np, b_np, c_np, d_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        # Convert the dense input to CSR; indptr must have one entry per row + 1.
        a = tvmsp.array(a_np, dev)
        _nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
        assert a.shape[0] == a.indptr.shape[0] - 1
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(c_np, dev)
        d = tvm.nd.array(np.zeros((_nr, 1), dtype=dtype), dev)
        assert a.data.dtype == A.data.dtype
        assert a.indices.dtype == A.indices.dtype
        assert a.indptr.dtype == A.indptr.dtype
        # The symbolic row count `nr` is passed explicitly as the first argument.
        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmv")
        f(_nr, a.data, a.indices, a.indptr, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)

    for device in ["llvm"]:
        check_device(device)
def verify_dynamic_csrmm(batch, in_dim, out_dim, dtype, use_bias=True):
    """CSR matrix-matrix product with a dynamic row count, checked against numpy."""
    # nr/nc/n are symbolic: row count, column count, and number of non-zeros.
    nr, nc, n = te.var("nr"), te.var("nc"), te.var("n")
    A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, dtype=dtype, name="A")
    B = te.placeholder((in_dim, out_dim), dtype=dtype, name="B")
    C = te.placeholder((nr,), dtype=dtype, name="C")
    D = topi.sparse.csrmm(A, B, C if use_bias else None)
    s = te.create_schedule(D.op)
    dtype = A.dtype

    # get the test data
    def get_ref_data():
        a_np = np.random.uniform(size=(batch, in_dim), high=100).astype(dtype)
        b_np = np.random.uniform(size=(in_dim, out_dim), high=100).astype(dtype)
        c_np = np.random.uniform(size=(batch,), high=100).astype(dtype)
        if use_bias:
            # Bias is broadcast across the output columns.
            d_np = np.dot(a_np, b_np) + c_np.reshape((batch, 1))
        else:
            d_np = np.dot(a_np, b_np)
        return (a_np, b_np, c_np, d_np)

    a_np, b_np, c_np, d_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvmsp.array(a_np, dev)
        _nr, _nc, _n = a.shape[0], a.shape[1], a.data.shape[0]
        assert a.shape[0] == a.indptr.shape[0] - 1
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(c_np, dev)
        d = tvm.nd.array(np.zeros((_nr, out_dim), dtype=dtype), dev)
        # The symbolic row count `nr` is passed explicitly as the first argument.
        f = tvm.build(s, [nr, A.data, A.indices, A.indptr, B, C, D], device, name="csrmm")
        f(_nr, a.data, a.indices, a.indptr, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-2, atol=1e-2)

    for device in ["llvm"]:
        check_device(device)
def verify_dense_si(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
    """Dense layer with a *sparse input* matrix, checked against numpy."""
    nonzeros = te.var("nonzeros")
    A = tvmsp.placeholder(shape=(batch, in_dim), nonzeros=nonzeros, dtype=dtype, name="A")
    B = te.placeholder((out_dim, in_dim), dtype=dtype, name="B")
    C = te.placeholder((out_dim,), dtype=dtype, name="C")
    D = topi.sparse.dense(A, B, C if use_bias else None)
    s = te.create_schedule(D.op)

    # get the test data
    def get_ref_data():
        mag = 10.0
        # ReLU-style clamp makes roughly half of A zero so the CSR input is
        # genuinely sparse.
        a_np = np.maximum(
            mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5), 0.0
        ).astype(dtype)
        b_np = (mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5)).astype(
            dtype
        )
        c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np.T) + c_np
        else:
            d_np = np.dot(a_np, b_np.T)
        return (a_np, b_np, c_np, d_np)

    a_np, b_np, c_np, d_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvmsp.array(a_np, dev)
        b = tvm.nd.array(b_np, dev)
        c = tvm.nd.array(c_np, dev)
        d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
        f = tvm.build(s, [A.data, A.indices, A.indptr, B, C, D], device, name="dense")
        f(a.data, a.indices, a.indptr, b, c, d)
        tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)

    check_device("llvm")
def verify_dense_sw(batch, in_dim, out_dim, use_bias=True, dtype="float32"):
    """Dense layer with a *sparse weight* matrix, checked against numpy."""
    nonzeros = te.var("nonzeros")
    A = te.placeholder((batch, in_dim), dtype=dtype, name="A")
    B = tvmsp.placeholder(shape=(out_dim, in_dim), nonzeros=nonzeros, dtype=dtype, name="B")
    C = te.placeholder((out_dim,), dtype=dtype, name="C")
    D = topi.sparse.dense(A, B, C if use_bias else None)
    s = te.create_schedule(D.op)

    # get the test data
    def get_ref_data():
        mag = 10.0
        a_np = (mag * (np.random.uniform(size=(batch, in_dim)).astype("float32") - 0.5)).astype(
            dtype
        )
        # ReLU-style clamp makes roughly half of B zero so the CSR weight is
        # genuinely sparse.
        b_np = np.maximum(
            mag * (np.random.uniform(size=(out_dim, in_dim)).astype("float32") - 0.5), 0.0
        ).astype(dtype)
        c_np = (mag * (np.random.uniform(size=(out_dim,)).astype("float32") - 0.5)).astype(dtype)
        if use_bias:
            d_np = np.dot(a_np, b_np.T) + c_np
        else:
            d_np = np.dot(a_np, b_np.T)
        return (a_np, b_np, c_np, d_np)

    a_np, b_np, c_np, d_np = get_ref_data()

    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        a = tvm.nd.array(a_np, dev)
        b = tvmsp.array(b_np, dev)
        c = tvm.nd.array(c_np, dev)
        d = tvm.nd.array(np.zeros(get_const_tuple(D.shape), dtype=dtype), dev)
        f = tvm.build(s, [A, B.data, B.indices, B.indptr, C, D], device, name="dense")
        f(a, b.data, b.indices, b.indptr, c, d)
        tvm.testing.assert_allclose(d.numpy(), d_np, rtol=1e-4, atol=1e-4)

    check_device("llvm")
def test_csrmv():
    """CSR matrix-vector product over float32/float64/int32, with and without bias."""
    verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float32", use_bias=False)
    verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="float64", use_bias=True)
    verify_dynamic_csrmv(batch=5, in_dim=7, out_dim=1, dtype="int32", use_bias=True)
def test_csrmm():
    """CSR matrix-matrix product over int64 and float64, with and without bias."""
    M, K, N = 5, 7, 2
    verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="int64", use_bias=False)
    verify_dynamic_csrmm(batch=M, in_dim=K, out_dim=N, dtype="float64", use_bias=True)
def test_dense_si():
    """Sparse-input dense: every dtype is checked with and without bias."""
    M, K, N = 3, 5, 2
    for dtype in ("float32", "int32", "int16"):
        for use_bias in (False, True):
            verify_dense_si(batch=M, in_dim=K, out_dim=N, use_bias=use_bias, dtype=dtype)
def test_dense_sw():
    """Sparse-weight dense: every dtype is checked with and without bias."""
    M, K, N = 3, 5, 2
    for dtype in ("float32", "int32", "int16"):
        for use_bias in (False, True):
            verify_dense_sw(batch=M, in_dim=K, out_dim=N, use_bias=use_bias, dtype=dtype)
def test_dense():
    """Run both the sparse-input and the sparse-weight dense suites."""
    for subtest in (test_dense_si, test_dense_sw):
        subtest()
def test_sparse_dense_csr():
    """topi.nn.sparse_dense with a CSR weight: Y = X @ W.T."""
    M, N, K, density = 1, 17, 47, 0.2
    X_np = np.random.randn(M, K).astype("float32")
    W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
    W_np = W_sp_np.todense()
    Y_np = X_np.dot(W_np.T)
    # One placeholder per CSR buffer plus the dense input.
    W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
    W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
    W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr)
    s = te.create_schedule(Y.op)
    func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
    Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
    func(
        tvm.nd.array(X_np),
        tvm.nd.array(W_sp_np.data),
        tvm.nd.array(W_sp_np.indices),
        tvm.nd.array(W_sp_np.indptr),
        Y_tvm,
    )
    tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_dense_csr_reverse():
    """sparse_dense with sparse_lhs=True: the sparse operand is on the left,
    so the reference result is Y = W @ X.T."""
    M, N, K, density = 1, 17, 47, 0.2
    X_np = np.random.randn(M, K).astype("float32")
    W_sp_np = sp.random(N, K, density=density, format="csr", dtype="float32")
    W_np = W_sp_np.todense()
    Y_np = W_np.dot(X_np.T)
    W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
    W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
    W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
    s = te.create_schedule(Y.op)
    func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
    Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
    func(
        tvm.nd.array(X_np),
        tvm.nd.array(W_sp_np.data),
        tvm.nd.array(W_sp_np.indices),
        tvm.nd.array(W_sp_np.indptr),
        Y_tvm,
    )
    tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
def test_sparse_transpose_csr():
    """topi.nn.sparse_transpose on a square CSR matrix, validated by
    rebuilding a scipy CSR from the three output buffers."""
    N, density = 1023, 0.3
    X_sp = sp.random(N, N, density=density, format="csr", dtype="float32")
    # scipy's transpose provides both the reference values and the expected
    # shapes/dtypes for the output buffers.
    X_sp_T = X_sp.transpose()
    X_np_T = X_sp_T.todense()
    X_data = te.placeholder(shape=X_sp.data.shape, dtype=str(X_sp.data.dtype))
    X_indices = te.placeholder(shape=X_sp.indices.shape, dtype=str(X_sp.indices.dtype))
    X_indptr = te.placeholder(shape=X_sp.indptr.shape, dtype=str(X_sp.indptr.dtype))
    X_T_data, X_T_indices, X_T_indptr = topi.nn.sparse_transpose(X_data, X_indices, X_indptr)
    s = te.create_schedule([X_T_data.op, X_T_indices.op, X_T_indptr.op])
    func = tvm.build(s, [X_data, X_indices, X_indptr, X_T_data, X_T_indices, X_T_indptr])
    X_T_data_tvm = tvm.nd.array(np.zeros(X_sp_T.data.shape, dtype=X_sp_T.data.dtype))
    X_T_indices_tvm = tvm.nd.array(np.zeros(X_sp_T.indices.shape, dtype=X_sp_T.indices.dtype))
    X_T_indptr_tvm = tvm.nd.array(np.zeros(X_sp_T.indptr.shape, dtype=X_sp_T.indptr.dtype))
    func(
        tvm.nd.array(X_sp.data),
        tvm.nd.array(X_sp.indices),
        tvm.nd.array(X_sp.indptr),
        X_T_data_tvm,
        X_T_indices_tvm,
        X_T_indptr_tvm,
    )
    # Round-trip the raw buffers through scipy to compare as a dense matrix.
    X_T_out = sp.csr_matrix(
        (X_T_data_tvm.numpy(), X_T_indices_tvm.numpy(), X_T_indptr_tvm.numpy()), shape=(N, N)
    ).todense()
    tvm.testing.assert_allclose(X_np_T, X_T_out, atol=1e-4, rtol=1e-4)
def random_bsr_matrix(M, N, BS_R, BS_C, density, dtype):
    """Build a random scipy BSR matrix of shape (M, N) with BS_R x BS_C blocks.

    Roughly ``density`` of the entries are populated; the exact block count is
    ``int(density * M * N / (BS_R * BS_C)) + 1`` distinct blocks.
    """
    import itertools

    assert M % BS_R == 0
    assert N % BS_C == 0
    dense = np.zeros((M, N), dtype=dtype)
    num_blocks = int(int(density * M * N) / (BS_R * BS_C)) + 1
    # Every possible block origin (top-left corner) on the block grid.
    block_origins = np.asarray(list(itertools.product(range(0, M, BS_R), range(0, N, BS_C))))
    assert block_origins.shape[0] == M // BS_R * N // BS_C
    picks = np.random.choice(block_origins.shape[0], size=num_blocks, replace=False)
    for r, c in block_origins[picks]:
        dense[r : r + BS_R, c : c + BS_C] = np.random.randn(BS_R, BS_C)
    result = sp.bsr_matrix(dense, blocksize=(BS_R, BS_C))
    # Sanity-check the BSR buffer shapes scipy produced.
    assert result.data.shape == (num_blocks, BS_R, BS_C)
    assert result.indices.shape == (num_blocks,)
    assert result.indptr.shape == (M // BS_R + 1,)
    return result
def verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu, device, target):
    """Compare a target-specific BSR sparse_dense (optionally fused with ReLU)
    against a dense numpy reference Y = X @ W.T."""
    X_np = np.random.randn(M, K).astype("float32")
    W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
    W_np = W_sp_np.todense()
    Y_np = X_np @ W_np.T
    if use_relu:
        Y_np = np.maximum(Y_np, 0.0)
    W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
    W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
    W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    # Pick the (compute, schedule) implementation registered for this target.
    fcompute, fschedule = tvm.topi.testing.dispatch(target, _sparse_dense_implement)
    with tvm.target.Target(target):
        Y = fcompute(X, W_data, W_indices, W_indptr)
        if use_relu:
            # ReLU is fused into the same schedule as the sparse matmul.
            Y = topi.nn.relu(Y)
        s = fschedule([Y])
        func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
        Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=device)
        func(
            tvm.nd.array(X_np, device=device),
            tvm.nd.array(W_sp_np.data, device=device),
            tvm.nd.array(W_sp_np.indices, device=device),
            tvm.nd.array(W_sp_np.indptr, device=device),
            Y_tvm,
        )
        tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_sparse_dense_bsr_relu(dev, target):
    """Exercise BSR sparse_dense both with and without a fused ReLU."""
    M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
    for use_relu in (True, False):
        verify_sparse_dense_bsr(M, N, K, BS_R, BS_C, density, use_relu, dev, target)
def test_sparse_dense_bsr_reverse():
    """sparse_dense with a BSR weight and sparse_lhs=True: Y = W @ X.T."""
    M, N, K, BS_R, BS_C, density = 1, 64, 128, 8, 16, 0.9
    X_np = np.random.randn(M, K).astype("float32")
    W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
    W_np = W_sp_np.todense()
    Y_np = W_np.dot(X_np.T)
    W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
    W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
    W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    Y = topi.nn.sparse_dense(X, W_data, W_indices, W_indptr, sparse_lhs=True)
    s = te.create_schedule(Y.op)
    func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
    Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype))
    func(
        tvm.nd.array(X_np),
        tvm.nd.array(W_sp_np.data),
        tvm.nd.array(W_sp_np.indices),
        tvm.nd.array(W_sp_np.indptr),
        Y_tvm,
    )
    tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_sparse_dense_bsr_randomized():
    """Fuzz sparse_dense over 20 random BSR shapes/densities on llvm and cuda."""
    for _ in range(20):
        BS_R = np.random.randint(1, 16)
        BS_C = np.random.randint(1, 16)
        M = np.random.randint(1, 32)
        # N and K must be whole multiples of the block sizes.
        N = int(np.random.randint(1, 16) * BS_R)
        K = int(np.random.randint(1, 16) * BS_C)
        density = np.clip(np.random.random(), 0.1, 0.9)
        X_np = np.random.randn(M, K).astype("float32")
        W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
        W_np = W_sp_np.todense()
        Y_np = np.array(X_np.dot(W_np.T))
        W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
        W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
        W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
        X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
        def check_device(device):
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("Skip because %s is not enabled" % device)
                return
            print("Running on target: %s" % device)
            fcompute, fschedule = tvm.topi.testing.dispatch(device, _sparse_dense_implement)
            with tvm.target.Target(device):
                Y = fcompute(X, W_data, W_indices, W_indptr)
                s = fschedule([Y])
                func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
                Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
                func(
                    tvm.nd.array(X_np, device=dev),
                    tvm.nd.array(W_sp_np.data, device=dev),
                    tvm.nd.array(W_sp_np.indices, device=dev),
                    tvm.nd.array(W_sp_np.indptr, device=dev),
                    Y_tvm,
                )
                tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
        for device in ["llvm", "cuda"]:
            check_device(device)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_gpu(target, dev):
    """Check the GPU sparse_dense_padded kernel against a reference Y = X @ W.T.

    The BSR weight is padded with pad_sparse_matrix so each row's block count
    is a multiple of 32; padding must not change the numeric result.
    """
    M = 128
    N = 1280
    K = 128
    X_np = np.random.randn(M, K).astype("float32")
    W_sp_np = random_bsr_matrix(N, K, 1, 1, density=0.01, dtype="float32")
    W_sp_np_padded = tvm.topi.cuda.pad_sparse_matrix(W_sp_np, 32)
    # Reference computed from the *unpadded* sparse weight.
    # (The previously computed dense `W_np` was never used; it has been removed.)
    Y_np = X_np @ W_sp_np.T
    W_data = te.placeholder(shape=W_sp_np_padded.data.shape, dtype=str(W_sp_np_padded.data.dtype))
    W_indices = te.placeholder(
        shape=W_sp_np_padded.indices.shape, dtype=str(W_sp_np_padded.indices.dtype)
    )
    W_indptr = te.placeholder(
        shape=W_sp_np_padded.indptr.shape, dtype=str(W_sp_np_padded.indptr.dtype)
    )
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    with tvm.target.Target(target):
        Y = topi.cuda.sparse_dense_padded(X, W_data, W_indices, W_indptr)
        s = topi.cuda.schedule_sparse_dense_padded([Y])
        func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
        Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype=Y_np.dtype), device=dev)
        func(
            tvm.nd.array(X_np, device=dev),
            tvm.nd.array(W_sp_np_padded.data, device=dev),
            tvm.nd.array(W_sp_np_padded.indices, device=dev),
            tvm.nd.array(W_sp_np_padded.indptr, device=dev),
            Y_tvm,
        )
        tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np, atol=1e-5, rtol=1e-5)
@tvm.testing.parametrize_targets("cuda", "rocm")
def test_sparse_dense_padded_alter_op(target, dev):
    """AlterOpLayout should rewrite nn.sparse_dense into the padded GPU op."""
    with tvm.target.Target(target):
        M = 128
        N = 16
        K = 128
        X_np = np.random.randn(M, K).astype("float32")
        W_sp_np = random_bsr_matrix(N, K, 2, 2, density=0.01, dtype="float32")
        x = relay.var("x", relay.TensorType(X_np.shape, "float32"))
        mult = relay.op.nn.sparse_dense(
            x,
            (
                relay.Constant(tvm.nd.array(W_sp_np.data)),
                relay.Constant(tvm.nd.array(W_sp_np.indices)),
                relay.Constant(tvm.nd.array(W_sp_np.indptr)),
            ),
        )
        f = relay.Function([x], mult)
        f_ = relay.transform.InferType()(tvm.IRModule.from_expr(f))
        f_ = relay.transform.AlterOpLayout()(f_)
        # The pass must have swapped in the padded internal op.
        assert f_["main"].body.op.name == "nn.internal.sparse_dense_padded"
        # build with cuda and AlterOpLayout to ensure that sparse_dense_padded is in action
        with tvm.transform.PassContext(opt_level=3, required_pass="AlterOpLayout"):
            x = relay.build(tvm.IRModule.from_expr(f), target=target)
def test_sparse_add_csr():
    """topi.nn.sparse_add: dense X plus a CSR matrix, over several dtypes."""
    for indices_dtype in ["int32", "int64"]:
        for data_dtype in ["float32", "float64"]:
            M, K, density = 3, 49, 0.2
            X_np = np.random.randn(M, K).astype(data_dtype)
            Y_sp_np = sp.random(M, K, density=density, format="csr", dtype=data_dtype)
            Y_np = Y_sp_np.todense()
            Z_np = X_np + Y_np
            Y_data = te.placeholder(shape=Y_sp_np.data.shape, dtype=data_dtype)
            Y_indices = te.placeholder(shape=Y_sp_np.indices.shape, dtype=indices_dtype)
            Y_indptr = te.placeholder(shape=Y_sp_np.indptr.shape, dtype=indices_dtype)
            X = te.placeholder(shape=X_np.shape, dtype=data_dtype)
            Z = topi.nn.sparse_add(X, Y_data, Y_indices, Y_indptr)
            s = te.create_schedule(Z.op)
            func = tvm.build(s, [X, Y_data, Y_indices, Y_indptr, Z])
            Z_tvm = tvm.nd.array(np.zeros(Z_np.shape, dtype=Z_np.dtype))
            # scipy may produce different index dtypes; cast to the dtype
            # the placeholders were declared with.
            func(
                tvm.nd.array(X_np.astype(data_dtype)),
                tvm.nd.array(Y_sp_np.data.astype(data_dtype)),
                tvm.nd.array(Y_sp_np.indices.astype(indices_dtype)),
                tvm.nd.array(Y_sp_np.indptr.astype(indices_dtype)),
                Z_tvm,
            )
            tvm.testing.assert_allclose(Z_tvm.numpy(), Z_np, atol=1e-4, rtol=1e-4)
def verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, BS_C, density, layout):
    """Check sparse 1x1 conv2d with a BSR weight against the dense reference.

    The BSR matrix of shape (N, K) plays the role of a 1x1 convolution
    kernel with K input channels and N output channels.
    """
    if layout == "NHWC":
        X_np = np.random.randn(M, H, W, K).astype("float32")
    elif layout == "NCHW":
        X_np = np.random.randn(M, K, H, W).astype("float32")
    W_sp_np = random_bsr_matrix(N, K, BS_R, BS_C, density=density, dtype="float32")
    W_np = W_sp_np.todense()
    if layout == "NHWC":
        Y_np = tvm.topi.testing.conv2d_nhwc_python(X_np, np.array(W_np).T.reshape(1, 1, K, N), 1, 0)
    elif layout == "NCHW":
        Y_np = tvm.topi.testing.conv2d_nchw_python(X_np, np.array(W_np).reshape(N, K, 1, 1), 1, 0)
    if BS_C == 1:
        # With 1-wide blocks the trailing unit dimension is squeezed away,
        # so the placeholder and the fed data drop it as well.
        W_data = te.placeholder(shape=W_sp_np.data.shape[:-1], dtype=str(W_sp_np.data.dtype))
        W_sp_np_data = W_sp_np.data.reshape(W_sp_np.data.shape[0], BS_R)
    else:
        W_data = te.placeholder(shape=W_sp_np.data.shape, dtype=str(W_sp_np.data.dtype))
        W_sp_np_data = W_sp_np.data
    W_indices = te.placeholder(shape=W_sp_np.indices.shape, dtype=str(W_sp_np.indices.dtype))
    W_indptr = te.placeholder(shape=W_sp_np.indptr.shape, dtype=str(W_sp_np.indptr.dtype))
    X = te.placeholder(shape=X_np.shape, dtype=str(X_np.dtype))
    Y = topi.nn.sparse_conv2d(X, W_data, W_indices, W_indptr, layout)
    s = te.create_schedule(Y.op)
    def check_device(device):
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        print("Running on target: %s" % device)
        func = tvm.build(s, [X, W_data, W_indices, W_indptr, Y])
        Y_tvm = tvm.nd.array(np.zeros(Y_np.shape, dtype="float32"))
        func(
            tvm.nd.array(X_np, dev),
            tvm.nd.array(W_sp_np_data, dev),
            tvm.nd.array(W_sp_np.indices, dev),
            tvm.nd.array(W_sp_np.indptr, dev),
            Y_tvm,
        )
        tvm.testing.assert_allclose(Y_tvm.numpy(), Y_np.astype("float32"), atol=1e-4, rtol=1e-4)
    check_device("llvm")
def test_sparse_conv2d_bsr():
    """Sparse conv2d over both layouts, plus the squeezed BS_C == 1 path."""
    M, H, W, N, K, BS_R, BS_C, density = 1, 32, 32, 128, 64, 8, 16, 0.9
    for bs_c, layout in ((BS_C, "NHWC"), (BS_C, "NCHW"), (1, "NHWC")):
        verify_sparse_conv2d_bsr(M, H, W, N, K, BS_R, bs_c, density, layout)
if __name__ == "__main__":
    # test_csrmv()
    # test_csrmm()
    # test_dense()
    # test_sparse_dense_csr()
    # test_sparse_dense_bsr_randomized()
    # test_sparse_transpose_csr()
    # test_sparse_dense_padded_cuda()
    # test_sparse_dense_padded_alter_op()
    # test_sparse_dense_csr_reverse()
    # test_sparse_dense_bsr_reverse()
    # test_sparse_add_csr()
    # Fixed: the previous call referenced the undefined name
    # `test_sparse_conv2d`; the function defined above is
    # `test_sparse_conv2d_bsr`, so running this file as a script
    # raised NameError.
    test_sparse_conv2d_bsr()
| 26,009 | 40.951613 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_depthwise_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import autotvm, te, topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.nn.depthwise_conv2d import _get_workload
from tvm.topi.x86.depthwise_conv2d import _fallback_schedule
from tvm.topi.generic import conv2d as conv2d_generic
_depthwise_conv2d_implement = {
"NCHW": {
"generic": [(topi.nn.depthwise_conv2d_nchw, topi.generic.schedule_depthwise_conv2d_nchw)],
"arm_cpu": [
(topi.arm_cpu.depthwise_conv2d_nchw, topi.arm_cpu.schedule_depthwise_conv2d_nchw),
(
topi.arm_cpu.depthwise_conv2d_nchw_spatial_pack,
topi.arm_cpu.schedule_depthwise_conv2d_nchw_spatial_pack,
),
],
"gpu": [(topi.cuda.depthwise_conv2d_nchw, topi.cuda.schedule_depthwise_conv2d_nchw)],
"mali": [(topi.mali.depthwise_conv2d_nchw, topi.mali.schedule_depthwise_conv2d_nchw)],
"bifrost": [(topi.nn.depthwise_conv2d_nchw, topi.bifrost.schedule_depthwise_conv2d_nchw)],
"intel_graphics": [
(
topi.intel_graphics.depthwise_conv2d_nchw,
topi.intel_graphics.schedule_depthwise_conv2d_nchw,
)
],
},
"NHWC": {
"generic": [
(topi.nn.depthwise_conv2d_nhwc, topi.generic.schedule_depthwise_conv2d_nhwc),
(topi.nn.depthwise_conv2d_nhwc, conv2d_generic.schedule_depthwise_conv2d_nhwc),
],
"arm_cpu": [
(
topi.arm_cpu.compute_depthwise_conv2d_nhwc,
topi.arm_cpu.schedule_depthwise_conv2d_nhwc,
)
],
"gpu": [(topi.nn.depthwise_conv2d_nhwc, topi.cuda.schedule_depthwise_conv2d_nhwc)],
"mali": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
"bifrost": [(topi.mali.depthwise_conv2d_nhwc, topi.mali.schedule_depthwise_conv2d_nhwc)],
},
"NCHWc": {
"generic": [(topi.x86.depthwise_conv2d_NCHWc, topi.x86.schedule_depthwise_conv2d_NCHWc)],
},
}
# Fixed RNG seed so every parametrized test sees the same reference data.
random_seed = tvm.testing.parameter(0)
# (input dtype, output dtype) pairs applied to all tests in this file.
in_dtype, out_dtype = tvm.testing.parameters(
    ("float32", "float32"),
    ("float16", "float16"),
)
@tvm.testing.fixture
def input_shape(layout, batch, in_channel, in_size, filter_shape):
    """Input tensor shape for the given layout.

    For NCHWc the channel-block size is the largest divisor of in_channel
    that does not exceed the output block size taken from filter_shape.

    Raises
    ------
    ValueError
        If the layout is not one of NCHW / NHWC / NCHWc (previously this
        silently returned None; the explicit raise matches scale_shape).
    """
    if layout == "NCHW":
        return (batch, in_channel, in_size, in_size)
    elif layout == "NHWC":
        return (batch, in_size, in_size, in_channel)
    elif layout == "NCHWc":
        oc_block = filter_shape[-1]
        ic_block = next(bn for bn in range(oc_block, 0, -1) if in_channel % bn == 0)
        return (batch, in_channel // ic_block, in_size, in_size, ic_block)
    raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def filter_shape(layout, in_channel, channel_multiplier, kernel):
    """Filter tensor shape for the given layout.

    Raises
    ------
    ValueError
        If the layout is not one of NCHW / NHWC / NCHWc (previously this
        silently returned None; the explicit raise matches scale_shape).
    """
    filter_channel = in_channel
    if layout == "NCHW":
        return (filter_channel, channel_multiplier, kernel, kernel)
    elif layout == "NHWC":
        return (kernel, kernel, filter_channel, channel_multiplier)
    elif layout == "NCHWc":
        out_channel = in_channel * channel_multiplier
        # For testing the functionality, we choose an arbitrary block
        # size that can divide out_channel, regardless of the
        # performance.
        oc_block = next(bn for bn in range(16, 0, -1) if out_channel % bn == 0)
        return (out_channel // oc_block, 1, kernel, kernel, 1, oc_block)
    raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def scale_shape(layout, in_channel, channel_multiplier, filter_shape):
    """Shape of the per-output-channel scale tensor for the given layout."""
    total_channels = in_channel * channel_multiplier
    if layout == "NCHWc":
        block = filter_shape[-1]
        return (total_channels // block, block)
    if layout in ("NCHW", "NHWC"):
        return (total_channels,)
    raise ValueError("Unknown layout {}".format(layout))
@tvm.testing.fixture
def shift_shape(scale_shape):
    """The shift tensor always has the same shape as the scale tensor."""
    return scale_shape
@tvm.testing.fixture(cache_return_value=True)
def ref_data(
    random_seed,
    in_dtype,
    out_dtype,
    layout,
    input_shape,
    filter_shape,
    dilation,
    stride,
    padding,
    scale_shape,
    shift_shape,
    use_scale_shift,
    apply_relu,
):
    """Numpy reference (input, filter, scale, shift, output); cached per-parameters."""
    np.random.seed(random_seed)
    # scipy.signal.convolve2d does not support float16 data types, and
    # the python fallback is too slow for general use. Computing
    # ref_data in float32 will have fewer rounding errors than the TVM
    # float16 compute, but those vary based on schedule anyways.
    conv_dtype = "float32" if in_dtype == "float16" else in_dtype
    input_np = np.random.uniform(size=input_shape).astype(in_dtype)
    filter_np = np.random.uniform(size=filter_shape).astype(in_dtype)
    scale_np = np.random.uniform(size=scale_shape).astype(out_dtype)
    shift_np = np.random.uniform(size=shift_shape).astype(out_dtype)
    # Per layout: the python reference implementation, the per-axis dilation
    # tuple for dilate_python, and the broadcast shape for scale/shift.
    if layout == "NCHW":
        np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchw
        dilation = (1, 1, dilation, dilation)
        reshape = (1, -1, 1, 1)
    elif layout == "NHWC":
        np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nhwc
        dilation = (dilation, dilation, 1, 1)
        reshape = (1, 1, 1, -1)
    elif layout == "NCHWc":
        np_depthwise_conv2d = tvm.topi.testing.depthwise_conv2d_python_nchwc
        dilation = (1, 1, dilation, dilation, 1, 1)
        reshape = (1, scale_shape[0], 1, 1, scale_shape[1])
    dilated_filter_np = tvm.topi.testing.dilate_python(filter_np, dilation)
    output_np = np_depthwise_conv2d(
        input_np.astype(conv_dtype), dilated_filter_np.astype(conv_dtype), stride, padding
    ).astype(out_dtype)
    if use_scale_shift:
        output_np = output_np * scale_np.reshape(reshape) + shift_np.reshape(reshape)
    if apply_relu:
        output_np = np.maximum(output_np, 0)
    return (
        input_np,
        filter_np,
        scale_np,
        shift_np,
        output_np,
    )
class BaseDepthwiseConv2D:
    """Provides the test_conv2d test function, to be used by other test classes.
    Test parameter sets are split out into different classes for
    readability (e.g. used for mobilenet), and for restrictions
    (e.g. implemented only for llvm).
    """
    layout = tvm.testing.parameter("NCHW", "NHWC")
    (batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
        (1, 728, 32, 1, 3, 1),
        (4, 256, 64, 2, 5, 2),
    )
    padding = tvm.testing.parameter("SAME", "VALID")
    dilation = tvm.testing.parameter(1, 2)
    use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
    apply_relu = tvm.testing.parameter(True, False, ids=["with_relu", "no_relu"])
    # Subclasses set this to False for compile-only (cross-compilation) targets.
    run_after_compile = True
    def test_conv2d(
        self,
        target,
        dev,
        in_dtype,
        out_dtype,
        layout,
        input_shape,
        filter_shape,
        scale_shape,
        shift_shape,
        use_scale_shift,
        apply_relu,
        batch,
        in_channel,
        channel_multiplier,
        kernel,
        stride,
        padding,
        dilation,
        ref_data,
    ):
        target = tvm.target.Target(target)
        # Skip float16 runs when the device/driver cannot execute them.
        if (
            target.kind.name == "cuda"
            and in_dtype == "float16"
            and not tvm.contrib.nvcc.have_fp16(dev.compute_version)
        ):
            pytest.xfail("CUDA float16 intrinsics not available")
        if (
            target.kind.name == "vulkan"
            and in_dtype == "float16"
            and (
                not target.attrs.get("supports_float16", False)
                or not target.attrs.get("supports_16bit_buffer", False)
            )
        ):
            pytest.xfail("Vulkan float16 driver support not available")
        # Transform the padding argument from 'str' to 'tuple' to
        # match the "workload" tuple in TopHub. Which padding_args to
        # use for each layout chosen to reproduce previous behavior.
        if dilation == 1:
            padding_args = get_pad_tuple(padding, (kernel, kernel))
            padding_args_i = [0, 1, 2, 3] if layout == "NCHW" else [0, 1]
            padding_args = [padding_args[i] for i in padding_args_i]
        else:
            padding_args = padding
        # placeholder
        Input = te.placeholder(input_shape, name="Input", dtype=in_dtype)
        Filter = te.placeholder(filter_shape, name="Filter", dtype=in_dtype)
        Scale = te.placeholder(scale_shape, name="Scale", dtype=out_dtype)
        Shift = te.placeholder(shift_shape, name="Shift", dtype=out_dtype)
        # Pick the layout-specific scale/shift op and compute arguments.
        if layout == "NCHW":
            topi_scale_shift = topi.nn.scale_shift_nchw
            fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
        elif layout == "NHWC":
            topi_scale_shift = topi.nn.scale_shift_nhwc
            fcompute_args = (Input, Filter, stride, padding_args, dilation, out_dtype)
        elif layout == "NCHWc":
            topi_scale_shift = topi.nn.scale_shift_nchwc
            in_layout = "NCHW{}c".format(input_shape[-1])
            out_layout = "NCHW{}c".format(filter_shape[-1])
            fcompute_args = (
                Input,
                Filter,
                stride,
                padding,
                dilation,
                in_layout,
                out_layout,
                out_dtype,
            )
        with autotvm.tophub.context(target):  # load tophub pre-tuned parameters
            # Copy so the appended x86 entry does not mutate the shared table.
            impl_list = tvm.topi.testing.dispatch(target, _depthwise_conv2d_implement[layout])[:]
            # NOTE(review): `target` was converted to a Target object above;
            # comparing it to the string "llvm" here may never be True — confirm.
            if target == "llvm" and layout == "NCHW" and channel_multiplier == 1 and dilation == 1:
                impl_list.append(
                    (topi.x86.depthwise_conv2d_nchw, topi.x86.schedule_depthwise_conv2d_nchw)
                )
            for fcompute, fschedule in impl_list:
                with tvm.target.Target(target):
                    # Declare, build schedule
                    C = fcompute(*fcompute_args)
                    if use_scale_shift:
                        C = topi_scale_shift(C, Scale, Shift)
                    if apply_relu:
                        C = topi.nn.relu(C)
                    s = fschedule(C)
                # Build and run
                f = tvm.build(s, [Input, Filter, Scale, Shift, C], target)
                if self.run_after_compile:
                    input_np, filter_np, scale_np, shift_np, output_np = ref_data
                    if "int" in out_dtype:
                        tol = {"atol": 0, "rtol": 0}
                    elif out_dtype == "float32":
                        tol = {"rtol": 1e-4, "atol": 1e-5}
                    elif out_dtype == "float16":
                        # A summation in float16 with a single accumulator very
                        # quickly runs into large rounding errors.  At some point,
                        # this tolerance should be schedule-dependent for to avoid
                        # false negatives.
                        num_values_summed = kernel * kernel
                        gap_size = (
                            np.nextafter(output_np.max(), np.inf, dtype=output_np.dtype)
                            - output_np.max()
                        )
                        tol = {"rtol": 1e-3, "atol": num_values_summed * gap_size / 2}
                    input_tvm = tvm.nd.array(input_np, dev)
                    filter_tvm = tvm.nd.array(filter_np, dev)
                    scale_tvm = tvm.nd.array(scale_np, dev)
                    shift_tvm = tvm.nd.array(shift_np, dev)
                    output_tvm = tvm.nd.array(
                        np.zeros(shape=get_const_tuple(C.shape), dtype=C.dtype),
                        dev,
                    )
                    f(input_tvm, filter_tvm, scale_tvm, shift_tvm, output_tvm)
                    tvm.testing.assert_allclose(output_np, output_tvm.numpy(), **tol)
class TestDepthwiseConv2D(BaseDepthwiseConv2D):
    """Test variety of parameters, defined in BaseDepthwiseConv2D.  Also
    has llvm-specific tests for workload padding."""
    @tvm.testing.parametrize_targets("llvm")
    def test_workload_padding(
        self,
        out_dtype,
        layout,
        input_shape,
        filter_shape,
        target,
        ref_data,
        stride,
        padding,
        dilation,
    ):
        """Check the x86 fallback schedule's output-width tiling.

        The chosen tile_ow size must multiply back to the actual output
        width of the reference result.
        """
        input_np, filter_np, scale_np, shift_np, output_np = ref_data
        if layout == "NCHW":
            _, _, out_height, out_width = output_np.shape
        elif layout == "NHWC":
            _, out_height, out_width, _ = output_np.shape
        elif layout == "NCHWc":
            _, _, out_height, out_width, _ = output_np.shape
        Input = te.placeholder(input_shape, name="Input")
        Filter = te.placeholder(filter_shape, name="Filter")
        wkl = _get_workload(Input, Filter, (stride, stride), padding, dilation, out_dtype, layout)
        # check if tile_ow candidates are the factors of the right output weight.
        with tvm.target.Target(target):
            cfg = autotvm.get_config()
            _fallback_schedule(cfg, wkl)
            ow_tile = np.prod(cfg["tile_ow"].size)
        tvm.testing.assert_allclose(ow_tile, out_width)
class TestDepthwiseConv2D_MobilenetWorkloads(BaseDepthwiseConv2D):
    """Extra tests to verify functionality for workloads used by mobilenet."""
    # Mobilenet depthwise layers all use 3x3 kernels with SAME padding and
    # channel multiplier 1; only channel count, spatial size, and stride vary.
    layout = tvm.testing.parameter("NCHW")
    batch = tvm.testing.parameter(1)
    channel_multiplier = tvm.testing.parameter(1)
    kernel = tvm.testing.parameter(3)
    padding = tvm.testing.parameter("SAME")
    dilation = tvm.testing.parameter(1)
    in_channel, in_size, stride = tvm.testing.parameters(
        (32, 112, 1),
        (64, 112, 2),
        (128, 56, 1),
        (128, 56, 2),
        (256, 28, 1),
        (256, 28, 2),
        (512, 14, 1),
        (512, 14, 2),
        (1024, 7, 1),
    )
@tvm.testing.parametrize_targets("llvm")
class TestDepthwiseConv2D_NCHWc(BaseDepthwiseConv2D):
    """Tests specific to NCHWc layouts.
    Once the implementation supports channel_multiplier>1 and GPU
    devices, this class can be merged into TestDepthwiseConv2D.
    """
    # depthwise_conv2d_NCHWc currently does not support channel multiplier > 1
    layout = tvm.testing.parameter("NCHWc")
    (batch, in_channel, in_size, channel_multiplier, kernel, stride) = tvm.testing.parameters(
        (1, 728, 32, 1, 3, 1),
    )
@tvm.testing.parametrize_targets("llvm -device=arm_cpu -mtriple=aarch64-linux-gnu")
class TestDepthwiseConv2DArmCompile(BaseDepthwiseConv2D):
    """Compile-only tests for cross-compiling to ARM."""
    layout = tvm.testing.parameter("NHWC", "NCHW")
    batch = tvm.testing.parameter(1)
    dilation = tvm.testing.parameter(1)
    in_dtype, out_dtype = tvm.testing.parameters(("int16", "int32"))
    in_channel = tvm.testing.parameter(728)
    in_size = tvm.testing.parameter(32)
    kernel = tvm.testing.parameter(1)
    channel_multiplier = tvm.testing.parameter(1, 3)
    stride = tvm.testing.parameter(1)
    padding = tvm.testing.parameter("SAME")
    use_scale_shift = tvm.testing.parameter(True, False, ids=["with_scale_shift", "no_scale_shift"])
    # Cross-compiled binaries cannot run on the test host, so only compile.
    run_after_compile = False
if __name__ == "__main__":
    # Discover and run all tests in this file via tvm.testing's pytest wrapper.
    tvm.testing.main()
| 16,116 | 35.463801 | 100 | py |
tvm | tvm-main/tests/python/ci/test_script_converter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the conversion of bash to rst
"""
import sys
import tvm
from tvm.contrib import utils
# this has to be after the sys.path patching, so ignore pylint
# pylint: disable=wrong-import-position,wrong-import-order
from .test_utils import REPO_ROOT
sys.path.insert(0, str(REPO_ROOT / "docs"))
from script_convert import bash_to_python, BASH, BASH_IGNORE, BASH_MULTILINE_COMMENT
# pylint: enable=wrong-import-position,wrong-import-order
def test_bash_cmd():
    """A bash marker pair should be converted into an rst code block."""
    workdir = utils.tempdir()
    script = workdir / "src.sh"
    output = workdir / "dest.py"
    with open(script, "w") as handle:
        handle.writelines([BASH, "\n", "tvmc\n", BASH, "\n"])
    bash_to_python(script, output)
    with open(output, "r") as handle:
        result = handle.read()
    assert result == "# .. code-block:: bash\n#\n# \t tvmc\n#\n"
def test_bash_ignore_cmd():
    """Ignored bash sections must be dropped from the generated output."""
    workdir = utils.tempdir()
    script = workdir / "src.sh"
    output = workdir / "dest.py"
    with open(script, "w") as handle:
        handle.writelines(
            ["# start\n", BASH_IGNORE, "\n", "tvmc\n", BASH_IGNORE, "\n", "# end\n"]
        )
    bash_to_python(script, output)
    with open(output, "r") as handle:
        result = handle.read()
    assert result == "# start\n# end\n"
def test_no_command():
    """A script without any code-block markers passes through unchanged."""
    workdir = utils.tempdir()
    script = workdir / "src.sh"
    output = workdir / "dest.py"
    with open(script, "w") as handle:
        handle.writelines(["# start\n", "# description\n", "end\n"])
    bash_to_python(script, output)
    with open(output, "r") as handle:
        result = handle.read()
    assert result == "# start\n# description\nend\n"
def test_text_and_bash_command():
    """Prose around a bash block is preserved while the block is converted."""
    workdir = utils.tempdir()
    script = workdir / "src.sh"
    output = workdir / "dest.py"
    with open(script, "w") as handle:
        handle.writelines(["# start\n", BASH, "\n", "tvmc\n", BASH, "\n", "# end\n"])
    bash_to_python(script, output)
    with open(output, "r") as handle:
        result = handle.read()
    assert result == "# start\n# .. code-block:: bash\n#\n# \t tvmc\n#\n# end\n"
def test_last_line_break():
    """The trailing newline of the source file must survive conversion."""
    workdir = utils.tempdir()
    script = workdir / "src.sh"
    output = workdir / "dest.py"
    with open(script, "w") as handle:
        handle.writelines(["# start\n", "# end\n"])
    bash_to_python(script, output)
    with open(output, "r") as handle:
        result = handle.read()
    assert result == "# start\n# end\n"
def test_multiline_comment():
    """Test that bash comments are inserted correctly"""
    workdir = utils.tempdir()
    source = workdir / "src.sh"
    destination = workdir / "dest.py"
    pieces = [
        BASH_MULTILINE_COMMENT,
        "\n",
        '# """\n',
        "# comment\n",
        BASH_MULTILINE_COMMENT,
        "\n",
    ]
    with open(source, "w") as src_f:
        src_f.write("".join(pieces))
    bash_to_python(source, destination)
    with open(destination, "r") as dest_f:
        actual = dest_f.read()
    # Marker lines are removed and the leading "# " prefix is stripped
    assert actual == '"""\ncomment\n'
if __name__ == "__main__":
tvm.testing.main()
| 4,814 | 26.672414 | 94 | py |
tvm | tvm-main/tests/python/ci/test_tvmbot.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Test the @tvm-bot merge code
"""
import json
from pathlib import Path
from typing import Dict, Any
import tvm
from .test_utils import GITHUB_SCRIPT_ROOT, TempGit, run_script
SUCCESS_EXPECTED_OUTPUT = """
Dry run, would have merged with url=pulls/10786/merge and data={
"commit_title": "[Hexagon] 2-d allocation cleanup (#10786)",
"commit_message": "- Added device validity check in allocation. HexagonDeviceAPI should only be called for CPU/Hexagon types.\\n\\n- Check for \\"global.vtcm\\" scope instead of \\"vtcm\\". The ccope of N-d allocations produced by `LowerVtcmAlloc` should be `\\"global.vtcm\\"`. The previous check allowed unsupported scope such as `\\"local.vtcm\\"`.\\n\\n- Remove `vtcmallocs` entry after calling free.\\n\\nPreviously, the vtcm allocation map kept dangling pointers to `HexagonBuffer` objects after they had been freed.\\n\\n- Rename N-d alloc and free packed functions. Since most of the similar device functions use snake case, renaming `*.AllocND` to `*.alloc_nd` and `*.FreeND` to `*.free_nd`.\\n\\n\\ncc someone\\n\\n\\nCo-authored-by: Adam Straw <astraw@octoml.ai>",
"sha": "6f04bcf57d07f915a98fd91178f04d9e92a09fcd",
"merge_method": "squash"
}
""".strip()
class _TvmBotTest:
    # PR number of the fixture JSON (sample_prs/pr<NUMBER>.json) shared by all subclasses.
    NUMBER = 10786
    def preprocess_data(self, data: Dict[str, Any]):
        """
        Used to pre-process PR data before running the test. Override as
        necessary to edit data for specific test cases.
        """
        return data
    @tvm.testing.skip_if_wheel_test
    def test(self, tmpdir_factory):
        """
        Run the tvm-bot script using the data from preprocess_data
        """
        mergebot_script = GITHUB_SCRIPT_ROOT / "github_tvmbot.py"
        test_json_dir = Path(__file__).resolve().parent / "sample_prs"
        # Load the canned GitHub PR payload for this test's PR number
        with open(test_json_dir / f"pr{self.NUMBER}.json") as f:
            test_data = json.load(f)
        # Update testing data with replacements / additions
        test_data = self.preprocess_data(test_data)
        git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
        # The triggering comment, authored by the subclass-specified USER
        comment = {
            "body": self.COMMENT,
            "id": 123,
            "user": {
                "login": self.USER,
            },
        }
        allowed_users = [{"login": "abc"}, {"login": "other-abc"}]
        # Run the bot in dry-run mode; all GitHub API responses are injected
        # via the --testing-* flags so no network access happens
        proc = run_script(
            [
                mergebot_script,
                "--pr",
                self.NUMBER,
                "--dry-run",
                "--run-url",
                "https://example.com",
                "--testing-pr-json",
                json.dumps(test_data),
                "--testing-collaborators-json",
                json.dumps(allowed_users),
                "--testing-mentionable-users-json",
                json.dumps(allowed_users),
                "--trigger-comment-json",
                json.dumps(comment),
            ],
            env={
                "TVM_BOT_JENKINS_TOKEN": "123",
                "GH_ACTIONS_TOKEN": "123",
            },
            cwd=git.cwd,
        )
        # The bot logs to stderr; the subclass's EXPECTED marker must appear there
        if self.EXPECTED not in proc.stderr:
            raise RuntimeError(f"{proc.stderr}\ndid not contain\n{self.EXPECTED}")
class TestNoRequest(_TvmBotTest):
    """
    A PR for which the mergebot runs but no merge is requested
    """
    COMMENT = "@tvm-bot do something else"
    USER = "abc"
    EXPECTED = "Command 'do something else' did not match anything"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Scrub the review body so it cannot be interpreted as a bot command
        data["reviews"]["nodes"][0]["body"] = "nothing"
        return data
class TestSuccessfulMerge(_TvmBotTest):
    """
    Everything is fine so this PR will merge
    """
    # A collaborator requests a merge on a green PR: expect the dry-run merge output
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = SUCCESS_EXPECTED_OUTPUT
class TestBadCI(_TvmBotTest):
    """
    A PR which failed CI and cannot merge
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Cannot merge, these CI jobs are not successful on"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Mark the Jenkins build as failed
        contexts = data["commits"]["nodes"][0]["commit"]["statusCheckRollup"]["contexts"]["nodes"]
        for context in contexts:
            if "context" in context and context["context"] == "tvm-ci/pr-head":
                context["state"] = "FAILED"
        return data
class TestOldReview(_TvmBotTest):
    """
    A PR with passing CI and approving reviews on an old commit so it cannot merge
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Cannot merge, did not find any approving reviews"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Point the approving review at a stale commit SHA so it no longer counts
        data["reviews"]["nodes"][0]["commit"]["oid"] = "abc12345"
        return data
class TestMissingJob(_TvmBotTest):
    """
    PR missing an expected CI job and cannot merge
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Cannot merge, missing expected jobs"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Rename the required tvm-ci/pr-head context so the job appears missing
        contexts = data["commits"]["nodes"][0]["commit"]["statusCheckRollup"]["contexts"]["nodes"]
        for context in contexts:
            if "context" in context and context["context"] == "tvm-ci/pr-head":
                context["context"] = "something"
        return data
class TestInvalidAuthor(_TvmBotTest):
    """
    Merge requester is not a committer and cannot merge
    """
    # "not-abc" is absent from the collaborators list injected by _TvmBotTest
    COMMENT = "@tvm-bot merge"
    USER = "not-abc"
    EXPECTED = "Failed auth check 'collaborators', quitting"
class TestUnauthorizedComment(_TvmBotTest):
    """
    Check that a merge comment not from a CONTRIBUTOR is rejected
    """
    # Another user outside the collaborators list
    COMMENT = "@tvm-bot merge"
    USER = "not-abc2"
    EXPECTED = "Failed auth check 'collaborators'"
class TestNoReview(_TvmBotTest):
    """
    Check that a merge request without any reviews is rejected
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Cannot merge, did not find any approving reviews from users with write access"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Strip every review from the PR payload
        data["reviews"]["nodes"] = []
        return data
class TestChangesRequested(_TvmBotTest):
    """
    Check that a merge request with a 'Changes Requested' review is rejected
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Cannot merge, found [this review]"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Turn the existing review into a blocking CHANGES_REQUESTED review
        data["reviews"]["nodes"][0]["state"] = "CHANGES_REQUESTED"
        data["reviews"]["nodes"][0]["url"] = "http://example.com"
        return data
class TestCoAuthors(_TvmBotTest):
    """
    Check that a merge request with co-authors generates the correct commit message
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Co-authored-by: Some One <someone@email.com>"
    def preprocess_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        # Append a second author to the commit so a Co-authored-by line is emitted
        data["authorCommits"]["nodes"][0]["commit"]["authors"]["nodes"].append(
            {"name": "Some One", "email": "someone@email.com"}
        )
        return data
class TestRerunCI(_TvmBotTest):
    """
    Start a new CI job
    """
    # A collaborator may trigger a CI re-run
    COMMENT = "@tvm-bot rerun"
    USER = "abc"
    EXPECTED = "Rerunning ci with"
class TestRerunPermissions(_TvmBotTest):
    """
    Start a new CI job as an unauthorized user
    """
    # "someone" is not in the mentionable-users list, so the rerun is refused
    COMMENT = "@tvm-bot rerun"
    USER = "someone"
    EXPECTED = "Failed auth check 'mentionable_users', quitting"
class TestRerunNonAuthor(_TvmBotTest):
    """
    Start a new CI job as a mentionable user
    """
    # "other-abc" is a mentionable user but not the PR author; rerun is allowed
    COMMENT = "@tvm-bot rerun"
    USER = "other-abc"
    EXPECTED = "Passed auth check 'mentionable_users', continuing"
class TestIgnoreJobs(_TvmBotTest):
    """
    Ignore GitHub Actions jobs that don't start with CI /
    """
    COMMENT = "@tvm-bot merge"
    USER = "abc"
    EXPECTED = "Dry run, would have merged"
if __name__ == "__main__":
tvm.testing.main()
| 8,713 | 30.010676 | 780 | py |
tvm | tvm-main/tests/python/ci/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Infrastructure and tests for CI scripts"""
| 831 | 45.222222 | 62 | py |
tvm | tvm-main/tests/python/ci/test_ci.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test various CI scripts and GitHub Actions workflows"""
import shutil
import subprocess
import json
import textwrap
import sys
import logging
from pathlib import Path
import pytest
import tvm.testing
from .test_utils import REPO_ROOT, GITHUB_SCRIPT_ROOT, JENKINS_SCRIPT_ROOT, TempGit, run_script
# pylint: disable=wrong-import-position,wrong-import-order
sys.path.insert(0, str(REPO_ROOT / "ci"))
sys.path.insert(0, str(JENKINS_SCRIPT_ROOT))
sys.path.insert(0, str(GITHUB_SCRIPT_ROOT))
import scripts.github
import scripts.jenkins
from scripts.github.update_branch import EXPECTED_CI_JOBS
# pylint: enable=wrong-import-position,wrong-import-order
def parameterize_named(**cases):
    """Build a ``pytest.mark.parametrize`` decorator from named test cases.

    Each keyword is a test ID and its dict value holds the parameter values;
    every case dict must share the same parameter names.
    """
    first_case = next(iter(cases.values()))
    param_names = ",".join(first_case.keys())
    param_values = [tuple(case.values()) for case in cases.values()]
    return pytest.mark.parametrize(param_names, param_values, ids=cases.keys())
# pylint: disable=line-too-long
TEST_DATA_SKIPPED_BOT = {
"found-diff-no-additional": {
"main_xml_file": "unittest/file1.xml",
"main_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
</testcase>
</testsuite>
</testsuites>
""",
"pr_xml_file": "unittest/file2.xml",
"pr_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
<testcase classname="ctypes.tests.python.unittest.test_roofline"
name="test_estimate_peak_bandwidth[cuda]" time="4.679">
<skipped message="This is another skippe test" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"additional_tests_to_check": """{
"unittest": ["dummy_class#dummy_test"],
"unittest_GPU": ["another_dummy_class#another_dummy_test"]
}
""",
"target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"s3_prefix": "tvm-jenkins-artifacts-prod",
"jenkins_prefix": "ci.tlcpack.ai",
"common_main_build": """{"build_number": "4115", "state": "success"}""",
"commit_sha": "sha1234",
"expected_body": "The list below shows tests that ran in main sha1234 but were skipped in the CI build of sha1234:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\nunittest -> ctypes.tests.python.unittest.test_roofline#test_estimate_peak_bandwidth[cuda]\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).",
},
"found-diff-skipped-additional": {
"main_xml_file": "unittest/file1.xml",
"main_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
</testcase>
</testsuite>
</testsuites>
""",
"pr_xml_file": "unittest/file2.xml",
"pr_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
<testcase classname="ctypes.tests.python.unittest.test_roofline"
name="test_estimate_peak_bandwidth[cuda]" time="4.679">
<skipped message="This is another skippe test" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"additional_tests_to_check": """{
"unittest": ["ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner", "dummy_class#dummy_test"],
"unittest_GPU": ["another_dummy_class#another_dummy_test"]
}
""",
"target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"s3_prefix": "tvm-jenkins-artifacts-prod",
"jenkins_prefix": "ci.tlcpack.ai",
"common_main_build": """{"build_number": "4115", "state": "success"}""",
"commit_sha": "sha1234",
"expected_body": "The list below shows tests that ran in main sha1234 but were skipped in the CI build of sha1234:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\nunittest -> ctypes.tests.python.unittest.test_roofline#test_estimate_peak_bandwidth[cuda]\n```\n\nAdditional tests that were skipped in the CI build and present in the [`required_tests_to_run`](https://github.com/apache/tvm/blob/main/ci/scripts/github/required_tests_to_run.json) file:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).",
},
"no-diff": {
"main_xml_file": "unittest/file1.xml",
"main_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"pr_xml_file": "unittest/file2.xml",
"pr_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"additional_tests_to_check": """{
}
""",
"target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"s3_prefix": "tvm-jenkins-artifacts-prod",
"jenkins_prefix": "ci.tlcpack.ai",
"common_main_build": """{"build_number": "4115", "state": "success"}""",
"commit_sha": "sha1234",
"expected_body": "No diff in skipped tests with main found in this branch for commit sha1234.\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).",
},
"no-diff-skipped-additional": {
"main_xml_file": "unittest/file1.xml",
"main_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"pr_xml_file": "unittest/file2.xml",
"pr_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
<testsuite errors="0" failures="0" hostname="13e7c5f749d8" name="python-unittest-gpu-0-shard-1-ctypes" skipped="102"
tests="165" time="79.312" timestamp="2022-08-10T22:39:36.673781">
<testcase classname="ctypes.tests.python.unittest.test_auto_scheduler_search_policy"
name="test_sketch_search_policy_cuda_rpc_runner" time="9.679">
<skipped message="This test is skipped" type="pytest.skip">
Skipped
</skipped>
</testcase>
</testsuite>
</testsuites>
""",
"additional_tests_to_check": """{
"unittest": ["dummy_class#dummy_test", "ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner"],
"unittest_GPU": ["another_dummy_class#another_dummy_test"]
}
""",
"target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"s3_prefix": "tvm-jenkins-artifacts-prod",
"jenkins_prefix": "ci.tlcpack.ai",
"common_main_build": """{"build_number": "4115", "state": "success"}""",
"commit_sha": "sha1234",
"expected_body": "No diff in skipped tests with main found in this branch for commit sha1234.\n\nAdditional tests that were skipped in the CI build and present in the [`required_tests_to_run`](https://github.com/apache/tvm/blob/main/ci/scripts/github/required_tests_to_run.json) file:\n```\nunittest -> ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner\n```\nA detailed report of ran tests is [here](https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/testReport/).",
},
"unable-to-run": {
"main_xml_file": "unittest/file1.xml",
"main_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
</testsuites>
""",
"pr_xml_file": "unittest/file2.xml",
"pr_xml_content": """<?xml version="1.0" encoding="utf-8"?>
<testsuites>
</testsuites>
""",
"additional_tests_to_check": """{
"unittest": ["ctypes.tests.python.unittest.test_auto_scheduler_search_policy#test_sketch_search_policy_cuda_rpc_runner", "dummy_class#dummy_test"],
"unittest_GPU": ["another_dummy_class#another_dummy_test"]
}
""",
"target_url": "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
"s3_prefix": "tvm-jenkins-artifacts-prod",
"jenkins_prefix": "ci.tlcpack.ai",
"common_main_build": """{"build_number": "4115", "state": "failed"}""",
"commit_sha": "sha1234",
"expected_body": "Unable to run tests bot because main failed to pass CI at sha1234.",
},
}
# pylint: enable=line-too-long
@tvm.testing.skip_if_wheel_test
@parameterize_named(**TEST_DATA_SKIPPED_BOT)
# pylint: enable=line-too-long
def test_skipped_tests_comment(
    caplog,
    tmpdir_factory,
    main_xml_file,
    main_xml_content,
    pr_xml_file,
    pr_xml_content,
    additional_tests_to_check,
    target_url,
    s3_prefix,
    jenkins_prefix,
    common_main_build,
    commit_sha,
    expected_body,
):
    """
    Test that the skipped-tests summary comment is generated correctly for PRs
    """
    def write_xml_file(root_dir, xml_file, xml_content):
        # Start from a clean directory so reports never leak between cases
        shutil.rmtree(root_dir, ignore_errors=True)
        file = root_dir / xml_file
        file.parent.mkdir(parents=True)
        with open(file, "w") as f:
            f.write(textwrap.dedent(xml_content))
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    # JUnit XML reports for the PR build and the reference main build
    pr_test_report_dir = Path(git.cwd) / "pr-reports"
    write_xml_file(pr_test_report_dir, pr_xml_file, pr_xml_content)
    main_test_report_dir = Path(git.cwd) / "main-reports"
    write_xml_file(main_test_report_dir, main_xml_file, main_xml_content)
    with open(Path(git.cwd) / "required_tests_to_run.json", "w") as f:
        f.write(additional_tests_to_check)
    # Minimal GraphQL-shaped PR payload carrying the CI status target URL
    pr_data = {
        "commits": {
            "nodes": [
                {
                    "commit": {
                        "oid": commit_sha,
                        "statusCheckRollup": {
                            "contexts": {
                                "nodes": [
                                    {
                                        "context": "tvm-ci/pr-head",
                                        "targetUrl": target_url,
                                    }
                                ]
                            }
                        },
                    }
                }
            ]
        }
    }
    with caplog.at_level(logging.INFO):
        comment = scripts.github.github_skipped_tests_comment.get_skipped_tests_comment(
            pr=pr_data,
            github=None,
            s3_prefix=s3_prefix,
            jenkins_prefix=jenkins_prefix,
            common_commit_sha=commit_sha,
            pr_test_report_dir=pr_test_report_dir,
            main_test_report_dir=main_test_report_dir,
            common_main_build=json.loads(common_main_build),
            additional_tests_to_check_file=Path(git.cwd) / "required_tests_to_run.json",
        )
    assert_in(expected_body, comment)
    # The target URL should have been logged while resolving CI artifacts
    assert_in(f"with target {target_url}", caplog.text)
@tvm.testing.skip_if_wheel_test
@parameterize_named(
doc_link=dict(
target_url="https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect",
base_url="https://pr-docs.tlcpack.ai",
commit_sha="SHA",
expected_body="Built docs for commit SHA can be found "
"[here](https://pr-docs.tlcpack.ai/PR-11594/3/docs/index.html).",
)
)
def test_docs_comment(target_url, base_url, commit_sha, expected_body):
    """
    Test that a comment with a link to the docs is successfully left on PRs
    """
    # Build the GraphQL-shaped PR payload from the inside out
    status = {
        "context": "tvm-ci/pr-head",
        "targetUrl": target_url,
    }
    commit = {
        "oid": commit_sha,
        "statusCheckRollup": {"contexts": {"nodes": [status]}},
    }
    pr_data = {"commits": {"nodes": [{"commit": commit}]}}
    comment = scripts.github.github_docs_comment.get_doc_url(
        pr=pr_data,
        base_docs_url=base_url,
    )
    assert_in(expected_body, comment)
@tvm.testing.skip_if_wheel_test
@parameterize_named(
cc_no_one=dict(
pr_body="abc", requested_reviewers=[], existing_review_users=[], expected_reviewers=[]
),
cc_abc=dict(
pr_body="cc @abc",
requested_reviewers=[],
existing_review_users=[],
expected_reviewers=["abc"],
),
bad_cc_line=dict(
pr_body="cc @", requested_reviewers=[], existing_review_users=[], expected_reviewers=[]
),
cc_multiple=dict(
pr_body="cc @abc @def",
requested_reviewers=[],
existing_review_users=[],
expected_reviewers=["abc", "def"],
),
with_existing=dict(
pr_body="some text cc @abc @def something else",
requested_reviewers=[],
existing_review_users=[],
expected_reviewers=["abc", "def"],
),
with_existing_split=dict(
pr_body="some text cc @abc @def something else\n\n another cc @zzz z",
requested_reviewers=[],
existing_review_users=[],
expected_reviewers=["abc", "def", "zzz"],
),
with_existing_request=dict(
pr_body="some text cc @abc @def something else\n\n another cc @zzz z",
requested_reviewers=["abc"],
existing_review_users=[],
expected_reviewers=["def", "zzz"],
),
with_existing_reviewers=dict(
pr_body="some text cc @abc @def something else\n\n another cc @zzz z",
requested_reviewers=["abc"],
existing_review_users=["abc"],
expected_reviewers=["def", "zzz"],
),
with_no_reviewers=dict(
pr_body="some text cc @abc @def something else\n\n another cc @zzz z",
requested_reviewers=[],
existing_review_users=["abc"],
expected_reviewers=["def", "zzz"],
),
)
def test_cc_reviewers(
    tmpdir_factory, pr_body, requested_reviewers, existing_review_users, expected_reviewers
):
    """
    Test that reviewers are added from 'cc @someone' messages in PRs
    """
    script = GITHUB_SCRIPT_ROOT / "github_cc_reviewers.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    existing_reviews = [{"user": {"login": user}} for user in existing_review_users]
    # PR payload passed to the script through the PR environment variable
    pr_payload = {
        "number": 1,
        "body": pr_body,
        "requested_reviewers": [{"login": user} for user in requested_reviewers],
    }
    proc = run_script(
        [script, "--dry-run", "--testing-reviews-json", json.dumps(existing_reviews)],
        env={"PR": json.dumps(pr_payload)},
        cwd=git.cwd,
    )
    # The script prints the final reviewer list after dropping existing reviewers
    assert f"After filtering existing reviewers, adding: {expected_reviewers}" in proc.stdout
def generate_good_commit_status():
    """Return a fresh list with a SUCCESS status for every expected CI job.

    A new list is built on each call so tests can safely append extra
    statuses without affecting other cases.
    """
    # The original wrapped the comprehension in a redundant list() call
    # (flake8-comprehensions C400); a list comprehension is already a list.
    return [{"context": context, "state": "SUCCESS"} for context in EXPECTED_CI_JOBS]
@parameterize_named(
# Missing expected gpu/branch test
missing_tvm_ci_branch=dict(
statuses=[
{
"context": "test",
"state": "SUCCESS",
}
],
expected_rc=1,
expected_output="No good commits found in the last 1 commits",
),
# Only has the right passing test
has_expected_test=dict(
statuses=generate_good_commit_status(),
expected_rc=0,
expected_output="Found last good commit: 123: hello",
),
# Check with many statuses
many_statuses=dict(
statuses=generate_good_commit_status()
+ [
{"context": "gpu/branch2", "state": "SUCCESS"},
{"context": "gpu/branch3", "state": "FAILED"},
],
expected_rc=1,
expected_output="No good commits found in the last 1 commits",
),
many_success_statuses=dict(
statuses=generate_good_commit_status()
+ [
{"context": "gpu/branch2", "state": "SUCCESS"},
{"context": "gpu/branch3", "state": "SUCCESS"},
],
expected_rc=0,
expected_output="Found last good commit: 123: hello",
),
)
def test_update_branch(tmpdir_factory, statuses, expected_rc, expected_output):
    """
    Test that the last-successful branch script updates successfully
    """
    script = GITHUB_SCRIPT_ROOT / "update_branch.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    # One candidate commit carrying the parameterized CI statuses
    head_commit = {
        "statusCheckRollup": {"contexts": {"nodes": statuses}},
        "oid": "123",
        "messageHeadline": "hello",
    }
    history = {"edges": [], "nodes": [head_commit]}
    payload = {
        "data": {"repository": {"defaultBranchRef": {"target": {"history": history}}}}
    }
    proc = run_script(
        [script, "--dry-run", "--testonly-json", json.dumps(payload)],
        cwd=git.cwd,
        check=False,
    )
    if proc.returncode != expected_rc:
        raise RuntimeError(f"Wrong return code:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}")
    if expected_output not in proc.stdout:
        raise RuntimeError(
            f"Missing {expected_output}:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}"
        )
# pylint: disable=line-too-long
@parameterize_named(
author_gate=dict(
pr_author="abc",
comments=[],
expected="Skipping comment for author abc",
),
new_comment=dict(
pr_author="driazati",
comments=[],
expected="No existing comment found",
),
update_comment=dict(
pr_author="driazati",
comments=[
{
"author": {"login": "github-actions"},
"databaseId": "comment456",
"body": "<!---bot-comment--> abc",
}
],
expected="PATCH to https://api.github.com/repos/apache/tvm/issues/comments/comment456",
),
new_body=dict(
pr_author="driazati",
comments=[],
expected="Commenting "
+ textwrap.dedent(
"""
<!---bot-comment-->
Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment.
<!--bot-comment-ccs-start-->
* the cc<!--bot-comment-ccs-end--><!--bot-comment-skipped-tests-start-->
* the skipped tests<!--bot-comment-skipped-tests-end--><!--bot-comment-docs-start-->
* the docs<!--bot-comment-docs-end-->
"""
).strip(),
),
update_body=dict(
pr_author="driazati",
comments=[
{
"author": {"login": "github-actions"},
"databaseId": "comment456",
"body": textwrap.dedent(
"""
<!---bot-comment-->
Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment.
<!--bot-comment-ccs-start-->
* the cc<!--bot-comment-ccs-end--><!--bot-comment-something-tests-start-->
* something else<!--bot-comment-something-tests-end--><!--bot-comment-docs-start-->
* the docs<!--bot-comment-docs-end-->
"""
).strip(),
}
],
expected="Commenting "
+ textwrap.dedent(
"""
<!---bot-comment-->
Thanks for contributing to TVM! Please refer to the contributing guidelines https://tvm.apache.org/docs/contribute/ for useful information and tips. Please request code reviews from [Reviewers](https://github.com/apache/incubator-tvm/blob/master/CONTRIBUTORS.md#reviewers) by @-ing them in a comment.
<!--bot-comment-ccs-start-->
* the cc<!--bot-comment-ccs-end--><!--bot-comment-something-tests-start-->
* something else<!--bot-comment-something-tests-end--><!--bot-comment-docs-start-->
* the docs<!--bot-comment-docs-end--><!--bot-comment-skipped-tests-start-->
* the skipped tests<!--bot-comment-skipped-tests-end-->
"""
).strip(),
),
)
# pylint: enable=line-too-long
def test_pr_comment(tmpdir_factory, pr_author, comments, expected):
    """
    Test the PR commenting bot
    """
    comment_script = GITHUB_SCRIPT_ROOT / "github_pr_comment.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    target_url = "https://ci.tlcpack.ai/job/tvm/job/PR-11594/3/display/redirect"
    # Single commit with a tvm-ci/pr-head status pointing at a Jenkins run
    commit = {
        "commit": {
            "oid": "sha1234",
            "statusCheckRollup": {
                "contexts": {
                    "nodes": [
                        {
                            "context": "tvm-ci/pr-head",
                            "targetUrl": target_url,
                        }
                    ]
                }
            },
        }
    }
    # Canned GraphQL responses keyed by request; the second request returns
    # the PR with its existing comments, author, and commits
    data = {
        "[1] POST - https://api.github.com/graphql": {},
        "[2] POST - https://api.github.com/graphql": {
            "data": {
                "repository": {
                    "pullRequest": {
                        "number": 1234,
                        "comments": {
                            "nodes": comments,
                        },
                        "author": {
                            "login": pr_author,
                        },
                        "commits": {
                            "nodes": [commit],
                        },
                    }
                }
            }
        },
    }
    # Section bodies the bot should stitch into its single bot-comment
    # (note: rebinds the `comments` parameter, which is no longer needed)
    comments = {
        "ccs": "the cc",
        "docs": "the docs",
        "skipped-tests": "the skipped tests",
    }
    proc = run_script(
        [
            comment_script,
            "--dry-run",
            "--test-data",
            json.dumps(data),
            "--test-comments",
            json.dumps(comments),
            "--pr",
            "1234",
        ],
        stderr=subprocess.STDOUT,
        cwd=git.cwd,
    )
    assert_in(expected, proc.stdout)
# Each named case supplies: git commands to replay on top of the base commit,
# whether git_skip_ci.py is expected to skip CI (exit 0) or not (exit 1),
# the PR title to pass, and a human-readable rationale used in the error.
@parameterize_named(
    dont_skip_main=dict(
        commands=[],
        should_skip=False,
        pr_title="[skip ci] test",
        why="ci should not be skipped on main",
    ),
    dont_skip_main_with_commit=dict(
        commands=[
            ["commit", "--allow-empty", "--message", "[skip ci] commit 1"],
        ],
        should_skip=False,
        pr_title="[skip ci] test",
        why="ci should not be skipped on main",
    ),
    skip_on_new_branch=dict(
        commands=[
            ["checkout", "-b", "some_new_branch"],
            ["commit", "--allow-empty", "--message", "[skip ci] commit 1"],
        ],
        should_skip=True,
        pr_title="[skip ci] test",
        why="ci should be skipped on a branch with [skip ci] in the last commit",
    ),
    no_skip_in_pr_title=dict(
        commands=[
            ["checkout", "-b", "some_new_branch"],
            ["commit", "--allow-empty", "--message", "[skip ci] commit 1"],
        ],
        should_skip=False,
        pr_title="[no skip ci] test",
        why="ci should not be skipped on a branch with "
        "[skip ci] in the last commit but not the PR title",
    ),
    skip_in_pr_title=dict(
        commands=[
            ["checkout", "-b", "some_new_branch"],
            ["commit", "--allow-empty", "--message", "[skip ci] commit 1"],
            ["commit", "--allow-empty", "--message", "commit 2"],
        ],
        should_skip=True,
        pr_title="[skip ci] test",
        why="ci should be skipped with [skip ci] in the PR title",
    ),
    skip_in_pr_title_many_commits=dict(
        commands=[
            ["checkout", "-b", "some_new_branch"],
            ["commit", "--allow-empty", "--message", "commit 1"],
            ["commit", "--allow-empty", "--message", "commit 2"],
            ["commit", "--allow-empty", "--message", "commit 3"],
            ["commit", "--allow-empty", "--message", "commit 4"],
        ],
        should_skip=True,
        pr_title="[skip ci] test",
        why="ci should be skipped with [skip ci] in the PR title",
    ),
    skip_anywhere_in_title=dict(
        commands=[
            ["checkout", "-b", "some_new_branch"],
        ],
        should_skip=True,
        pr_title="[something][skip ci] test",
        why="skip ci tag should work anywhere in title",
    ),
)
def test_skip_ci(tmpdir_factory, commands, should_skip, pr_title, why):
    """
    Test that CI is skipped when it should be

    Runs ci/scripts/jenkins/git_skip_ci.py against a scratch git repository
    and checks its exit code: 0 means "skip CI", non-zero means "run CI".
    """
    skip_ci_script = JENKINS_SCRIPT_ROOT / "git_skip_ci.py"
    # Fresh repo with an identity configured so commits succeed.
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    git.run("config", "user.name", "ci")
    git.run("config", "user.email", "email@example.com")
    git.run("commit", "--allow-empty", "--message", "base commit")
    # Replay the per-case git history on top of the base commit.
    for command in commands:
        git.run(*command)
    pr_number = "1234"
    proc = run_script(
        [skip_ci_script, "--pr", pr_number, "--pr-title", pr_title],
        cwd=git.cwd,
        check=False,
    )
    # Exit code convention of git_skip_ci.py: 0 == skip, non-zero == run.
    expected = 0 if should_skip else 1
    if proc.returncode != expected:
        raise RuntimeError(
            f"Unexpected return code {proc.returncode} "
            f"(expected {expected}) in {why}:\n{proc.stdout}"
        )
@parameterize_named(
    no_file=dict(files=[], should_skip=True),
    readme=dict(files=["README.md"], should_skip=True),
    c_file=dict(files=["test.c"], should_skip=False),
    c_and_readme=dict(files=["test.c", "README.md"], should_skip=False),
    src_file_and_readme=dict(
        files=["src/autotvm/feature_visitor.cc", "README.md"], should_skip=False
    ),
    yaml_and_readme=dict(files=[".asf.yaml", "docs/README.md"], should_skip=True),
)
def test_skip_globs(tmpdir_factory, files, should_skip):
    """
    Test that CI is skipped if only certain files are edited
    """
    # git_skip_ci_globs.py exits 0 when every changed file matches a
    # "safe to skip" glob, and 1 otherwise.
    script = JENKINS_SCRIPT_ROOT / "git_skip_ci_globs.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    file_list = ",".join(files)
    proc = run_script(
        [script, "--files", file_list],
        check=False,
        cwd=git.cwd,
    )
    expected_code = 0 if should_skip else 1
    assert proc.returncode == expected_code
def all_time_keys(time):
    """Build a dict assigning *time* to every GitHub timestamp field."""
    fields = ("updatedAt", "lastEditedAt", "createdAt", "publishedAt")
    return {field: time for field in fields}
# Each case is a single mocked GraphQL pull-request node plus the log line
# ping_reviewers.py is expected to print for it.
@parameterize_named(
    draft=dict(
        pull_request={
            "isDraft": True,
            "number": 2,
        },
        check="Checking 0 of 1 fetched",
    ),
    not_draft=dict(
        pull_request={
            "isDraft": False,
            "number": 2,
        },
        check="Checking 0 of 1 fetched",
    ),
    week_old=dict(
        pull_request={
            "number": 123,
            "url": "https://github.com/apache/tvm/pull/123",
            "body": "cc @someone",
            "isDraft": False,
            "author": {"login": "user"},
            "reviews": {"nodes": []},
            **all_time_keys("2022-01-18T17:54:19Z"),
            "comments": {"nodes": []},
        },
        check="Pinging reviewers ['someone'] on https://github.com/apache/tvm/pull/123",
    ),
    # Old comment, ping
    old_comment=dict(
        pull_request={
            "number": 123,
            "url": "https://github.com/apache/tvm/pull/123",
            "body": "cc @someone",
            "isDraft": False,
            "author": {"login": "user"},
            "reviews": {"nodes": []},
            **all_time_keys("2022-01-18T17:54:19Z"),
            "comments": {
                "nodes": [
                    {
                        **all_time_keys("2022-01-18T17:54:19Z"),
                        "bodyText": "abc",
                    },
                ]
            },
        },
        check="Pinging reviewers ['someone'] on https://github.com/apache/tvm/pull/123",
    ),
    # New comment, don't ping
    new_comment=dict(
        pull_request={
            "number": 123,
            "url": "https://github.com/apache/tvm/pull/123",
            "body": "cc @someone",
            "isDraft": False,
            "author": {"login": "user"},
            "reviews": {"nodes": []},
            **all_time_keys("2022-01-18T17:54:19Z"),
            "comments": {
                "nodes": [
                    {**all_time_keys("2022-01-27T17:54:19Z"), "bodyText": "abc"},
                ]
            },
        },
        check="Not pinging PR 123",
    ),
)
def test_ping_reviewers(tmpdir_factory, pull_request, check):
    """
    Test that reviewers are messaged after a time period of inactivity

    Feeds one mocked GraphQL pull request into ping_reviewers.py with --now
    pinned to 2022-01-26 and asserts on the script's output.
    """
    reviewers_script = GITHUB_SCRIPT_ROOT / "ping_reviewers.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    # Shape mirrors GitHub's GraphQL response for repository.pullRequests.
    data = {
        "data": {
            "repository": {
                "pullRequests": {
                    "nodes": [pull_request],
                    "edges": [],
                }
            }
        }
    }
    proc = run_script(
        [
            reviewers_script,
            "--dry-run",
            "--wait-time-minutes",
            "1",
            "--cutoff-pr-number",
            "5",
            "--pr-json",
            json.dumps(data),
            "--now",
            "2022-01-26T17:54:19Z",
        ],
        cwd=git.cwd,
    )
    assert_in(check, proc.stdout)
def assert_in(needle: str, haystack: str):
    """
    Check that 'needle' is in 'haystack'
    """
    if needle in haystack:
        return
    raise AssertionError(f"item not found:\n{needle}\nin:\n{haystack}")
@tvm.testing.skip_if_wheel_test
@parameterize_named(
    # Cases: a mocked GitHub issue/PR payload plus the log line
    # github_tag_teams.py is expected to print for it.
    no_cc=dict(
        source_type="ISSUE",
        data={
            "title": "A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "abc"}],
            "body": textwrap.dedent(
                """
              hello
            """.strip()
            ),
        },
        check="No one to cc, exiting",
    ),
    no_additional_cc=dict(
        source_type="ISSUE",
        data={
            "title": "A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "abc"}],
            "body": textwrap.dedent(
                """
              hello
              cc @test
            """.strip()
            ),
        },
        check="No one to cc, exiting",
    ),
    cc_update=dict(
        source_type="ISSUE",
        data={
            "title": "A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something"}],
            "body": textwrap.dedent(
                """
            hello
            something"""
            ),
        },
        check="would have updated issues/1234 with {'body': "
        "'\\nhello\\n\\nsomething\\n\\ncc @person1 @person2 @person4'}",
    ),
    already_cced=dict(
        source_type="ISSUE",
        data={
            "title": "A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something"}],
            "body": textwrap.dedent(
                """
            hello
            cc @person1 @person2 @person4"""
            ),
        },
        check="No one to cc, exiting",
    ),
    not_already_cced=dict(
        source_type="ISSUE",
        data={
            "title": "[something] A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": textwrap.dedent(
                """
            hello
            something"""
            ),
        },
        check="would have updated issues/1234 with {'body': "
        "'\\nhello\\n\\nsomething\\n\\ncc @person1 @person2 @person4'}",
    ),
    no_new_ccs=dict(
        source_type="ISSUE",
        data={
            "title": "[something] A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": textwrap.dedent(
                """
            hello
            cc @person1 @person2 @person4"""
            ),
        },
        check="No one to cc, exiting",
    ),
    mismatching_tags=dict(
        source_type="PR",
        data={
            "title": "[something] A title",
            "number": 1234,
            "draft": False,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": textwrap.dedent(
                """
            hello
            cc @person1 @person2 @person4"""
            ),
        },
        check="No one to cc, exiting",
    ),
    draft_pr=dict(
        source_type="PR",
        data={
            "title": "[something] A title",
            "number": 1234,
            "draft": True,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": textwrap.dedent(
                """
            hello
            cc @person1 @person2 @person4"""
            ),
        },
        check="Terminating since 1234 is a draft",
    ),
    edit_inplace=dict(
        source_type="ISSUE",
        data={
            "title": "[something] A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": "`mold` and `lld` can be a much faster alternative to `ld` from gcc. "
            "We should modify our CMakeLists.txt to detect and use these when possible. cc @person1"
            "\n\ncc @person4",
        },
        check="would have updated issues/1234 with {'body': '`mold` and `lld` can be a much"
        " faster alternative to `ld` from gcc. We should modify our CMakeLists.txt to "
        "detect and use these when possible. cc @person1\\n\\ncc @person2 @person4'}",
    ),
    edit_out_of_place=dict(
        source_type="ISSUE",
        data={
            "title": "[something3] A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [{"name": "something2"}],
            "body": "@person2 @SOME1-ONE-",
        },
        check="Dry run, would have updated issues/1234 with"
        " {'body': '@person2 @SOME1-ONE-\\n\\ncc @person1'}",
    ),
    atted_but_not_cced=dict(
        source_type="ISSUE",
        data={
            "title": "[] A title",
            "number": 1234,
            "user": {
                "login": "person5",
            },
            "labels": [],
            "body": "@person2 @SOME1-ONE-",
        },
        check="No one to cc, exiting",
    ),
)
def test_github_tag_teams(tmpdir_factory, source_type, data, check):
    """
    Check that individuals are tagged from team headers
    """
    tag_script = GITHUB_SCRIPT_ROOT / "github_tag_teams.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    # The team roster lives in a tracking issue: "<tag>: @user ..." lines in
    # the issue body and its comments define who gets cc'ed for each tag.
    issue_body = """
    some text
    [temporary] opt-in: @person5
    - something: @person1 @person2
    - something3: @person1 @person2 @SOME1-ONE-
    - something else @person1 @person2
    - something else2: @person1 @person2
    - something-else @person1 @person2
    """
    comment1 = """
    another thing: @person3
    another-thing @person3
    """
    comment2 = """
    something @person4
    @person5
    """
    teams = {
        "data": {
            "repository": {
                "issue": {
                    "body": issue_body,
                    "comments": {"nodes": [{"body": comment1}, {"body": comment2}]},
                }
            }
        }
    }
    # The script reads the issue/PR payload from an environment variable named
    # after the source type ("ISSUE" or "PR").
    env = {
        source_type: json.dumps(data),
    }
    proc = run_script(
        [
            tag_script,
            "--dry-run",
            "--team-issue-json",
            json.dumps(teams),
        ],
        stderr=subprocess.STDOUT,
        cwd=git.cwd,
        env=env,
    )
    assert_in(check, proc.stdout)
@tvm.testing.skip_if_wheel_test
@parameterize_named(
    # Mocked Docker Hub tag listings for the tlcpackstaging and tlcpack
    # repositories, the expected log line, and image tags that must appear
    # in the script's output.
    same_tags=dict(
        tlcpackstaging_body={
            "results": [
                {
                    "last_updated": "2022-06-01T00:00:00.123456Z",
                    "name": "123-123-abc",
                },
            ]
        },
        tlcpack_body={
            "results": [
                {
                    "last_updated": "2022-06-01T00:00:00.123456Z",
                    "name": "123-123-abc",
                },
            ]
        },
        expected="Tag names were the same, no update needed",
        expected_images=[],
    ),
    staging_update=dict(
        tlcpackstaging_body={
            "results": [
                {
                    "last_updated": "2022-06-01T01:00:00.123456Z",
                    "name": "234-234-abc-staging",
                },
                {
                    "last_updated": "2022-06-01T00:00:00.123456Z",
                    "name": "456-456-abc",
                },
            ]
        },
        tlcpack_body={
            "results": [
                {
                    "last_updated": "2022-06-01T00:00:00.123456Z",
                    "name": "123-123-abc",
                },
            ]
        },
        expected="Using tlcpackstaging tag on tlcpack",
        expected_images=[
            '"tag": "tlcpack/ci-arm:456-456-abc"',
        ],
    ),
    tlcpack_update=dict(
        tlcpackstaging_body={
            "results": [
                {
                    "last_updated": "2022-06-01T00:00:00.123456Z",
                    "name": "123-123-abc",
                },
            ]
        },
        tlcpack_body={
            "results": [
                {
                    "last_updated": "2022-06-01T00:01:00.123456Z",
                    "name": "234-234-abc",
                },
            ]
        },
        expected="Found newer image, using: tlcpack",
        expected_images=[
            '"tag": "tlcpack/ci-arm:234-234-abc",',
        ],
    ),
)
def test_open_docker_update_pr(
    tmpdir_factory, tlcpackstaging_body, tlcpack_body, expected, expected_images
):
    """Test workflow to open a PR to update Docker images"""
    tag_script = JENKINS_SCRIPT_ROOT / "open_docker_update_pr.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    git.run("config", "user.name", "ci")
    git.run("config", "user.email", "email@example.com")
    images = [
        "ci_arm",
        "ci_cortexm",
        "ci_cpu",
        "ci_gpu",
        "ci_hexagon",
        "ci_i386",
        "ci_lint",
        "ci_minimal",
        "ci_riscv",
        "ci_wasm",
    ]
    # Serve the same mocked tag listing for every image; note tlcpack repo
    # names use dashes where tlcpackstaging uses underscores.
    docker_data = {}
    for image in images:
        docker_data[f"repositories/tlcpackstaging/{image}/tags"] = tlcpackstaging_body
        docker_data[f"repositories/tlcpack/{image.replace('_', '-')}/tags"] = tlcpack_body
    proc = run_script(
        [
            tag_script,
            "--dry-run",
            "--testing-docker-data",
            json.dumps(docker_data),
        ],
        cwd=git.cwd,
        env={"GITHUB_TOKEN": "1234"},
        stderr=subprocess.STDOUT,
    )
    for line in expected_images:
        if line not in proc.stdout:
            raise RuntimeError(f"Missing line {line} in output:\n{proc.stdout}")
    assert_in(expected, proc.stdout)
@parameterize_named(
    use_tlcpack=dict(
        images=["ci_arm", "ci_lint"],
        expected={
            "ci_arm": "tlcpack/ci-arm:abc-abc-123",
            "ci_lint": "tlcpack/ci-lint:abc-abc-234",
        },
    ),
    use_staging=dict(
        images=["ci_arm2"],
        expected={
            "ci_arm2": "tlcpackstaging/ci_arm2:abc-abc-123",
        },
    ),
)
def test_determine_docker_images(tmpdir_factory, images, expected):
    """Test script to decide whether to use tlcpack or tlcpackstaging for images"""
    script = JENKINS_SCRIPT_ROOT / "determine_docker_images.py"
    git_dir = tmpdir_factory.mktemp("tmp_git_dir")
    # Mocked Docker Hub responses: only the tlcpack tags listed here "exist",
    # so ci_arm2 (absent below) should resolve to tlcpackstaging instead.
    docker_data = {
        "repositories/tlcpack/ci-arm/tags/abc-abc-123": {},
        "repositories/tlcpack/ci-lint/tags/abc-abc-234": {},
    }
    images_data = {
        "ci_arm": "tlcpack/ci-arm:abc-abc-123",
        "ci_lint": "tlcpack/ci-lint:abc-abc-234",
        "ci_arm2": "tlcpack/ci-arm2:abc-abc-123",
    }
    run_script(
        [
            script,
            "--testing-docker-data",
            json.dumps(docker_data),
            "--testing-images-data",
            json.dumps(images_data),
            "--base-dir",
            git_dir,
        ]
        + images,
        cwd=git_dir,
    )
    # The script writes one file per image containing the chosen full tag.
    for expected_filename, expected_image in expected.items():
        with open(Path(git_dir) / expected_filename) as f:
            actual_image = f.read()
        assert actual_image == expected_image
# Each element of changed_files is one commit's worth of files; name=None
# means "derive the image tag from the relevant commit's hash" in the test.
@parameterize_named(
    invalid_name=dict(
        changed_files=[],
        name="abc",
        check="Image abc is not using new naming scheme",
        expected_code=1,
    ),
    no_hash=dict(
        changed_files=[], name="123-123-abc", check="No extant hash found", expected_code=1
    ),
    no_changes=dict(
        changed_files=[["test.txt"]],
        name=None,
        check="Did not find changes, no rebuild necessary",
        expected_code=0,
    ),
    docker_changes=dict(
        changed_files=[["test.txt"], ["docker/test.txt"]],
        name=None,
        check="Found docker changes",
        expected_code=2,
    ),
)
def test_should_rebuild_docker(tmpdir_factory, changed_files, name, check, expected_code):
    """
    Check that the Docker images are built when necessary
    """
    tag_script = JENKINS_SCRIPT_ROOT / "should_rebuild_docker.py"
    git = TempGit(tmpdir_factory.mktemp("tmp_git_dir"))
    git.run("config", "user.name", "ci")
    git.run("config", "user.email", "email@example.com")
    git_path = Path(git.cwd)
    # Create one commit per entry in changed_files.
    for i, commits in enumerate(changed_files):
        for filename in commits:
            path = git_path / filename
            path.parent.mkdir(exist_ok=True, parents=True)
            path.touch()
            git.run("add", filename)
        git.run("commit", "-m", f"message {i}")
    if name is None:
        # Name the image after the earliest of the created commits so the
        # script sees it as an extant hash.
        ref = "HEAD"
        if len(changed_files) > 1:
            ref = f"HEAD~{len(changed_files) - 1}"
        proc = git.run("rev-parse", ref, stdout=subprocess.PIPE)
        last_hash = proc.stdout.strip()
        name = f"123-123-{last_hash}"
    # Mocked Docker Hub listing containing the image tag under test.
    docker_data = {
        "repositories/tlcpack": {
            "results": [
                {
                    "name": "ci-something",
                },
                {
                    "name": "something-else",
                },
            ],
        },
        "repositories/tlcpack/ci-something/tags": {
            "results": [{"name": name}, {"name": name + "old"}],
        },
    }
    proc = run_script(
        [
            tag_script,
            "--testing-docker-data",
            json.dumps(docker_data),
        ],
        stderr=subprocess.STDOUT,
        cwd=git.cwd,
        check=False,
    )
    assert_in(check, proc.stdout)
    assert proc.returncode == expected_code
@parameterize_named(
    passing=dict(
        title="[something] a change",
        body="something",
        expected="All checks passed",
        expected_code=0,
    ),
    period=dict(
        title="[something] a change.",
        body="something",
        expected="trailing_period: FAILED",
        expected_code=1,
    ),
    empty_body=dict(
        title="[something] a change",
        body=None,
        expected="non_empty: FAILED",
        expected_code=1,
    ),
)
def test_pr_linter(title, body, expected, expected_code):
    """
    Test the PR linter

    check_pr.py exits 0 when all lint checks on the PR title/body pass and
    non-zero otherwise, printing the failed check's name.
    """
    tag_script = JENKINS_SCRIPT_ROOT / "check_pr.py"
    pr_data = {
        "title": title,
        "body": body,
    }
    proc = run_script(
        [
            tag_script,
            "--pr",
            1234,  # run_script stringifies non-str arguments
            "--pr-data",
            json.dumps(pr_data),
        ],
        check=False,
    )
    assert proc.returncode == expected_code
    assert_in(expected, proc.stdout)
# Standard TVM test entry point for running this file directly.
if __name__ == "__main__":
    tvm.testing.main()
| 50,769 | 33.989662 | 763 | py |
tvm | tvm-main/tests/python/ci/test_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Constants used in various CI tests
"""
import subprocess
import pathlib
from typing import List, Any
# Paths to the checked-in CI helper scripts under test; this file lives at
# tests/python/ci/, four directory levels below the repository root.
REPO_ROOT = pathlib.Path(__file__).resolve().parent.parent.parent.parent
GITHUB_SCRIPT_ROOT = REPO_ROOT / "ci" / "scripts" / "github"
JENKINS_SCRIPT_ROOT = REPO_ROOT / "ci" / "scripts" / "jenkins"
class TempGit:
    """
    A wrapper to run commands in a directory (specifically for use in CI tests)
    """

    def __init__(self, cwd):
        self.cwd = cwd
        # Jenkins git is too old and doesn't have 'git init --initial-branch',
        # so init and checkout need to be separate steps
        self.run("init", stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        self.run("checkout", "-b", "main", stderr=subprocess.PIPE)
        self.run("remote", "add", "origin", "https://github.com/apache/tvm.git")

    def run(self, *args, **kwargs):
        """
        Run a git command based on *args
        """
        argv = ["git", *args]
        proc = subprocess.run(argv, encoding="utf-8", cwd=self.cwd, check=False, **kwargs)
        if proc.returncode == 0:
            return proc
        raise RuntimeError(f"git command failed: '{args}'")
def run_script(command: List[Any], check: bool = True, **kwargs):
    """
    Run *command* as a subprocess with captured, decoded output.

    Non-string arguments are stringified. Callers may override the capture
    defaults via **kwargs. When check is True, a non-zero exit raises a
    RuntimeError that includes the process's stdout and stderr.
    """
    argv = [str(part) for part in command]
    run_kwargs = {
        "stdout": subprocess.PIPE,
        "stderr": subprocess.PIPE,
        "encoding": "utf-8",
        **kwargs,
    }
    proc = subprocess.run(argv, check=False, **run_kwargs)
    if check and proc.returncode != 0:
        raise RuntimeError(f"Process failed:\nstdout:\n{proc.stdout}\n\nstderr:\n{proc.stderr}")
    return proc
| 2,567 | 33.24 | 96 | py |
tvm | tvm-main/tests/scripts/task_build.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import shutil
import os
import logging
import sys
import multiprocessing
from pathlib import Path
# Hackery to enable importing of utils from ci/scripts/jenkins
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
# NOTE: this import rebinds REPO_ROOT to cmd_utils' value, shadowing the
# locally computed one above (the local copy is only used for sys.path).
from cmd_utils import Sh, init_log, REPO_ROOT
if __name__ == "__main__":
    init_log()

    # FIX: the description previously read "List pytest nodeids for a folder",
    # a copy-paste from another script; this tool drives the TVM CMake build.
    parser = argparse.ArgumentParser(description="Build TVM inside CI, optionally with sccache")
    parser.add_argument("--sccache-bucket", required=False, help="sccache bucket name")
    parser.add_argument("--build-dir", default="build", help="build folder")
    parser.add_argument("--cmake-target", help="optional build target")
    args = parser.parse_args()

    # VTA hardware sources live in a submodule; build scripts read this env var.
    env = {"VTA_HW_PATH": str(Path(os.getcwd()) / "3rdparty" / "vta-hw")}
    sccache_exe = shutil.which("sccache")

    use_sccache = sccache_exe is not None
    build_dir = Path(os.getcwd()) / args.build_dir
    build_dir = build_dir.relative_to(REPO_ROOT)

    if use_sccache:
        if args.sccache_bucket:
            env["SCCACHE_BUCKET"] = args.sccache_bucket
            logging.info(f"Using sccache bucket: {args.sccache_bucket}")
        else:
            logging.info(f"No sccache bucket set, using local cache")
        # Compiler shims that route compilation through sccache.
        env["CXX"] = "/opt/sccache/c++"
        env["CC"] = "/opt/sccache/cc"
    else:
        if sccache_exe is None:
            reason = "'sccache' executable not found"
        else:
            reason = "<unknown>"
        logging.info(f"Not using sccache, reason: {reason}")

    sh = Sh(env)

    if use_sccache:
        # Starting the server fails harmlessly if one is already running.
        sh.run("sccache --start-server", check=False)
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")

    executors = int(os.environ.get("CI_NUM_EXECUTORS", 1))
    build_platform = os.environ.get("PLATFORM", None)

    nproc = multiprocessing.cpu_count()
    # Divide CPUs evenly between parallel CI executors on the same host.
    available_cpus = nproc // executors
    num_cpus = max(available_cpus, 1)

    # i386 containers configure plain Makefiles; everything else uses Ninja.
    if build_platform == "i386":
        sh.run("cmake ..", cwd=build_dir)
    else:
        sh.run("cmake -GNinja -DCMAKE_BUILD_TYPE=RelWithDebInfo ..", cwd=build_dir)

    target = ""
    if args.cmake_target:
        target = args.cmake_target

    verbose = os.environ.get("VERBOSE", "true").lower() in {"1", "true", "yes"}
    ninja_args = [target, f"-j{num_cpus}"]
    if verbose:
        ninja_args.append("-v")

    if build_platform == "i386":
        if args.cmake_target:
            sh.run(f"make {args.cmake_target} -j{num_cpus}", cwd=build_dir)
        else:
            sh.run(f"make -j{num_cpus}", cwd=build_dir)
    else:
        sh.run(f"cmake --build . -- " + " ".join(ninja_args), cwd=build_dir)

    if use_sccache:
        logging.info("===== sccache stats =====")
        sh.run("sccache --show-stats")
| 3,603 | 33.653846 | 87 | py |
tvm | tvm-main/tests/scripts/ci.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import multiprocessing
import os
import getpass
import inspect
import argparse
import json
import shutil
import grp
import string
import random
import subprocess
import platform
import textwrap
import typing
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Callable, Union
REPO_ROOT = Path(__file__).resolve().parent.parent.parent  # tests/scripts/ -> repo root
SCRIPT_DIR = REPO_ROOT / ".ci-py-scripts"  # generated bash scripts handed to Docker
NPROC = multiprocessing.cpu_count()
class col:
    """ANSI terminal escape sequences for colorized/styled CLI output."""
    BLUE = "\033[94m"
    CYAN = "\033[96m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    RESET = "\033[0m"  # clears all colors and styles
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
def print_color(color: str, msg: str, bold: bool, **kwargs: Any) -> None:
    """Print *msg*, wrapping it in ANSI codes only when stdout is a terminal."""
    is_tty = hasattr(sys.stdout, "isatty") and sys.stdout.isatty()
    if not is_tty:
        # Plain output when piped/redirected so logs stay escape-free.
        print(msg, **kwargs)
        return
    prefix = (col.BOLD if bold else "") + color
    print(prefix + msg + col.RESET, **kwargs)
# Non-fatal notes accumulated during checks; printed to stderr by clean_exit().
warnings: List[str] = []
def clean_exit(msg: str) -> None:
    """Print *msg* and any accumulated warnings to stderr, then exit with code 1.

    Uses sys.exit rather than the ``exit`` builtin: ``exit`` is installed by the
    ``site`` module and is not guaranteed to be present (e.g. under ``python -S``).
    """
    print_color(col.RED, msg, bold=True, file=sys.stderr)
    for warning in warnings:
        print_color(col.YELLOW, warning, bold=False, file=sys.stderr)
    sys.exit(1)
def cmd(commands: List[Any], **kwargs: Any):
    """Echo *commands* in blue, run it, and raise RuntimeError on failure."""
    argv = [str(part) for part in commands]
    command_str = " ".join(argv)
    print_color(col.BLUE, command_str, bold=True)
    proc = subprocess.run(argv, **kwargs)
    if proc.returncode != 0:
        raise RuntimeError(f"Command failed: '{command_str}'")
    return proc
def get_build_dir(name: str) -> str:
    """Return the repo-relative path of the per-target build dir 'build-<name>'."""
    return str((REPO_ROOT / f"build-{name}").relative_to(REPO_ROOT))
def check_docker():
    """Exit if 'docker' is missing; warn (Linux) if the user lacks group access."""
    executable = shutil.which("docker")
    if executable is None:
        clean_exit("'docker' executable not found, install it first (e.g. 'apt install docker.io')")
    if sys.platform == "linux":
        # Check that the user is in the docker group before running
        try:
            group = grp.getgrnam("docker")
            if getpass.getuser() not in group.gr_mem:
                warnings.append(
                    f"Note: User '{getpass.getuser()}' is not in the 'docker' group, either:\n"
                    " * run with 'sudo'\n"
                    " * add user to 'docker': sudo usermod -aG docker $(whoami), then log out and back in",
                )
        except KeyError:
            # grp.getgrnam raises KeyError when the group does not exist.
            warnings.append("Note: 'docker' group does not exist")
def check_gpu():
    """Best-effort warning when no NVIDIA GPU is visible via 'lshw' (Linux only).

    Never raises: any failure in running or parsing 'lshw' is silently ignored,
    since this check only exists to produce a helpful hint.
    """
    if not (sys.platform == "linux" and shutil.which("lshw")):
        # Can't check GPU on non-Linux platforms
        return

    # See if we can check if a GPU is present in case of later failures,
    # but don't block on execution since this isn't critical
    try:
        proc = cmd(
            ["lshw", "-json", "-C", "display"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        # Drop stray leading/trailing commas before parsing as JSON.
        stdout = proc.stdout.strip().strip(",")
        stdout = json.loads(stdout)
    # FIX: cmd() raises RuntimeError on a non-zero exit (it never raises
    # CalledProcessError since it doesn't pass check=True), so RuntimeError
    # must be caught too or a failing 'lshw' would crash this best-effort check.
    except (subprocess.CalledProcessError, RuntimeError, json.decoder.JSONDecodeError):
        # Do nothing if any step failed
        return

    if isinstance(stdout, dict):
        # Sometimes lshw outputs a single item as a dict instead of a list of
        # dicts, so wrap it up if necessary
        stdout = [stdout]

    if not isinstance(stdout, list):
        return

    vendors = [s.get("vendor", "").lower() for s in stdout]
    if not any("nvidia" in vendor for vendor in vendors):
        warnings.append(
            "nvidia GPU not found in 'lshw', maybe use --cpu flag when running 'docs' command?"
        )
def gen_name(s: str) -> str:
    """Return *s* with a random 5-letter lowercase suffix appended (e.g. 'docs-xkqzr').

    Used to give Docker containers unique names. NOTE: the old inline comment
    claimed 4 letters, but the code has always generated 5; the docs now match.
    """
    suffix = "".join(random.choices(string.ascii_lowercase, k=5))
    return f"{s}-{suffix}"
def docker(
    name: str,
    image: str,
    scripts: List[str],
    env: Dict[str, str],
    interactive: bool,
    additional_flags: Optional[Dict[str, str]] = None,
):
    """
    Invoke a set of bash scripts through docker/bash.sh
    name: container name
    image: docker image name
    scripts: list of bash commands to run
    env: environment to set
    interactive: drop into a bash shell after the scripts finish
    additional_flags: extra flag/value pairs forwarded to docker/bash.sh

    NOTE: mutates the caller's *env* dict when sccache is enabled.
    """
    check_docker()
    # As sccache is added to these images these can be uncommented
    sccache_images = {
        # "ci_lint",
        "ci_gpu",
        "ci_cpu",
        # "ci_wasm",
        # "ci_i386",
        "ci_cortexm",
        "ci_arm",
        "ci_hexagon",
        "ci_riscv",
        "ci_adreno",
    }

    if image in sccache_images and os.getenv("USE_SCCACHE", "1") == "1":
        scripts = [
            "sccache --start-server",
        ] + scripts
        # Set the C/C++ compiler so CMake picks them up in the build
        env["CC"] = "/opt/sccache/cc"
        env["CXX"] = "/opt/sccache/c++"
        env["SCCACHE_CACHE_SIZE"] = os.getenv("SCCACHE_CACHE_SIZE", "50G")
        env["SCCACHE_SERVER_PORT"] = os.getenv("SCCACHE_SERVER_PORT", "4226")

    docker_bash = REPO_ROOT / "docker" / "bash.sh"

    command = [docker_bash]
    if sys.stdout.isatty():
        command.append("-t")

    command.append("--name")
    command.append(name)
    if interactive:
        command.append("-i")
        # Shell trap runs 'bash' on exit, giving the user an interactive shell
        # after (or instead of) the scripts.
        scripts = ["interact() {", "  bash", "}", "trap interact 0", ""] + scripts

    for key, value in env.items():
        command.append("--env")
        command.append(f"{key}={value}")

    if additional_flags is not None:
        for key, value in additional_flags.items():
            command.append(key)
            command.append(value)

    # Write the commands into a throwaway shell script under SCRIPT_DIR and
    # execute it inside the container.
    SCRIPT_DIR.mkdir(exist_ok=True)
    script_file = SCRIPT_DIR / f"{name}.sh"
    with open(script_file, "w") as f:
        f.write("set -eux\n\n")
        f.write("\n".join(scripts))
        f.write("\n")

    command += [image, "bash", str(script_file.relative_to(REPO_ROOT))]

    try:
        cmd(command)
    except RuntimeError as e:
        clean_exit(f"Error invoking Docker: {e}")
    except KeyboardInterrupt:
        cmd(["docker", "stop", "--time", "1", name])
    finally:
        # Keep the generated script around for debugging only when DEBUG=1.
        if os.getenv("DEBUG", "0") != "1":
            script_file.unlink()
def docs(
    tutorial_pattern: Optional[str] = None,
    cpu: bool = False,
    full: bool = False,
    interactive: bool = False,
    skip_build: bool = False,
    docker_image: Optional[str] = None,
) -> None:
    # NOTE: this docstring is parsed at runtime by add_subparser() to build
    # argparse help; keep the "arguments:" section's "name -- help" format.
    """
    Build the documentation from gallery/ and docs/. By default this builds only
    the Python docs without any tutorials.
    arguments:
    full -- Build all language docs, not just Python (cannot be used with --cpu)
    cpu -- Use the 'ci_cpu' Docker image (useful for building docs on a machine without a GPU)
    tutorial-pattern -- Regex for which tutorials to execute when building docs (cannot be used with --cpu)
    skip_build -- skip build and setup scripts
    interactive -- start a shell after running build / test scripts
    docker-image -- manually specify the docker image to use
    """
    build_dir = get_build_dir("gpu")
    extra_setup = []
    image = "ci_gpu" if docker_image is None else docker_image
    if cpu:
        # CPU-only path: ci_cpu lacks the docs toolchain, so configure the
        # build manually and pip-install the Sphinx stack below.
        # TODO: Change this to tlcpack/docs once that is uploaded
        image = "ci_cpu" if docker_image is None else docker_image
        build_dir = get_build_dir("cpu")
        config_script = " && ".join(
            [
                f"mkdir -p {build_dir}",
                f"pushd {build_dir}",
                "cp ../cmake/config.cmake .",
                # The docs import tvm.micro, so it has to be enabled in the build
                "echo set\(USE_MICRO ON\) >> config.cmake",
                "popd",
            ]
        )
        # These are taken from the ci-gpu image via pip freeze, consult that
        # if there are any changes: https://github.com/apache/tvm/tree/main/docs#native
        requirements = [
            "Sphinx==4.2.0",
            "tlcpack-sphinx-addon==0.2.1",
            "image==1.5.33",
            # Temporary git link until a release is published
            "git+https://github.com/sphinx-gallery/sphinx-gallery.git@6142f1791151849b5bec4bf3959f75697ba226cd",
            "sphinx-rtd-theme==1.0.0",
            "matplotlib==3.3.4",
            "commonmark==0.9.1",
            "Pillow==8.3.2",
            "autodocsumm==0.2.7",
            "docutils==0.16",
        ]
        extra_setup = [
            "python3 -m pip install " + " ".join(requirements),
        ]
    else:
        check_gpu()
        config_script = f"./tests/scripts/task_config_build_gpu.sh {build_dir}"
    scripts = extra_setup + [
        config_script,
        f"./tests/scripts/task_build.py --build-dir {build_dir}",
    ]
    # skip_build discards the config/build steps; the docs script still runs.
    if skip_build:
        scripts = []
    scripts.append("./tests/scripts/task_python_docs.sh")
    if tutorial_pattern is None:
        tutorial_pattern = os.getenv("TVM_TUTORIAL_EXEC_PATTERN", ".py" if full else "none")
    env = {
        "TVM_TUTORIAL_EXEC_PATTERN": tutorial_pattern,
        "PYTHON_DOCS_ONLY": "0" if full else "1",
        "IS_LOCAL": "1",
        "TVM_LIBRARY_PATH": str(REPO_ROOT / build_dir),
    }
    docker(name=gen_name("docs"), image=image, scripts=scripts, env=env, interactive=interactive)
    print_color(
        col.GREEN,
        "Done building the docs. You can view them by running "
        "'python3 tests/scripts/ci.py serve-docs' and visiting:"
        " http://localhost:8000 in your browser.",
        bold=True,
    )
def serve_docs(directory: str = "_docs") -> None:
    """
    Serve the docs using Python's http server
    arguments:
    directory -- Directory to serve from
    """
    # The docstring above is parsed by add_subparser() for the CLI help text.
    docs_root = Path(directory)
    if not docs_root.exists():
        clean_exit("Docs have not been built, run 'ci.py docs' first")
    cmd([sys.executable, "-m", "http.server"], cwd=docs_root)
def lint(interactive: bool = False, fix: bool = False, docker_image: Optional[str] = None) -> None:
    """
    Run CI's Sanity Check step
    arguments:
    interactive -- start a shell after running build / test scripts
    fix -- where possible (currently black and clang-format) edit files in place with formatting fixes
    docker-image -- manually specify the docker image to use
    """
    # The docstring above is parsed by add_subparser() for the CLI help text.
    env = {}
    if fix:
        env.update(IS_LOCAL="true", INPLACE_FORMAT="true")
    lint_image = "ci_lint" if docker_image is None else docker_image
    docker(
        name=gen_name("ci-lint"),
        image=lint_image,
        scripts=["./tests/scripts/task_lint.sh"],
        env=env,
        interactive=interactive,
    )
# A named test-suite option for generate_command: (text, scripts). The first
# element is ignored inside fn() itself — presumably it is the CLI help text
# consumed by add_subparser (confirm there).
Option = Tuple[str, List[str]]
def generate_command(
    name: str,
    options: Dict[str, Option],
    help: str,
    precheck: Optional[Callable[[], None]] = None,
    post_build: Optional[List[str]] = None,
    additional_flags: Optional[Dict[str, str]] = None,
):
    """
    Helper to generate CLIs that:
    1. Build with a config matching a specific CI Docker image (e.g. 'cpu')
    2. Run tests (either a pre-defined set from scripts or manually via invoking
    pytest)
    3. (optional) Drop down into a terminal into the Docker container

    Returns (fn, options, help) where fn is the generated command function.
    """

    def fn(
        tests: Optional[List[str]],
        skip_build: bool = False,
        interactive: bool = False,
        docker_image: Optional[str] = None,
        verbose: bool = False,
        **kwargs,
    ) -> None:
        # NOTE: this docstring is parsed at runtime by add_subparser() to
        # build argparse help; keep the "name -- help" line format intact.
        """
        arguments:
        tests -- pytest test IDs (e.g. tests/python or tests/python/a_file.py::a_test[param=1])
        skip_build -- skip build and setup scripts
        interactive -- start a shell after running build / test scripts
        docker-image -- manually specify the docker image to use
        verbose -- run verbose build
        """
        if precheck is not None:
            precheck()

        build_dir = get_build_dir(name)

        if skip_build:
            scripts = []
        else:
            scripts = [
                f"./tests/scripts/task_config_build_{name}.sh {build_dir}",
                f"./tests/scripts/task_build.py --build-dir {build_dir}",
            ]

        if post_build is not None:
            scripts += post_build

        # Check that a test suite was not used alongside specific test names
        if any(v for v in kwargs.values()) and tests is not None:
            option_flags = ", ".join([f"--{k}" for k in options.keys()])
            clean_exit(f"{option_flags} cannot be used with --tests")

        if tests is not None:
            scripts.append(f"python3 -m pytest {' '.join(tests)}")

        # Add named test suites
        for option_name, (_, extra_scripts) in options.items():
            if kwargs.get(option_name, False):
                scripts.extend(script.format(build_dir=build_dir) for script in extra_scripts)

        docker(
            name=gen_name(f"ci-{name}"),
            image=f"ci_{name}" if docker_image is None else docker_image,
            scripts=scripts,
            env={
                # Need to specify the library path manually or else TVM can't
                # determine which build directory to use (i.e. if there are
                # multiple copies of libtvm.so laying around)
                "TVM_LIBRARY_PATH": str(REPO_ROOT / get_build_dir(name)),
                "VERBOSE": "true" if verbose else "false",
            },
            interactive=interactive,
            additional_flags=additional_flags,
        )

    # Rename so add_subparser registers the command under the target name.
    fn.__name__ = name

    return fn, options, help
def check_arm_qemu() -> None:
    """
    Check if a machine is ready to run an ARM Docker image

    On non-ARM hosts this requires QEMU user-mode binfmt_misc handlers to be
    registered with the kernel; exits with setup instructions if they are not.
    """
    machine = platform.machine().lower()
    if "arm" in machine or "aarch64" in machine:
        # No need to check anything if the machine runs ARM
        return

    binfmt = Path("/proc/sys/fs/binfmt_misc")
    if not binfmt.exists() or len(list(binfmt.glob("qemu-*"))) == 0:
        clean_exit(
            textwrap.dedent(
                """
            You must run a one-time setup to use ARM containers on x86 via QEMU:
            sudo apt install -y qemu binfmt-support qemu-user-static
            docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
            See https://www.stereolabs.com/docs/docker/building-arm-container-on-x86/ for details""".strip(
                    "\n"
                )
            )
        )
def cli_name(s: str) -> str:
    """Convert a Python identifier like ``serve_docs`` to its CLI form ``serve-docs``."""
    return "-".join(s.split("_"))
def typing_get_origin(annotation):
    """Version-portable wrapper around :func:`typing.get_origin` (added in 3.8)."""
    if sys.version_info < (3, 8):
        # Python 3.7 has no typing.get_origin; read the dunder directly
        return annotation.__origin__
    return typing.get_origin(annotation)
def typing_get_args(annotation):
    """Version-portable wrapper around :func:`typing.get_args` (added in 3.8)."""
    if sys.version_info < (3, 8):
        # Python 3.7 has no typing.get_args; read the dunder directly
        return annotation.__args__
    return typing.get_args(annotation)
def is_optional_type(annotation):
    """Return True when *annotation* is Optional-like, i.e. a Union that includes None."""
    if not hasattr(annotation, "__origin__"):
        # Plain classes (int, str, ...) have no origin and are never Optional
        return False
    if typing_get_origin(annotation) != typing.Union:
        return False
    return type(None) in typing_get_args(annotation)
def add_subparser(
    func: Callable,
    subparsers: Any,
    options: Optional[Dict[str, Option]] = None,
    help: Optional[str] = None,
) -> Any:
    """
    Utility function to make it so subparser commands can be defined locally
    as a function rather than directly via argparse and manually dispatched
    out.

    The command's CLI arguments are derived from ``func``'s signature and its
    PEP 257-style docstring; extra boolean flags come from ``options``.
    Returns the created subparser.
    """
    # Each function is intended follow the example for arguments in PEP257, so
    # split apart the function documentation from the arguments
    split = [s.strip() for s in func.__doc__.split("arguments:\n")]
    if len(split) == 1:
        # No "arguments:" section -- the whole docstring is the command help
        args_help = None
        command_help = split[0]
    else:
        command_help, args_help = split
    if help is not None:
        # An explicit help string overrides whatever the docstring said
        command_help = help
    # Parse out the help text for each argument if present
    arg_help_texts = {}
    if args_help is not None:
        for line in args_help.split("\n"):
            line = line.strip()
            # Each documented argument line looks like "name -- help text"
            name, help_text = [t.strip() for t in line.split(" -- ")]
            arg_help_texts[cli_name(name)] = help_text
    subparser = subparsers.add_parser(cli_name(func.__name__), help=command_help)
    # Track first letters already used so single-letter short flags stay unique
    seen_prefixes = set()
    # Add each parameter to the subparser
    signature = inspect.signature(func)
    for name, value in signature.parameters.items():
        if name == "kwargs":
            # **kwargs is populated from the `options` flags below, not CLI args
            continue
        arg_cli_name = cli_name(name)
        kwargs: Dict[str, Union[str, bool]] = {"help": arg_help_texts[arg_cli_name]}
        is_optional = is_optional_type(value.annotation)
        if is_optional:
            # Unwrap Optional[X] to X for flag-type detection
            arg_type = typing_get_args(value.annotation)[0]
        else:
            arg_type = value.annotation
        # Grab the default value if present
        has_default = False
        if value.default is not value.empty:
            kwargs["default"] = value.default
            has_default = True
        # Check if it should be a flag
        if arg_type is bool:
            kwargs["action"] = "store_true"
        else:
            # Non-flag arguments are required unless optional or defaulted
            kwargs["required"] = not is_optional and not has_default
        if str(arg_type).startswith("typing.List"):
            # List-typed arguments may be passed multiple times on the CLI
            kwargs["action"] = "append"
        if arg_cli_name[0] not in seen_prefixes:
            # First use of this initial: also register the short "-x" form
            subparser.add_argument(f"-{arg_cli_name[0]}", f"--{arg_cli_name}", **kwargs)
            seen_prefixes.add(arg_cli_name[0])
        else:
            subparser.add_argument(f"--{arg_cli_name}", **kwargs)
    if options is not None:
        # Named test-suite options become boolean flags on the subcommand
        for option_name, (help, _) in options.items():
            option_cli_name = cli_name(option_name)
            if option_cli_name[0] not in seen_prefixes:
                subparser.add_argument(
                    f"-{option_cli_name[0]}", f"--{option_cli_name}", action="store_true", help=help
                )
                seen_prefixes.add(option_cli_name[0])
            else:
                subparser.add_argument(f"--{option_cli_name}", action="store_true", help=help)
    return subparser
# Shared (help, scripts) pair for the C++ unit-test option reused by the
# generated commands below. Fixed typo in the help text: "unitests" -> "unittests".
CPP_UNITTEST = ("run c++ unittests", ["./tests/scripts/task_cpp_unittest.sh {build_dir}"])
generated = [
generate_command(
name="gpu",
help="Run GPU build and test(s)",
options={
"cpp": CPP_UNITTEST,
"topi": ("run topi tests", ["./tests/scripts/task_python_topi.sh"]),
"unittest": (
"run unit tests",
[
"./tests/scripts/task_java_unittest.sh",
"./tests/scripts/task_opencl_cpp_unittest.sh",
"./tests/scripts/task_python_unittest_gpuonly.sh",
"./tests/scripts/task_python_integration_gpuonly.sh",
],
),
"frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend.sh"]),
},
),
generate_command(
name="cpu",
help="Run CPU build and test(s)",
options={
"cpp": CPP_UNITTEST,
"integration": (
"run integration tests",
["./tests/scripts/task_python_integration.sh"],
),
"unittest": (
"run unit tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_vta_fsim.sh",
"./tests/scripts/task_python_vta_tsim.sh",
],
),
"frontend": ("run frontend tests", ["./tests/scripts/task_python_frontend_cpu.sh"]),
},
),
generate_command(
name="minimal",
help="Run minimal CPU build and test(s)",
options={
"cpp": CPP_UNITTEST,
"unittest": (
"run unit tests",
[
"./tests/scripts/task_python_unittest.sh",
],
),
},
),
generate_command(
name="i386",
help="Run i386 build and test(s)",
options={
"cpp": CPP_UNITTEST,
"integration": (
"run integration tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_integration_i386only.sh",
],
),
},
),
generate_command(
name="wasm",
help="Run WASM build and test(s)",
options={
"cpp": CPP_UNITTEST,
"test": ("run WASM tests", ["./tests/scripts/task_web_wasm.sh"]),
},
),
generate_command(
name="cortexm",
help="Run Cortex-M build and test(s)",
options={
"cpp": CPP_UNITTEST,
"test": (
"run microTVM tests",
[
"./tests/scripts/task_python_microtvm.sh",
"./tests/scripts/task_demo_microtvm.sh",
],
),
},
),
generate_command(
name="hexagon",
help="Run Hexagon build and test(s)",
post_build=["./tests/scripts/task_build_hexagon_api.sh --output build-hexagon"],
options={
"cpp": CPP_UNITTEST,
"test": (
"run Hexagon API/Python tests",
[
"./tests/scripts/task_python_hexagon.sh",
],
),
},
),
generate_command(
name="arm",
help="Run ARM build and test(s) (native or via QEMU on x86)",
precheck=check_arm_qemu,
options={
"cpp": CPP_UNITTEST,
"python": (
"run full Python tests",
[
"./tests/scripts/task_python_unittest.sh",
"./tests/scripts/task_python_arm_compute_library.sh",
],
),
},
),
generate_command(
name="riscv",
help="Run RISC-V build and test(s)",
options={
"cpp": CPP_UNITTEST,
"python": (
"run full Python tests",
[
"./tests/scripts/task_riscv_microtvm.sh",
],
),
},
),
generate_command(
name="adreno",
help="Run Adreno build and test(s)",
post_build=["./tests/scripts/task_build_adreno_bins.sh"],
additional_flags={
"--volume": os.environ.get("ADRENO_OPENCL", "") + ":/adreno-opencl",
"--env": "ADRENO_OPENCL=/adreno-opencl",
"--net": "host",
},
options={
"test": (
"run Adreno API/Python tests",
[
"./tests/scripts/task_python_adreno.sh " + os.environ.get("ANDROID_SERIAL", ""),
],
),
"benchmarks": (
"run Adreno Benchmarks (Native OpenCL, CLML SDK)",
[
"./apps/benchmark/adreno/bench.sh texture "
+ os.environ.get("ANDROID_SERIAL", ""),
"./apps/benchmark/adreno/bench.sh clml " + os.environ.get("ANDROID_SERIAL", ""),
],
),
"nativebenchmarks": (
"run Adreno Texture Benchmarks",
[
"./apps/benchmark/adreno/bench.sh texture "
+ os.environ.get("ANDROID_SERIAL", ""),
],
),
"clmlbenchmarks": (
"run Adreno CLML SDK Benchmarks",
[
"./apps/benchmark/adreno/bench.sh clml " + os.environ.get("ANDROID_SERIAL", ""),
],
),
},
),
]
def main():
    """CLI entry point: build the argparse tree, parse, then dispatch to the chosen command."""
    description = """
    Run CI jobs locally via Docker. This facilitates reproducing CI failures for
    fast iteration. Note that many of the Docker images required are large (the
    CPU and GPU images are both over 25GB) and may take some time to download on first use.
    """
    parser = argparse.ArgumentParser(description=description)
    subparsers = parser.add_subparsers(dest="command")
    commands = {}
    # Manually defined commands carry no extra option flags
    for handler in [docs, serve_docs, lint]:
        add_subparser(handler, subparsers)
        commands[cli_name(handler.__name__)] = handler
    # Commands produced by generate_command() come with options and help text
    for handler, handler_options, handler_help in generated:
        add_subparser(handler, subparsers, handler_options, handler_help)
        commands[cli_name(handler.__name__)] = handler
    args = parser.parse_args()
    if args.command is None:
        # No subcommand was given: show usage and fail
        parser.print_help()
        exit(1)
    chosen = commands[args.command]
    # Forward every parsed public attribute (except the dispatch key) as kwargs
    arg_values = {k: getattr(args, k) for k in dir(args) if not k.startswith("_") and k != "command"}
    chosen(**arg_values)
# Only dispatch when executed as a script, so the module stays importable
if __name__ == "__main__":
    main()
| 25,368 | 30.950882 | 112 | py |
tvm | tvm-main/tests/scripts/request_hook/request_hook.py | #!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib.request
import logging
from urllib.parse import quote
# Module-level logger; stays None until init() below configures it
LOGGER = None
# To update this list, run https://github.com/apache/tvm/actions/workflows/upload_ci_resource.yml
# with the URL to download and the SHA-256 hash of the file.
# Base URL of the S3 bucket that mirrors all external CI downloads
BASE = "https://tvm-ci-resources.s3.us-west-2.amazonaws.com"
URL_MAP = {
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec": f"{BASE}/mxnet-val_256_q90.rec",
"http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel": f"{BASE}/bvlc_alexnet.caffemodel",
"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel": f"{BASE}/bvlc_googlenet.caffemodel",
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz": f"{BASE}/tf-mobilenet_v1_1.0_224.tgz",
"http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz": f"{BASE}/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz": f"{BASE}/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
"http://images.cocodataset.org/zips/val2017.zip": f"{BASE}/cocodataset-val2017.zip",
"http://pjreddie.com/media/files/alexnet.weights?raw=true": f"{BASE}/media/files/alexnet.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/alexnet.weights?raw=true": f"{BASE}/media/files/alexnet.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/extraction.weights?raw=true": f"{BASE}/media/files/extraction.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/extraction.weights?raw=true": f"{BASE}/media/files/extraction.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/resnet50.weights?raw=true": f"{BASE}/media/files/resnet50.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/resnext50.weights?raw=true": f"{BASE}/media/files/resnext50.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/yolov2.weights?raw=true": f"{BASE}/media/files/yolov2.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/yolov3.weights?raw=true": f"{BASE}/media/files/yolov3.weights"
+ quote("?raw=true"),
"http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz": f"{BASE}/imikolov/rnnlm/simple-examples.tgz",
"https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar": f"{BASE}/bcebos-paddle_resnet50.tar",
"https://data.deepai.org/stanfordcars.zip": f"{BASE}/deepai-stanfordcars.zip",
"https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth": f"{BASE}/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
"https://github.com/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite?raw=true": f"{BASE}/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite"
+ quote("?raw=true"),
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/adreno_v0.01.log": f"{BASE}/tlc-pack/tophub/main/tophub/adreno_v0.01.log",
"https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel": f"{BASE}/2022-10-05/MobileNet.mlmodel",
"https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel": f"{BASE}/coreml/models/Resnet50.mlmodel",
"https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth": f"{BASE}/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
"https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth": f"{BASE}/models/deeplabv3_resnet101_coco-586e9e4e.pth",
"https://download.pytorch.org/models/densenet121-a639ec97.pth": f"{BASE}/models/densenet121-a639ec97.pth",
"https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth": f"{BASE}/models/efficientnet_b4_rwightman-7eb33cd5.pth",
"https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth": f"{BASE}/models/fcn_resnet101_coco-7ecb50ca.pth",
"https://download.pytorch.org/models/googlenet-1378be20.pth": f"{BASE}/models/googlenet-1378be20.pth",
"https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth": f"{BASE}/models/inception_v3_google-0cc3c7bd.pth",
"https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth": f"{BASE}/2022-10-05/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
"https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth": f"{BASE}/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth": f"{BASE}/2022-10-05/mobilenet_v2-b0353104.pth",
"https://download.pytorch.org/models/r3d_18-b3b3357e.pth": f"{BASE}/models/r3d_18-b3b3357e.pth",
"https://download.pytorch.org/models/resnet18-f37072fd.pth": f"{BASE}/2022-10-05/resnet18-f37072fd.pth",
"https://download.pytorch.org/models/resnet50-0676ba61.pth": f"{BASE}/models/resnet50-0676ba61.pth",
"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth": f"{BASE}/models/squeezenet1_0-b66bff10.pth",
"https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth": f"{BASE}/models/squeezenet1_1-b8a52dc0.pth",
"https://download.pytorch.org/models/vgg16_features-amdegroot-88682ab5.pth": f"{BASE}/models/vgg16_features-amdegroot-88682ab5.pth",
"https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx": f"{BASE}/2022-10-05/super_resolution_0.2.onnx",
"https://gist.githubusercontent.com/zhreshold/4d0b62f3d01426887599d4f7ede23ee5/raw/596b27d23537e5a1b5751d2b0481ef172f58b539/imagenet1000_clsid_to_human.txt": f"{BASE}/2022-10-05/imagenet1000_clsid_to_human.txt",
"https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png": f"{BASE}/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png",
"https://github.com/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite": f"{BASE}/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite",
"https://github.com/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb": f"{BASE}/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb",
"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true": f"{BASE}/dmlc/mxnet.js/blob/main/data/cat.png"
+ quote("?raw=true"),
"https://github.com/dmlc/mxnet.js/raw/main/data/cat.png": f"{BASE}/dmlc/mxnet.js/raw/main/data/cat.png",
"https://github.com/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/arial.ttf?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/arial.ttf"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/coco.names?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/coco.names"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/dog.jpg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg": f"{BASE}/dog.jpg",
"https://github.com/dmlc/web-data/blob/main/darknet/data/person.jpg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/person.jpg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/gluoncv/detection/street_small.jpg?raw=true": f"{BASE}/2022-10-05/small_street_raw.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg": f"{BASE}/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg",
"https://github.com/dmlc/web-data/raw/main/darknet/data/arial.ttf": f"{BASE}/dmlc/web-data/raw/main/darknet/data/arial.ttf",
"https://github.com/dmlc/web-data/raw/main/darknet/data/coco.names": f"{BASE}/dmlc/web-data/raw/main/darknet/data/coco.names",
"https://github.com/dmlc/web-data/raw/main/darknet/data/dog.jpg": f"{BASE}/dmlc/web-data/raw/main/darknet/data/dog.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/data/person.jpg": f"{BASE}/dmlc/web-data/raw/main/darknet/data/person.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so": f"{BASE}/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so",
"https://github.com/dmlc/web-data/raw/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/gluon-small-stree.jpg",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb",
"https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel",
"https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite": f"{BASE}/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite",
"https://github.com/JonathanCMitchell/mobilenet_v2_keras/releases/download/v1.1/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5": f"{BASE}/2022-10-05/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/mnist/model/mnist-1.onnx": f"{BASE}/onnx/mnist-1.onnx",
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx": f"{BASE}/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx",
"https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx": f"{BASE}/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx",
"https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet50-v2-7.onnx": f"{BASE}/2022-10-05/resnet50-v2-7.onnx",
"https://github.com/pjreddie/darknet/blob/master/cfg/alexnet.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/alexnet.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/extraction.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/extraction.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/resnet50.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/resnet50.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/resnext50.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/resnext50.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov2.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov2.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3-tiny.cfg?raw=true": f"{BASE}/2022-10-05/yolov3-tiny-raw.cfg",
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov3.cfg"
+ quote("?raw=true"),
"https://github.com/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite?raw=true": f"{BASE}/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite"
+ quote("?raw=true"),
"https://github.com/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel?raw=true": f"{BASE}/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel"
+ quote("?raw=true"),
"https://github.com/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt": f"{BASE}/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt",
"https://github.com/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite": f"{BASE}/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
"https://github.com/mlcommons/tiny/raw/v0.7/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite": f"{BASE}/mlcommons/tiny/raw/v0.7/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite",
"https://github.com/uwsampl/web-data/raw/main/vta/models/synset.txt": f"{BASE}/2022-10-05/synset.txt",
"https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch": f"{BASE}/gcn_cora.torch",
"https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": f"{BASE}/vta_cat.jpg",
"https://objects.githubusercontent.com/github-production-release-asset-2e65be/130932608/4b196a8a-4e2d-11e8-9a11-be3c41846711?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221004%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221004T170456Z&X-Amz-Expires=300&X-Amz-Signature=0602b68e8864b9b01c9142eee22aed3543fe98a5482686eec33d98e2617a2295&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=130932608&response-content-disposition=attachment%3B%20filename%3Dmobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5&response-content-type=application%2Foctet-stream": f"{BASE}/2022-10-05/aws-mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResNet/resnet18.zip": f"{BASE}/oneflow/resnet18.zip",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/sine_model.tflite": f"{BASE}/tlc-pack/web-data/testdata/microTVM/model/sine_model.tflite",
"https://pjreddie.com/media/files/yolov3-tiny.weights?raw=true": f"{BASE}/yolov3-tiny.weights",
"https://pjreddie.com/media/files/yolov3.weights": f"{BASE}/yolov3.weights",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_classes.txt": f"{BASE}/2022-10-05/imagenet_classes.txt",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_synsets.txt": f"{BASE}/2022-10-05/imagenet_synsets.txt",
"https://raw.githubusercontent.com/dmlc/mxnet.js/main/data/cat.png": f"{BASE}/dmlc/mxnet.js/main/data/cat.png",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/cfg/yolov3.cfg": f"{BASE}/dmlc/web-data/main/darknet/cfg/yolov3.cfg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/arial.ttf": f"{BASE}/dmlc/web-data/main/darknet/data/arial.ttf",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/coco.names": f"{BASE}/dmlc/web-data/main/darknet/data/coco.names",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/dog.jpg": f"{BASE}/dmlc/web-data/main/darknet/data/dog.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/person.jpg": f"{BASE}/dmlc/web-data/main/darknet/data/person.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/lib/libdarknet2.0.so": f"{BASE}/dmlc/web-data/main/darknet/lib/libdarknet2.0.so",
"https://raw.githubusercontent.com/dmlc/web-data/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/small_street.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz": f"{BASE}/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/street_small.jpg",
"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt": f"{BASE}/2022-10-05/labels_mobilenet_quant_v1_224.txt",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log": f"{BASE}/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/cuda_v0.10.log": f"{BASE}/tlc-pack/tophub/main/tophub/cuda_v0.10.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/llvm_v0.04.log": f"{BASE}/tlc-pack/tophub/main/tophub/llvm_v0.04.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/mali_v0.06.log": f"{BASE}/2022-10-05/mali_v0.06.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/opencl_v0.04.log": f"{BASE}/tlc-pack/tophub/main/tophub/opencl_v0.04.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/vta_v0.10.log": f"{BASE}/tlc-pack/tophub/main/tophub/vta_v0.10.log",
"https://s3.amazonaws.com/model-server/inputs/kitten.jpg": f"{BASE}/2022-10-05/kitten.jpg",
"https://s3.amazonaws.com/onnx-model-zoo/synset.txt": f"{BASE}/2022-10-05/synset-s3.txt",
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz": f"{BASE}/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz": f"{BASE}/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz": f"{BASE}/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz": f"{BASE}/2022-10-05/mobilenet_v2_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip": f"{BASE}/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite": f"{BASE}/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite": f"{BASE}/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite",
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite": f"{BASE}/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite",
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz": f"{BASE}/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz": f"{BASE}/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5": f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5": f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5": f"{BASE}/2022-10-05/mobilenet_2_5_128_tf.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz": f"{BASE}/tensorflow/tf-keras-datasets/mnist.npz",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/anomaly_detection/trained_models/ToyCar/baseline_tf23/model/model_ToyCar_quant_fullint_micro.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/anomaly_detection/trained_models/ToyCar/baseline_tf23/model/model_ToyCar_quant_fullint_micro.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/visual_wake_word_int8_1.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/visual_wake_word_int8_1.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/anomaly_detection_normal_id_01.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/anomaly_detection_normal_id_01.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/image_classification_int8_0.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/image_classification_int8_0.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_person.jpg": f"{BASE}/tlc-pack/web-data/testdata/microTVM/data/vww_sample_person.jpg",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_not_person.jpg": f"{BASE}/tlc-pack/web-data/testdata/microTVM/data/vww_sample_not_person.jpg",
"https://github.com/tensorflow/tflite-micro/raw/de8f61a074460e1fa5227d875c95aa303be01240/tensorflow/lite/micro/models/keyword_scrambled.tflite": f"{BASE}/models/tflite/keyword_scrambled_8bit.tflite",
}
class TvmRequestHook(urllib.request.Request):
    """A drop-in replacement for urllib.request.Request that rewrites known
    upstream URLs to their mirrored S3 copies and refuses anything else.

    Installed process-wide by init() so that CI never fetches from flaky
    third-party hosts directly.
    """

    def __init__(self, url, *args, **kwargs):
        LOGGER.info(f"Caught access to {url}")
        url = url.strip()
        if url.startswith(BASE):
            # Fix: the original fell through to URL_MAP[url] and raised
            # KeyError for URLs that already point at S3; pass them through.
            new_url = url
        elif url in URL_MAP:
            new_url = URL_MAP[url]
            LOGGER.info(f"Mapped URL {url} to {new_url}")
        else:
            # Dis-allow any accesses that aren't going through S3
            msg = (
                f"Uncaught URL found in CI: {url}. "
                "A committer must upload the relevant file to S3 via "
                "https://github.com/apache/tvm/actions/workflows/upload_ci_resource.yml "
                "and add it to the mapping in tests/scripts/request_hook/request_hook.py"
            )
            raise RuntimeError(msg)
        super().__init__(new_url, *args, **kwargs)
def init():
    """Install TvmRequestHook process-wide and log every caught URL to a file."""
    global LOGGER
    # Monkey-patch: from here on, every urllib.request.Request constructed
    # anywhere in the process goes through the remapping hook above.
    urllib.request.Request = TvmRequestHook
    LOGGER = logging.getLogger("tvm_request_hook")
    LOGGER.setLevel(logging.DEBUG)
    # Audit trail of every access/remap, written to the current directory.
    fh = logging.FileHandler("redirected_urls.log")
    fh.setLevel(logging.DEBUG)
    LOGGER.addHandler(fh)
| 29,189 | 117.658537 | 683 | py |
tvm | tvm-main/tests/scripts/release/gather_prs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import pickle
from pathlib import Path
import csv
import sys
from typing import Callable, Dict, List, Any
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "jenkins"))
sys.path.append(str(REPO_ROOT / "ci" / "scripts" / "github"))
from git_utils import git, GitHubRepo
from github_tag_teams import tags_from_title
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
PRS_QUERY = """
query ($owner: String!, $name: String!, $after: String, $pageSize: Int!) {
repository(owner: $owner, name: $name) {
defaultBranchRef {
name
target {
... on Commit {
oid
history(after: $after, first: $pageSize) {
pageInfo {
hasNextPage
endCursor
}
nodes {
oid
committedDate
associatedPullRequests(first: 1) {
nodes {
number
additions
changedFiles
deletions
author {
login
}
title
body
}
}
}
}
}
}
}
}
}
"""
def append_and_save(items, file):
    """Append *items* to the pickled list stored at *file*, creating it if absent."""
    existing = []
    if file.exists():
        with open(file, "rb") as handle:
            existing = pickle.load(handle)
    with open(file, "wb") as handle:
        pickle.dump(existing + items, handle)
def fetch_pr_data(args, cache):
    """Page through commit history on GitHub's GraphQL API and append each
    page's commit nodes (with their associated PR) to the pickle *cache*.

    Walks from args.from_commit toward args.to_commit; relies on module
    globals ``user``/``repo`` being set by the __main__ block before the call.
    """
    github = GitHubRepo(user=user, repo=repo, token=GITHUB_TOKEN)
    if args.from_commit is None or args.to_commit is None:
        print("--from-commit and --to-commit must be specified if --skip-query is not used")
        exit(1)
    i = 0
    page_size = 80
    # NOTE(review): the cursor is seeded as "<sha> <offset>" — this matches
    # GitHub's commit-history cursor format; verify before changing.
    cursor = f"{args.from_commit} {i}"
    while True:
        try:
            r = github.graphql(
                query=PRS_QUERY,
                variables={
                    "owner": user,
                    "name": repo,
                    "after": cursor,
                    "pageSize": page_size,
                },
            )
        except RuntimeError as e:
            print(f"{e}\nPlease check enviroment variable GITHUB_TOKEN whether is valid.")
            exit(1)
        data = r["data"]["repository"]["defaultBranchRef"]["target"]["history"]
        if not data["pageInfo"]["hasNextPage"]:
            break
        cursor = data["pageInfo"]["endCursor"]
        results = data["nodes"]
        # Collect this page's commits, stopping early once to_commit is seen.
        to_add = []
        stop = False
        for r in results:
            if r["oid"] == args.to_commit:
                print(f"Found {r['oid']}, stopping")
                stop = True
                break
            else:
                to_add.append(r)
        oids = [r["oid"] for r in to_add]
        print(oids)
        # Persist incrementally so an interrupted run keeps partial progress.
        append_and_save(to_add, cache)
        if stop:
            break
        print(i)
        i += page_size
def write_csv(
    filename: str, data: List[Dict[str, Any]], threshold_filter: Callable[[Dict[str, Any]], bool]
) -> None:
    """Write one CSV row per commit that has an associated pull request.

    Parameters
    ----------
    filename : str
        Path of the CSV file to create (overwritten if it exists).
    data : List[Dict[str, Any]]
        Commit nodes from the GraphQL query; each may carry an
        'associatedPullRequests' entry.
    threshold_filter : Callable[[Dict[str, Any]], bool]
        Predicate over a PR dict; recorded as 1/0 in the
        'additions+deletions>threshold' column.
    """
    with open(filename, "w", newline="") as csvfile:
        writer = csv.writer(csvfile, quotechar='"')
        writer.writerow(
            (
                "category",
                "subject",
                "date",
                "url",
                "author",
                "pr_title_tags",
                "pr_title",
                "additions",
                "deletions",
                "additions+deletions>threshold",
                "changed_files",
            )
        )
        for item in data:
            nodes = item["associatedPullRequests"]["nodes"]
            # Commits without an attached PR (e.g. direct pushes) are skipped.
            if len(nodes) == 0:
                continue
            pr = nodes[0]
            tags = tags_from_title(pr["title"])
            # A single bracket tag may hold a comma-separated list; flatten it.
            actual_tags = []
            for t in tags:
                items = [x.strip() for x in t.split(",")]
                actual_tags += items
            tags = actual_tags
            tags = [t.lower() for t in tags]
            category = tags[0] if len(tags) > 0 else ""
            # Deleted accounts come back as a null author ("ghost" on github.com).
            author = pr["author"] if pr["author"] else "ghost"
            author = author.get("login", "") if isinstance(author, dict) else author
            writer.writerow(
                (
                    category,
                    "n/a",
                    item["committedDate"],
                    f'https://github.com/apache/tvm/pull/{pr["number"]}',
                    author,
                    "/".join(tags),
                    pr["title"].replace(",", " "),
                    pr["additions"],
                    pr["deletions"],
                    1 if threshold_filter(pr) else 0,
                    pr["changedFiles"],
                )
            )
    # Fix: report the actual output path (the message previously printed a
    # literal "(unknown)" placeholder in an f-string with no placeholder).
    print(f"{filename} generated!")
if __name__ == "__main__":
    help = "List out commits with attached PRs since a certain commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument("--from-commit", help="commit to start checking PRs from")
    parser.add_argument("--to-commit", help="commit to stop checking PRs from")
    parser.add_argument(
        "--threshold", default=0, help="sum of additions + deletions to consider large, such as 150"
    )
    parser.add_argument(
        "--skip-query", action="store_true", help="don't query GitHub and instead use cache file"
    )
    args = parser.parse_args()
    # fetch_pr_data reads these as module globals.
    user = "apache"
    repo = "tvm"
    threshold = int(args.threshold)
    # On-disk cache so repeated runs (or --skip-query) avoid the GitHub API.
    cache = Path("out.pkl")
    if not args.skip_query:
        fetch_pr_data(args, cache)
    with open(cache, "rb") as f:
        data = pickle.load(f)
    print(f"Found {len(data)} PRs")
    write_csv(
        filename="out_pr_gathered.csv",
        data=data,
        threshold_filter=lambda pr: pr["additions"] + pr["deletions"] > threshold,
    )
| 6,666 | 29.031532 | 100 | py |
tvm | tvm-main/tests/scripts/release/make_notes.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pickle
from pathlib import Path
import csv
import sys
import re
from collections import defaultdict
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
sys.path.append(str(REPO_ROOT / "tests" / "scripts" / "github"))
sys.path.append(str(REPO_ROOT / "tests" / "scripts" / "jenkins"))
# Tag dictionary used to create a mapping relation to categorize PRs owning same tag.
TAG_DICT = {
"metaschedule": "MetaSchedule",
"cuda": "cuda & cutlass & tensorrt",
"cutlass": "cuda & cutlass & tensorrt",
"tensorrt": "cuda & cutlass & tensorrt",
"ethosn": "Ethosn",
"hexagon": "Hexagon",
"metal": "Metal",
"vulkan": "Vulkan",
"cmsis-nn": "CMSIS-NN",
"clml": "OpenCL & CLML",
"opencl": "OpenCL & CLML",
"openclml": "OpenCL & CLML",
"adreno": "Adreno",
"acl": "ArmComputeLibrary",
"rocm": "ROCm",
"crt": "CRT",
"micronpu": "micoNPU",
"microtvm": "microTVM",
"web": "web",
"wasm": "web",
"runtime": "Runtime",
"aot": "AOT",
"arith": "Arith",
"byoc": "BYOC",
"community": "Community",
"tensorir": "TIR",
"tir": "TIR",
"tensorflow": "Frontend",
"tflite": "Frontend",
"paddle": "Frontend",
"oneflow": "Frontend",
"pytorch": "Frontend",
"torch": "Frontend",
"keras": "Frontend",
"frontend": "Frontend",
"onnx": "Frontend",
"roofline": "Misc",
"rpc": "Misc",
"transform": "Misc",
"tophub": "Misc",
"vta": "Misc",
"ux": "Misc",
"APP": "Misc",
"docker": "Docker",
"doc": "Docs",
"docs": "Docs",
"llvm": "LLVM",
"sve": "LLVM",
"ci": "CI",
"test": "CI",
"tests": "CI",
"testing": "CI",
"unittest": "CI",
"bugfix": "BugFix",
"fix": "BugFix",
"bug": "BugFix",
"hotfix": "BugFix",
"relay": "Relay",
"qnn": "Relay",
"quantization": "Relay",
"tvmscript": "TVMScript",
"tvmscripts": "TVMScript",
"tvmc": "TVMC",
"topi": "TOPI",
}
def strip_header(title: str, header: str) -> str:
    """Remove the first case-insensitive occurrence of *header* from *title*.

    The text after the removed header is left-stripped; if the header does
    not occur, *title* is returned unchanged.
    """
    idx = title.lower().find(header.lower())
    if idx < 0:
        return title
    remainder = title[idx + len(header):].strip()
    return title[:idx] + remainder
def sprint(*args):
    """Print to stderr so stdout stays reserved for the generated output."""
    print(*args, file=sys.stderr)
def create_pr_dict(cache: Path):
    """Load the pickled commit list from *cache* and index PRs by number.

    Commits with zero or multiple associated PRs are ignored.
    """
    with open(cache, "rb") as f:
        data = pickle.load(f)
    # Fix: removed leftover debug `sprint(data[1])`, which crashed with
    # IndexError whenever the cache held fewer than two entries.
    pr_dict = {}
    for item in data:
        prs = item["associatedPullRequests"]["nodes"]
        if len(prs) != 1:
            continue
        pr = prs[0]
        pr_dict[pr["number"]] = pr
    return pr_dict
def categorize_csv_file(csv_path: str):
    """Read the hand-categorized PR CSV and group PR numbers as
    {category: {subject: [pr_number, ...]}}.

    The category is derived from the PR title tags via TAG_DICT, preferring
    any non-"Misc" mapping; the subject column acts as the sub-heading.
    """
    headings = defaultdict(lambda: defaultdict(list))
    sprint("Opening CSV")
    with open(csv_path) as f:
        input_file = csv.DictReader(f)
        i = 0
        blank_cate_set = {"Misc"}
        for row in input_file:
            # print(row)
            tags = row["pr_title_tags"].split("/")
            tags = ["misc"] if len(tags) == 0 else tags
            # Map every tag through TAG_DICT (unknown tags become "Misc"),
            # then drop "Misc" so a specific category wins when present.
            categories = map(lambda t: TAG_DICT.get(t.lower(), "Misc"), tags)
            categories = list(categories)
            categories = list(set(categories) - blank_cate_set)
            # NOTE(review): categories[0] is taken from a set, so the pick is
            # arbitrary when a PR maps to several categories — confirm intended.
            category = "Misc" if len(categories) == 0 else categories[0]
            subject = row["subject"].strip()
            pr_number = row["url"].split("/")[-1]
            if category == "" or subject == "":
                sprint(f"Skipping {i}th pr with number: {pr_number}, row: {row}")
                continue
            headings[category][subject].append(pr_number)
            i += 1
            # if i > 30:
            # break
    return headings
if __name__ == "__main__":
    help = "List out commits with attached PRs since a certain commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument(
        "--notes", required=True, help="csv or markdown file of categorized PRs in order"
    )
    parser.add_argument(
        "--is-pr-with-link",
        required=False,
        help="exported pr number with hyper-link for forum format",
    )
    parser.add_argument(
        "--convert-with-link",
        required=False,
        help="make PR number in markdown file owning hyper-link",
    )
    args = parser.parse_args()
    user = "apache"
    repo = "tvm"
    if args.convert_with_link:
        # Rewrite bare "#1234" references into markdown hyperlinks.
        # NOTE(review): the input filename is hard-coded — presumably it should
        # come from --notes; confirm before changing.
        with open("release_note_0.13.0.md", "r") as f:
            lines = f.readlines()
        formated = []
        for line in lines:
            match = re.search(r"#\d+", line)
            if match:
                pr_num_str = match.group()
                pr_num_int = pr_num_str.replace("#", "")
                pr_number_str = f"[#{pr_num_int}](https://github.com/apache/tvm/pull/{pr_num_int})"
                line = line.replace(pr_num_str, pr_number_str)
            formated.append(line)
        result = "".join(formated)
        print(result)
        exit(0)
    # 1. Create PR dict from cache file
    cache = Path("out.pkl")
    if not cache.exists():
        sprint("run gather_prs.py first to generate out.pkl")
        exit(1)
    pr_dict = create_pr_dict(cache)
    # 2. Categorize csv file as dict by category and subject (sub-category)
    # Fix: the option is --notes, so the attribute is args.notes
    # (args.notes_csv raised AttributeError).
    headings = categorize_csv_file(args.notes)
    # 3. Summarize and sort all categories ("Misc" always sorts last)
    def sorter(x):
        if x == "Misc":
            return 10
        return 0
    keys = list(headings.keys())
    keys = list(sorted(keys))
    keys = list(sorted(keys, key=sorter))
    # 4. Generate markdown by loop categorized csv file dict
    def pr_title(number, heading):
        # Look up the PR title in the cache and strip the "[category]" prefix.
        try:
            title = pr_dict[int(number)]["title"]
            title = strip_header(title, heading)
        # Narrowed from a bare except: a missing or non-integer PR number
        # means the cache and the CSV are out of sync.
        except (KeyError, ValueError):
            sprint("The out.pkl file is not match with csv file.")
            exit(1)
        return title
    output = ""
    for key in keys:
        value = headings[key]
        if key == "DO NOT INCLUDE":
            continue
        value = dict(value)
        output += f"### {key}\n"
        # PRs without a real sub-heading are listed one per line with a title.
        misc = []
        misc += value.get("n/a", [])
        misc += value.get("Misc", [])
        for pr_number in misc:
            if args.is_pr_with_link:
                pr_number_str = f"[#{pr_number}](https://github.com/apache/tvm/pull/{pr_number})"
            else:
                pr_number_str = f"#{pr_number}"
            pr_str = f" * {pr_number_str} - {pr_title(pr_number, '[' + key + ']')}\n"
            output += pr_str
        # Named sub-headings collapse their PR numbers onto one line.
        for subheading, pr_numbers in value.items():
            if subheading == "DO NOT INCLUDE":
                continue
            if subheading == "n/a" or subheading == "Misc":
                continue
            else:
                output += f" * {subheading} - " + ", ".join([f"#{n}" for n in pr_numbers]) + "\n"
        # print(value)
        output += "\n"
    # 5. Print markdown-format output
    print(output)
| 7,805 | 29.138996 | 99 | py |
tvm | tvm-main/tests/scripts/release/list_rfcs.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import subprocess
import sys
LINK_BASE = "https://github.com/apache/tvm-rfcs/blob/main/"
COMMIT_BASE = "https://github.com/apache/tvm-rfcs/commit/"
def sprint(*args):
    """Print to stderr so stdout carries only the generated RFC list."""
    print(*args, file=sys.stderr)
if __name__ == "__main__":
    help = "List out RFCs since a commit"
    parser = argparse.ArgumentParser(description=help)
    parser.add_argument("--since-commit", required=True, help="last commit to include")
    parser.add_argument("--rfcs-repo", required=True, help="path to checkout of apache/tvm-rfcs")
    args = parser.parse_args()
    user = "apache"
    repo = "tvm"
    rfc_repo = args.rfcs_repo
    # Force the local tvm-rfcs checkout to exactly match origin/main.
    subprocess.run("git fetch origin main", cwd=rfc_repo, shell=True)
    subprocess.run("git checkout main", cwd=rfc_repo, shell=True)
    subprocess.run("git reset --hard origin/main", cwd=rfc_repo, shell=True)
    # One "<sha> <subject>" line per commit newer than --since-commit.
    r = subprocess.run(
        f"git log {args.since_commit}..HEAD --format='%H %s'",
        cwd=rfc_repo,
        shell=True,
        stdout=subprocess.PIPE,
        encoding="utf-8",
    )
    commits = r.stdout.strip().split("\n")
    for commit in commits:
        parts = commit.split()
        commit = parts[0]
        subject = " ".join(parts[1:])
        # Files touched by this commit; an RFC commit should touch exactly
        # one rfcs/*.md file.
        r2 = subprocess.run(
            f"git diff-tree --no-commit-id --name-only -r {commit}",
            cwd=rfc_repo,
            shell=True,
            stdout=subprocess.PIPE,
            encoding="utf-8",
        )
        files = r2.stdout.strip().split("\n")
        rfc_file = None
        for file in files:
            if file.startswith("rfcs/") and file.endswith(".md"):
                # More than one RFC document in a single commit is suspicious;
                # warn on stderr but keep the last one found.
                if rfc_file is not None:
                    sprint(f"error on {commit} {subject}")
                rfc_file = file
        if rfc_file is None:
            sprint(f"error on {commit} {subject}")
            continue
        # Markdown bullet linking the RFC document and the commit itself.
        print(f" * [{subject}]({LINK_BASE + rfc_file}) ([`{commit[:7]}`]({COMMIT_BASE + commit}))")
| 2,733 | 34.973684 | 99 | py |
tvm | tvm-main/tests/lint/trailing_newlines.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import fileinput
import os
def has_one_trailing_newline(filename: str) -> bool:
    """Return True when *filename* ends with exactly one newline byte.

    Empty files pass; one-byte files fail; otherwise the last two bytes must
    be a non-newline followed by a newline.
    """
    with open(filename, "rb") as stream:
        head = stream.read(2)
        if len(head) == 0:
            # Empty file: nothing to require.
            return True
        if len(head) == 1:
            # A single byte can never be "content plus newline".
            return False
        stream.seek(-2, os.SEEK_END)
        tail = stream.read(2)
    return tail[0] != ord("\n") and tail[1] == ord("\n")
if __name__ == "__main__":
    # Filenames arrive one per line on stdin (or in files named on argv).
    # Lint semantics: print every offending file and exit non-zero iff at
    # least one offender exists.
    # Fix: the exit codes were inverted (1 when everything passed, 0 when
    # offenders were found), which made CI treat failures as success.
    exit_code = 0
    for line in fileinput.input():
        filename = line.rstrip()
        if not has_one_trailing_newline(filename):
            exit_code = 1
            print(filename)
    exit(exit_code)
| 1,691 | 31.538462 | 74 | py |
tvm | tvm-main/tests/lint/add_asf_header.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to add ASF header to files that cannot be handled by Rat."""
import os
import sys
header_cstyle = """
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
""".strip()
header_mdstyle = """
<!--- Licensed to the Apache Software Foundation (ASF) under one -->
<!--- or more contributor license agreements. See the NOTICE file -->
<!--- distributed with this work for additional information -->
<!--- regarding copyright ownership. The ASF licenses this file -->
<!--- to you under the Apache License, Version 2.0 (the -->
<!--- "License"); you may not use this file except in compliance -->
<!--- with the License. You may obtain a copy of the License at -->
<!--- http://www.apache.org/licenses/LICENSE-2.0 -->
<!--- Unless required by applicable law or agreed to in writing, -->
<!--- software distributed under the License is distributed on an -->
<!--- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -->
<!--- KIND, either express or implied. See the License for the -->
<!--- specific language governing permissions and limitations -->
<!--- under the License. -->
""".strip()
header_pystyle = """
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""".strip()
header_rststyle = """
.. Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
""".strip()
header_groovystyle = """
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
""".strip()
header_cmdstyle = """
:: Licensed to the Apache Software Foundation (ASF) under one
:: or more contributor license agreements. See the NOTICE file
:: distributed with this work for additional information
:: regarding copyright ownership. The ASF licenses this file
:: to you under the Apache License, Version 2.0 (the
:: "License"); you may not use this file except in compliance
:: with the License. You may obtain a copy of the License at
::
:: http://www.apache.org/licenses/LICENSE-2.0
::
:: Unless required by applicable law or agreed to in writing,
:: software distributed under the License is distributed on an
:: "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
:: KIND, either express or implied. See the License for the
:: specific language governing permissions and limitations
:: under the License.
""".strip()
FMT_MAP = {
"sh": header_pystyle,
"cc": header_cstyle,
"c": header_cstyle,
"mm": header_cstyle,
"m": header_cstyle,
"go": header_cstyle,
"java": header_cstyle,
"h": header_cstyle,
"py": header_pystyle,
"toml": header_pystyle,
"yml": header_pystyle,
"yaml": header_pystyle,
"rs": header_cstyle,
"md": header_mdstyle,
"cmake": header_pystyle,
"mk": header_pystyle,
"rst": header_rststyle,
"gradle": header_groovystyle,
"tcl": header_pystyle,
"xml": header_mdstyle,
"storyboard": header_mdstyle,
"pbxproj": header_cstyle,
"plist": header_mdstyle,
"xcworkspacedata": header_mdstyle,
"html": header_mdstyle,
"bat": header_cmdstyle,
}
def copyright_line(line):
    """Return True when *line* contains an explicit copyright notice."""
    # The markers are assembled at runtime so this detector does not
    # flag its own source file.
    if ("Copyright " + "(c)") in line:
        return True
    marker = "Copyright"
    return marker in line and "by" in line
def add_header(fname, header):
    """Prepend the ASF license *header* to *fname* in place.

    Copyright lines are removed (contributors retain copyright by default).
    Files that already carry an ASF header and no copyright line are left
    untouched.  Special first lines (shebang, XML declaration, <html>,
    Fortran "// !$") stay first, with the header inserted after them.
    """
    if not os.path.exists(fname):
        print("Cannot find %s ..." % fname)
        return
    # Fix: read through a context manager so the handle is closed
    # deterministically (the file was previously opened and never closed).
    with open(fname) as infile:
        lines = infile.readlines()
    has_asf_header = False
    has_copyright = False
    for i, l in enumerate(lines):
        if l.find("Licensed to the Apache Software Foundation") != -1:
            has_asf_header = True
        elif copyright_line(l):
            has_copyright = True
            lines[i] = ""
    if has_asf_header and not has_copyright:
        print("Skip file %s ..." % fname)
        return
    with open(fname, "w") as outfile:
        # Decide whether the original first line must stay first.
        skipline = False
        if not lines:
            skipline = False  # File is empty
        elif lines[0][:2] == "#!":
            skipline = True
        elif lines[0][:2] == "<?":
            skipline = True
        elif lines[0].startswith("<html>"):
            skipline = True
        elif lines[0].startswith("// !$"):
            skipline = True
        if skipline:
            outfile.write(lines[0])
            if not has_asf_header:
                outfile.write(header + "\n\n")
            outfile.write("".join(lines[1:]))
        else:
            if not has_asf_header:
                outfile.write(header + "\n\n")
            outfile.write("".join(lines))
    if not has_asf_header:
        print("Add header to %s" % fname)
    if has_copyright:
        print("Removed copyright line from %s" % fname)
def main(args):
    """Entry point: read a file list (or a Rat report) from args[1] and add
    the appropriate comment-style ASF header to each listed file."""
    if len(args) != 2:
        print("Usage: python add_asf_header.py <file_list>")
        # Fix: previously fell through after printing usage and crashed
        # with IndexError on args[1].
        sys.exit(1)
    # Close the list file deterministically instead of leaking the handle.
    with open(args[1]) as file_list:
        for l in file_list:
            # Rat report separator lines carry no filename.
            if l.startswith("---"):
                continue
            # Rat report entries look like "File: path/to/file".
            if l.find("File:") != -1:
                l = l.split(":")[-1]
            fname = l.strip()
            if len(fname) == 0:
                continue
            suffix = fname.split(".")[-1]
            if suffix in FMT_MAP:
                add_header(fname, FMT_MAP[suffix])
            elif os.path.basename(fname) == "gradle.properties":
                add_header(fname, FMT_MAP["h"])
            else:
                print("Cannot handle %s ..." % fname)
| 9,290 | 35.57874 | 75 | py |
tvm | tvm-main/tests/lint/check_cmake_options.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
LIBINFO_CC = REPO_ROOT / "src" / "support" / "libinfo.cc"
LIBINFO_CMAKE = REPO_ROOT / "cmake" / "modules" / "LibInfo.cmake"
CMAKELISTS = REPO_ROOT / "CMakeLists.txt"
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Check that CMake options are mirrored to libinfo.cc"
    )
    # Fix: actually invoke the parser so '--help' works and unexpected
    # arguments are rejected (it was constructed but never used).
    parser.parse_args()
    with open(CMAKELISTS) as f:
        cmake = f.readlines()
    with open(LIBINFO_CC) as f:
        libinfo = f.read()
    with open(LIBINFO_CMAKE) as f:
        libinfo_cmake = f.read()
    # Read tvm_options from CMakeLists.txt
    options = []
    for line in cmake:
        m = re.search(r"tvm_option\((.*?) ", line)
        if m is not None:
            options.append(m.groups()[0])
    # Check that each option is present in libinfo.cc
    missing_lines = []
    for option in options:
        expected_line = f' {{"{option}", TVM_INFO_{option}}},'
        if expected_line not in libinfo:
            missing_lines.append(expected_line)
    error = False
    if len(missing_lines) > 0:
        missing_lines = "\n".join(missing_lines)
        print(
            f"Missing these lines from {LIBINFO_CC.relative_to(REPO_ROOT)}, please update it\n{missing_lines}"
        )
        error = True
    # Check that each option has a compile definition in LibInfo.cmake
    missing_cmake_lines = []
    for option in options:
        expected_line = f' TVM_INFO_{option}="${{{option}}}"'
        if expected_line not in libinfo_cmake:
            missing_cmake_lines.append(expected_line)
    if len(missing_cmake_lines) > 0:
        missing_cmake_lines = "\n".join(missing_cmake_lines)
        print(
            f"Missing these lines from {LIBINFO_CMAKE.relative_to(REPO_ROOT)}, please update it\n{missing_cmake_lines}"
        )
        error = True
    if error:
        exit(1)
| 2,712 | 32.493827 | 119 | py |
tvm | tvm-main/tests/lint/check_file_type.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Helper tool to check file types that are allowed to checkin."""
import os
import sys
import subprocess
# List of file types we allow
ALLOW_EXTENSION = {
# source code
"cc",
"c",
"h",
"s",
"rs",
"m",
"mm",
"g4",
"gradle",
"js",
"tcl",
"scala",
"java",
"go",
"ts",
"sh",
"py",
"pyi",
"pxi",
"pyd",
"pyx",
"cu",
"bat",
# relay text format
"rly",
# configurations
"mk",
"in",
"cmake",
"xml",
"toml",
"yml",
"yaml",
"json",
# docs
"txt",
"md",
"rst",
# sgx
"edl",
"lds",
# ios
"pbxproj",
"plist",
"xcworkspacedata",
"storyboard",
"xcscheme",
# hw/chisel
"sbt",
"properties",
"v",
"sdc",
# generated parser
"interp",
"tokens",
# interface definition
"idl",
# opencl file
"cl",
# zephyr config file
"conf",
# arduino sketch file
"ino",
# linker scripts
"ld",
# Jinja2 templates
"j2",
# Jenkinsfiles
"groovy",
# Python-parseable config files
"ini",
}
# List of file names allowed
ALLOW_FILE_NAME = {
".gitignore",
".eslintignore",
".gitattributes",
"README",
"Makefile",
"Doxyfile",
"pylintrc",
"condarc",
"rat-excludes",
"log4j.properties",
".clang-format",
".gitmodules",
"CODEOWNERSHIP",
".scalafmt.conf",
"Cargo.lock",
"poetry.lock",
"with_the_same_user",
}
# List of specific files allowed in relpath to <proj_root>
ALLOW_SPECIFIC_FILE = {
"LICENSE",
"NOTICE",
"KEYS",
"DISCLAIMER",
"Jenkinsfile",
"mypy.ini",
# cargo config
"rust/runtime/tests/test_wasm32/.cargo/config",
"rust/tvm-graph-rt/tests/test_wasm32/.cargo/config",
"apps/sgx/.cargo/config",
"apps/wasm-standalone/wasm-graph/.cargo/config",
# html for demo purposes
"web/apps/browser/rpc_server.html",
# images are normally not allowed
# discuss with committers before add more images
"apps/android_rpc/app/src/main/res/mipmap-hdpi/ic_launcher.png",
"apps/android_rpc/app/src/main/res/mipmap-mdpi/ic_launcher.png",
# documentation related files
"docs/_static/css/tvm_theme.css",
"docs/_static/img/tvm-logo-small.png",
"docs/_static/img/tvm-logo-square.png",
# pytest config
"pytest.ini",
# microTVM tests
"tests/micro/testdata/mnist/digit-2.jpg",
"tests/micro/testdata/mnist/digit-9.jpg",
"tests/micro/testdata/mnist/mnist-8.onnx",
# microTVM Zephyr runtime
"apps/microtvm/zephyr/template_project/CMakeLists.txt.template",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-arm",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-xilinx-aarch64",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-i386",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv32",
"apps/microtvm/zephyr/template_project/qemu-hack/qemu-system-riscv64",
"apps/microtvm/zephyr/template_project/fvp-hack/FVP_Corstone_SSE-300_Ethos-U55",
"apps/microtvm/zephyr/template_project/app-overlay/nucleo_l4r5zi.overlay",
# microTVM Arduino runtime
"apps/microtvm/arduino/template_project/Makefile.template",
# microTVM CRT
"src/runtime/crt/crt_config.h.template",
"src/runtime/crt/host/CMakeLists.txt.template",
# microTVM Virtual Machines
"apps/microtvm/poetry.lock",
"apps/microtvm/reference-vm/Vagrantfile",
"apps/microtvm/reference-vm/base-box/Vagrantfile.packer-template",
# Hexagon
"src/runtime/hexagon/rpc/android_bash.sh.template",
"src/runtime/hexagon/profiler/lwp_handler.S",
}
def filename_allowed(name):
    """Check if name is allowed by the current policy.

    Parameters
    ----------
    name : str
        Input name

    Returns
    -------
    allowed : bool
        Whether the filename is allowed.
    """
    extension = name.rsplit(".", 1)[-1]
    base = os.path.basename(name)
    return (
        extension in ALLOW_EXTENSION
        or base in ALLOW_FILE_NAME
        or base.startswith("Dockerfile")
        or name.startswith("3rdparty")
        or name in ALLOW_SPECIFIC_FILE
    )
def copyright_line(line):
    """Return True if *line* appears to contain a copyright notice."""
    # Both patterns are assembled at runtime so this detector does not
    # flag its own source file when the repo is scanned.
    if ("Copyright " + "(c)") in line:
        return True
    keyword = "Copyright"
    return keyword in line and "by" in line
def check_asf_copyright(fname):
    """Return False when *fname* has both an ASF license header and a
    copyright line; True otherwise.

    PNG files, missing paths, and files that cannot be decoded as text are
    always accepted. Files carrying the ASF header must not also contain a
    copyright line.
    """
    if fname.endswith(".png"):
        return True
    if not os.path.isfile(fname):
        return True
    has_asf_header = False
    has_copyright = False
    try:
        # Use a context manager so the file handle is closed deterministically
        # (the previous version relied on the garbage collector).
        with open(fname) as src:
            for line in src:
                if line.find("Licensed to the Apache Software Foundation") != -1:
                    has_asf_header = True
                if copyright_line(line):
                    has_copyright = True
                if has_asf_header and has_copyright:
                    return False
    except UnicodeDecodeError:
        # Undecodable (likely binary) content is skipped, matching the
        # original best-effort behavior.
        pass
    return True
def main():
    """Check every git-tracked file against the file-type and ASF-header policies."""
    cmd = ["git", "ls-files"]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    (out, _) = proc.communicate()
    assert proc.returncode == 0, f'{" ".join(cmd)} errored: {out}'
    res = out.decode("utf-8")

    # First pass: reject files whose name/extension is not in the allow lists.
    error_list = [fname for fname in res.split() if not filename_allowed(fname)]
    if error_list:
        report = "------File type check report----\n"
        report += "\n".join(error_list)
        report += "\nFound %d files that are not allowed\n" % len(error_list)
        report += (
            "We do not check in binary files into the repo.\n"
            "If necessary, please discuss with committers and"
            "modify tests/lint/check_file_type.py to enable the file you need.\n"
        )
        sys.stderr.write(report)
        sys.stderr.flush()
        sys.exit(-1)

    # Second pass: files with an ASF header must not also carry a copyright line.
    asf_copyright_list = [fname for fname in res.split() if not check_asf_copyright(fname)]
    if asf_copyright_list:
        report_parts = [
            "------File type check report----\n",
            "\n".join(asf_copyright_list) + "\n",
            "------Found %d files that has ASF header with copyright message----\n"
            % len(asf_copyright_list),
            "--- Files with ASF header do not need Copyright lines.\n",
            "--- Contributors retain copyright to their contribution by default.\n",
            "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n",
            "---\n",
            "--- You can use the following steps to remove the copyright lines\n",
            "--- Create file_list.txt in your text editor\n",
            "--- Copy paste the above content in file-list into file_list.txt\n",
            "--- python3 tests/lint/add_asf_header.py file_list.txt\n",
        ]
        sys.stderr.write("".join(report_parts))
        sys.stderr.flush()
        sys.exit(-1)
    print("check_file_type.py: all checks passed..")


if __name__ == "__main__":
    main()
| 8,219 | 27.247423 | 118 | py |
tvm | tvm-main/tests/lint/filter_untracked.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path
import subprocess
import sys
def check_output(args, **kw):
    """Run *args* and return its stdout decoded as UTF-8 text.

    Writes a diagnostic to stderr and exits with status 2 if the command
    returns a nonzero exit code.
    """
    proc = subprocess.Popen(args, **kw, stdout=subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    if proc.returncode:
        sys.stderr.write("exited with code %d: %s\n" % (proc.returncode, " ".join(args)))
        sys.exit(2)
    # The unicode() builtin only exists on Python 2; str() decodes bytes on 3.
    decoder = unicode if sys.version_info[0] == 2 else str
    return decoder(stdout_data, "utf-8")
def main():
    """Copy stdin to stdout, dropping paths that git reports as untracked or ignored."""
    script_dir = os.path.dirname(__file__) or os.getcwd()
    toplevel_dir = check_output(["git", "rev-parse", "--show-toplevel"], cwd=script_dir).strip("\n")

    # NOTE: submodules can drag in problems when a git worktree is mounted in
    # the docker VM at a different location than on the host; filtering
    # untracked files inside submodules isn't needed here anyway.
    git_status_output = check_output(["git", "status", "-s", "--ignored"], cwd=toplevel_dir)
    untracked = [
        status_line[3:]
        for status_line in git_status_output.split("\n")
        if status_line[:3] in ("?? ", "!! ")
    ]

    # Also exclude .git in case rat picks up files in .git, or the .git file
    # used when the checkout is a worktree.
    git_dentry = os.path.join(toplevel_dir, ".git")
    untracked.append(".git" if os.path.isfile(git_dentry) else ".git/")

    def _is_untracked(path):
        # Entries ending in "/" match by prefix; all others must match exactly.
        return any(
            path.startswith(entry) if entry[-1] == "/" else path == entry
            for entry in untracked
        )

    for line in sys.stdin:
        cleaned = line[2:] if line[:2] == "./" else line
        if not _is_untracked(cleaned.strip("\n")):
            sys.stdout.write(line)


if __name__ == "__main__":
    main()
| 2,614 | 34.337838 | 100 | py |
tvm | tvm-main/tests/micro/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/tests/micro/arduino/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
pytest_plugins = [
"tvm.micro.testing.pytest_plugin",
]
import pytest
def pytest_configure(config):
    """Register the custom pytest markers used by the Arduino test suite."""
    marker_description = (
        "requires_hardware: mark test to run only when an Arduino board is connected"
    )
    config.addinivalue_line("markers", marker_description)
| 1,025 | 34.37931 | 96 | py |
tvm | tvm-main/tests/micro/arduino/test_arduino_error_detection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from tvm.micro.project_api.server import ServerError
import test_utils
import tvm.testing
@pytest.fixture
def project(board, microtvm_debug, workspace_dir, serial_number):
    """Generate a fresh KWS example project for the selected board."""
    return test_utils.make_kws_project(board, microtvm_debug, workspace_dir, serial_number)
def test_blank_project_compiles(workspace_dir, project):
    """The unmodified generated project must build without errors."""
    project.build()
# Add a bug (an extra curly brace) and make sure the project doesn't compile
def test_bugged_project_compile_fails(workspace_dir, project):
    """Injecting a syntax error into the sketch must make the build fail."""
    sketch_path = workspace_dir / "project" / "project.ino"
    with sketch_path.open("a") as main_file:
        main_file.write("}\n")

    with pytest.raises(ServerError):
        project.build()


if __name__ == "__main__":
    tvm.testing.main()
| 1,509 | 32.555556 | 91 | py |
tvm | tvm-main/tests/micro/arduino/test_arduino_rpc_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This unit test simulates an autotuning workflow, where we:
1. Instantiate the Arduino RPC server project
2. Build and flash that project onto our target board
"""
import pathlib
import numpy as np
import onnx
import pytest
import tvm
import tvm.testing
from PIL import Image
from tvm import relay
from tvm.relay.testing import byoc
from tvm.relay.backend import Executor, Runtime
import test_utils
def _make_session(
    model,
    arduino_board,
    workspace_dir,
    mod,
    build_config,
    serial_number: str = None,
):
    """Generate, build and flash a host-driven Arduino project for ``mod``,
    then open a microTVM RPC session over the board's transport.

    ``model`` is accepted for signature symmetry with the other helpers in
    this file but is not used here.
    """
    project = tvm.micro.generate_project(
        str(test_utils.TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        {
            "board": arduino_board,
            "project_type": "host_driven",
            # Enable verbose project output only for debug builds.
            "verbose": bool(build_config.get("debug")),
            "serial_number": serial_number,
        },
    )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())
def _make_sess_from_op(
    model,
    arduino_board,
    workspace_dir,
    op_name,
    sched,
    arg_bufs,
    build_config,
    serial_number: str = None,
):
    """Compile a single TE operator named *op_name* for the micro target and
    wrap it in an on-device session via :func:`_make_session`."""
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    # Vectorization is disabled for the C codegen used by micro targets.
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.build(sched, arg_bufs, target=target, runtime=runtime, name=op_name)
    return _make_session(model, arduino_board, workspace_dir, mod, build_config, serial_number)
def _make_add_sess(model, arduino_board, workspace_dir, build_config, serial_number: str = None):
    """Build a session around a tiny elementwise add: C[i] = A[i] + B[0]."""
    lhs = tvm.te.placeholder((2,), dtype="int8")
    rhs = tvm.te.placeholder((1,), dtype="int8")
    out = tvm.te.compute(lhs.shape, lambda i: lhs[i] + rhs[0], name="C")
    schedule = tvm.te.create_schedule(out.op)
    return _make_sess_from_op(
        model,
        arduino_board,
        workspace_dir,
        "add",
        schedule,
        [lhs, rhs, out],
        build_config,
        serial_number,
    )
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_compile_runtime(board, microtvm_debug, workspace_dir, serial_number):
    """Compile the on-device runtime and run the 'add' kernel end to end."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        # Writes from the host must round-trip through device memory.
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()
        # Invoke the compiled kernel: C[i] = A[i] + B[0].
        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()
    with _make_add_sess(model, board, workspace_dir, build_config, serial_number) as sess:
        test_basic_add(sess)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_platform_timer(board, microtvm_debug, workspace_dir, serial_number):
    """Exercise the on-device timer through time_evaluator on the 'add' op."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()
        # Benchmark the kernel: 3 repeats of 20 runs, at least 40 ms each.
        system_lib = sess.get_system_lib()
        time_eval_f = system_lib.time_evaluator(
            "add", sess.device, number=20, repeat=3, min_repeat_ms=40
        )
        result = time_eval_f(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()
        # A working platform timer must report positive, per-repeat timings.
        assert result.mean > 0
        assert len(result.results) == 3
    with _make_add_sess(model, board, workspace_dir, build_config, serial_number) as sess:
        test_basic_add(sess)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_relay(board, microtvm_debug, workspace_dir, serial_number):
    """Build and run a tiny Relay graph (x*x + 1) on the device."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"
    # Construct Relay program.
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(func, target=target, runtime=runtime)
    with _make_session(model, board, workspace_dir, mod, build_config, serial_number) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**mod.get_params())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).numpy()
        # The input must round-trip unchanged and the output match x*x + 1.
        tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_onnx(board, microtvm_debug, workspace_dir, serial_number):
    """Run the MNIST-8 ONNX model on the device and check two digit images.

    The model is imported through the ONNX frontend, linked with its
    parameters, and executed via the graph executor over RPC.
    """
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    # Load test images.
    this_dir = pathlib.Path(__file__).parent
    mnist_testdata = this_dir.parent / "testdata" / "mnist"
    digit_2 = Image.open(mnist_testdata / "digit-2.jpg").resize((28, 28))
    digit_2 = np.asarray(digit_2).astype("float32")
    digit_2 = np.expand_dims(digit_2, axis=0)
    digit_9 = Image.open(mnist_testdata / "digit-9.jpg").resize((28, 28))
    digit_9 = np.asarray(digit_9).astype("float32")
    digit_9 = np.expand_dims(digit_9, axis=0)
    # Load ONNX model and convert to Relay.
    onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
    shape = {"Input3": (1, 1, 28, 28)}
    relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)
    relay_mod = relay.transform.DynamicToStatic()(relay_mod)
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        # link-params embeds the weights in the generated library.
        executor = Executor("graph", {"link-params": True})
        lowered = relay.build(relay_mod, target, params=params, executor=executor, runtime=runtime)
        graph = lowered.get_graph_json()
    with _make_session(
        model, board, workspace_dir, lowered, build_config, serial_number
    ) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            graph, session.get_system_lib(), session.device
        )
        # Send the digit-2 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_2))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        # (Removed a leftover debug print of the raw scores here.)
        assert np.argmax(result) == 2
        # Send the digit-9 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_9))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 9
def check_result(
    relay_mod,
    model,
    arduino_board,
    workspace_dir,
    map_inputs,
    out_shape,
    result,
    build_config,
    serial_number,
):
    """Helper function to verify results.

    Builds ``relay_mod`` for the micro target, runs it on the board with
    ``map_inputs``, and checks every output against ``result`` within a
    small tolerance. ``out_shape``/``result`` may be single values or lists
    for multi-output graphs.
    """
    TOL = 1e-5
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)
    with _make_session(
        model, arduino_board, workspace_dir, mod, build_config, serial_number
    ) as session:
        rt_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        # Set parameters once, after the explicit inputs. (The previous
        # version redundantly called set_input(**params) both before and
        # after the loop; the later call was the effective one.)
        rt_mod.set_input(**mod.get_params())
        rt_mod.run()
        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]
        for idx, shape in enumerate(out_shapes):
            out = tvm.nd.empty(shape, device=session.device)
            out = rt_mod.get_output(idx, out)
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_byoc_microtvm(board, microtvm_debug, workspace_dir, serial_number):
    """This is a simple test case to check BYOC capabilities of microTVM.

    The graph is annotated so two (add, subtract, multiply) chains are
    offloaded to the external C compiler while the third chain stays on TVM;
    the concatenated output is verified against a NumPy reference.
    """
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # C compiler
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)
    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)
    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    # Annotate the offloadable regions, then partition them out of main.
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    # Expected output: the three chains computed with NumPy, concatenated.
    check_result(
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        model=model,
        build_config=build_config,
        arduino_board=board,
        workspace_dir=workspace_dir,
        serial_number=serial_number,
    )
def _make_add_sess_with_shape(
    model,
    arduino_board,
    workspace_dir,
    shape,
    build_config,
    serial_number: str = None,
):
    """Build a session around an elementwise self-add (x + x) of *shape*."""
    in_buf = tvm.te.placeholder(shape, dtype="int8")
    out_buf = tvm.te.compute(in_buf.shape, lambda i: in_buf[i] + in_buf[i], name="C")
    schedule = tvm.te.create_schedule(out_buf.op)
    return _make_sess_from_op(
        model,
        arduino_board,
        workspace_dir,
        "add",
        schedule,
        [in_buf, out_buf],
        build_config,
        serial_number,
    )
@pytest.mark.parametrize(
    "shape,",
    [
        pytest.param((1 * 1024,), id="(1*1024)"),
        pytest.param((4 * 1024,), id="(4*1024)"),
        pytest.param((16 * 1024,), id="(16*1024)"),
    ],
)
@tvm.testing.requires_micro
@pytest.mark.requires_hardware
def test_rpc_large_array(board, microtvm_debug, workspace_dir, shape, serial_number):
    """Test large RPC array transfer."""
    model = test_utils.ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_tensors(sess):
        # Round-trip a random int8 array of the parametrized size, then an
        # all-zeros array, through device memory.
        a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")
        A_data = tvm.nd.array(a_np, device=sess.device)
        assert (A_data.numpy() == a_np).all()
        C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.zeros(shape)).all()
    with _make_add_sess_with_shape(
        model, board, workspace_dir, shape, build_config, serial_number
    ) as sess:
        test_tensors(sess)
if __name__ == "__main__":
tvm.testing.main()
| 13,826 | 33.654135 | 99 | py |
tvm | tvm-main/tests/micro/arduino/test_arduino_workflow.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pathlib
import re
import shutil
import pytest
import tvm.testing
import test_utils
"""
This unit test simulates a simple user workflow, where we:
1. Generate a base sketch using a simple audio model
2. Modify the .ino file, much like a user would
3. Compile the sketch for the target board
-- If physical hardware is present --
4. Upload the sketch to a connected board
5. Open a serial connection to the board
6. Use serial connection to ensure model behaves correctly
"""
# Since these tests are sequential, we'll use the same project/workspace
# directory for all tests in this file. Note that --board can't be loaded
# from the fixture, since the fixture is function scoped (it has to be
# for the tests to be named correctly via parameterization).
@pytest.fixture(scope="module")
def workflow_workspace_dir(request):
    """Module-scoped workspace directory shared by all tests in this file."""
    # The board comes from the CLI option rather than the function-scoped fixture.
    board = request.config.getoption("--board")
    return test_utils.make_workspace_dir("arduino_workflow", board)
@pytest.fixture(scope="module")
def project_dir(workflow_workspace_dir):
    """Path of the generated Arduino project inside the shared workspace."""
    return workflow_workspace_dir / "project"
# We MUST pass workspace_dir, not project_dir, or the workspace will be dereferenced
# too soon. We can't use the board fixture either for the reason mentioned above.
@pytest.fixture(scope="module")
def project(request, microtvm_debug, workflow_workspace_dir):
    """Module-scoped KWS example project generated for the selected board."""
    board = request.config.getoption("--board")
    serial_number = request.config.getoption("--serial-number")
    return test_utils.make_kws_project(board, microtvm_debug, workflow_workspace_dir, serial_number)
def _get_directory_elements(directory):
    """Return the set of entry names directly inside *directory*."""
    return {entry.name for entry in directory.iterdir()}
def test_project_folder_structure(project_dir, project):
    """The generated project must contain the expected top-level and src entries."""
    assert set(["microtvm_api_server.py", "project.ino", "src"]).issubset(
        _get_directory_elements(project_dir)
    )
    source_dir = project_dir / "src"
    assert _get_directory_elements(source_dir) == set(
        ["model", "standalone_crt", "platform.c", "platform.h"]
    )
def test_project_model_integrity(project_dir, project):
    """src/model must contain exactly the generated libraries and the model tar."""
    model_dir = project_dir / "src" / "model"
    assert _get_directory_elements(model_dir) == set(
        ["default_lib0.c", "default_lib1.c", "default_lib2.c", "model.tar"]
    )
def test_model_platform_templating(project_dir, project):
    """platform.c must define TVM_WORKSPACE_SIZE_BYTES exactly once, with a sane value."""
    platform_c = (project_dir / "src" / "platform.c").read_text()
    workspace_size_defs = re.findall(r"\#define TVM_WORKSPACE_SIZE_BYTES ([0-9]*)", platform_c)
    assert workspace_size_defs
    assert len(workspace_size_defs) == 1

    # We deliberately avoid pinning an exact value: improvements to TVM may
    # shrink the required workspace, and this test should not break then.
    # We only require the size to stay within a plausible band.
    workspace_size = int(workspace_size_defs[0])
    assert workspace_size < 30000
    assert workspace_size > 9000
def test_import_rerouting(project_dir, project):
    """Spot-check one CRT source file to confirm its includes were rerouted."""
    runtime_path = project_dir / "src" / "standalone_crt" / "src" / "runtime"
    c_backend_api_path = runtime_path / "crt" / "common" / "crt_backend_api.c"
    assert c_backend_api_path.exists()

    c_backend_api_c = c_backend_api_path.read_text()
    assert '#include "inttypes.h"' in c_backend_api_c
    assert "include/tvm/runtime/crt/platform.h" in c_backend_api_c
# Build on top of the generated project by replacing the
# top-level .ino fileand adding data input files, much
# like a user would
@pytest.fixture(scope="module")
def modified_project(project_dir, project):
    """Overlay the user-style sketch and KWS sample data onto the project."""
    this_dir = pathlib.Path(__file__).parent
    kws_testdata_dir = this_dir.parent / "testdata" / "kws"
    arduino_testdata_dir = this_dir / "testdata"
    # Replace the generated top-level sketch with the test's own version.
    shutil.copy2(arduino_testdata_dir / "project.ino", project_dir / "project.ino")
    # Add the four audio sample inputs used by the sketch.
    project_data_dir = project_dir / "src" / "data"
    project_data_dir.mkdir()
    for sample in ["yes.c", "no.c", "silence.c", "unknown.c"]:
        shutil.copy2(kws_testdata_dir / sample, project_data_dir / sample)
    return project
@pytest.fixture(scope="module")
def compiled_project(modified_project):
    """Build the modified project once for the whole module."""
    modified_project.build()
    return modified_project
def test_compile_yes_no_project(project_dir, project, compiled_project):
    """Compilation must produce a build directory containing at least one artifact."""
    build_dir = project_dir / "build"
    assert build_dir.exists()
    assert next(build_dir.iterdir(), None) is not None
"""------------------------------------------------------------
If we're not running on real hardware, no further tests are run
------------------------------------------------------------"""
@pytest.fixture(scope="module")
def uploaded_project(compiled_project):
    """Flash the compiled project onto the attached board."""
    compiled_project.flash()
    return compiled_project
""" Sample serial output:
category,runtime,yes,no,silence,unknown
yes,56762,115,-123,-125,-123,
no,56762,-128,4,-123,-9,
silence,56792,-128,-118,107,-117,
unknown,56792,-128,-125,-128,125,
"""
SERIAL_OUTPUT_HEADERS = "category,runtime,yes,no,silence,unknown"
@pytest.fixture(scope="module")
def serial_output(uploaded_project):
    """Read the board's CSV report from the serial transport and parse it.

    Returns a list of rows ``[category, runtime, yes, no, silence, unknown]``
    with the five numeric columns converted to int (see the sample output in
    the comment above SERIAL_OUTPUT_HEADERS).
    """
    transport = uploaded_project.transport()
    transport.open()
    # Read up to 2048 bytes with a 60-second timeout.
    out = transport.read(2048, 60)
    out_str = out.decode("utf-8")
    out_lines = out_str.split("\r\n")
    assert SERIAL_OUTPUT_HEADERS in out_lines
    headers_index = out_lines.index(SERIAL_OUTPUT_HEADERS)
    # The four data rows immediately follow the header row.
    data_lines = out_lines[headers_index + 1 : headers_index + 5]
    split_lines = [line.split(",") for line in data_lines]
    # Keep the category string; convert the runtime and four scores to int
    # (each row ends with a trailing comma, so only columns 1..5 are numeric).
    return [[line[0]] + list(map(int, line[1:6])) for line in split_lines]
TENSORFLOW_EVALUATIONS = {
"yes": [115, -123, -125, -123],
"no": [-128, 4, -123, -9],
"silence": [-128, -118, 107, -117],
"unknown": [-128, -125, -128, 125],
}
MAX_PREDICTION_DIFFERENCE = 2
@pytest.mark.requires_hardware
def test_project_inference_correctness(serial_output):
    """On-device scores must closely match the Tensorflow reference values."""
    predictions = {row[0]: row[2:] for row in serial_output}

    for sample, scores in predictions.items():
        # Rounding differences mean the device does not reproduce the
        # Tensorflow values exactly, only to within a small margin.
        reference = TENSORFLOW_EVALUATIONS[sample]
        assert max(s - r for s, r in zip(scores, reference)) < MAX_PREDICTION_DIFFERENCE
MAX_INFERENCE_TIME_US = 200 * 1000
MAX_INFERENCE_TIME_RANGE_US = 1000
@pytest.mark.requires_hardware
def test_project_inference_runtime(serial_output):
    """Inference must be fast enough and consistent across inputs."""
    runtimes_us = [row[1] for row in serial_output]

    # Inference time varies with architecture and clock speed, but anything
    # over 200 ms is far too slow — each inference takes ~60 ms on the Sony
    # Spresense running at 156 MHz.
    assert max(runtimes_us) < MAX_INFERENCE_TIME_US

    # Per-input runtimes should be nearly identical (<100 us spread on the
    # Spresense). Attached hardware (e.g. the Spresense extension board) may
    # widen this and cause the check to fail.
    assert max(runtimes_us) - min(runtimes_us) < MAX_INFERENCE_TIME_RANGE_US
if __name__ == "__main__":
tvm.testing.main()
| 8,101 | 34.535088 | 100 | py |
tvm | tvm-main/tests/micro/arduino/test_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import pathlib
import requests
import datetime
import tvm.micro
import tvm.target.target
from tvm.micro import project
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.testing.utils import fetch_model_from_url
TEMPLATE_PROJECT_DIR = pathlib.Path(tvm.micro.get_microtvm_template_projects("arduino"))
BOARDS = TEMPLATE_PROJECT_DIR / "boards.json"
def arduino_boards() -> dict:
    """Load boards.json and map each board name to its target model."""
    with open(BOARDS) as f:
        board_properties = json.load(f)
    return {name: entry["model"] for name, entry in board_properties.items()}
ARDUINO_BOARDS = arduino_boards()
def make_workspace_dir(test_name, board):
    """Create a unique, timestamped workspace tempdir next to this file.

    The directory is named ``workspace_<test_name>_<board>/<timestamp>``. If
    that path already exists, a numeric suffix is appended per retry — note
    the suffixes accumulate (e.g. ``...-1-2``) because each attempt extends
    the previous candidate path.
    """
    filepath = pathlib.Path(__file__)
    board_workspace = (
        filepath.parent
        / f"workspace_{test_name}_{board}"
        / datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    )
    number = 0
    while board_workspace.exists():
        number += 1
        board_workspace = pathlib.Path(str(board_workspace) + f"-{number}")
    board_workspace.parent.mkdir(exist_ok=True, parents=True)
    # Wrap in a tvm tempdir so the workspace is cleaned up automatically.
    t = tvm.contrib.utils.tempdir(board_workspace)
    return t
def make_kws_project(board, microtvm_debug, workspace_dir, serial_number: str):
    """Build an Arduino 'example_project' for the micro_speech KWS model.

    Downloads the pinned tflite-micro keyword-spotting model, compiles it
    with the AOT executor (unpacked API) for the board's target, and
    generates the project under ``workspace_dir / "project"``.
    """
    # NOTE: removed an unused `this_dir = pathlib.Path(__file__).parent` local.
    model = ARDUINO_BOARDS[board]
    build_config = {"debug": microtvm_debug}
    mod, params = fetch_model_from_url(
        url="https://github.com/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
        model_format="tflite",
        sha256="09e5e2a9dfb2d8ed78802bf18ce297bff54281a66ca18e0c23d69ca14f822a83",
    )
    target = tvm.target.target.micro(model)
    runtime = Runtime("crt")
    executor = Executor("aot", {"unpacked-api": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = relay.build(mod, target, runtime=runtime, executor=executor, params=params)
    return tvm.micro.generate_project(
        str(TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        {
            "board": board,
            "project_type": "example_project",
            # Verbose project output only when a debug build was requested.
            "verbose": bool(build_config.get("debug")),
            "serial_number": serial_number,
        },
    )
| 3,163 | 33.021505 | 166 | py |
tvm | tvm-main/tests/micro/common/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
pytest_plugins = [
"tvm.micro.testing.pytest_plugin",
]
| 825 | 40.3 | 62 | py |
tvm | tvm-main/tests/micro/common/test_autotune.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from io import StringIO
import json
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.micro.testing
from tvm.testing.utils import fetch_model_from_url
TUNING_RUNS_PER_OPERATOR = 2
@pytest.mark.requires_hardware
@tvm.testing.requires_micro
@pytest.mark.skip_boards(
    # Fixed: the original list contained a stray empty string entry.
    ["nucleo_l4r5zi", "nucleo_f746zg", "stm32f746g_disco", "nrf5340dk_nrf5340_cpuapp"]
)
def test_kws_autotune_workflow(platform, board, tmp_path):
    """End-to-end AutoTVM workflow on the MLPerf Tiny keyword-spotting model.

    Tunes each extracted task TUNING_RUNS_PER_OPERATOR times, sanity-checks
    the produced tuning log, then compiles the best configuration with the
    AOT executor and validates on-device inference latency on random input.
    """
    mod, params = fetch_model_from_url(
        url="https://github.com/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
        model_format="tflite",
        sha256="09e5e2a9dfb2d8ed78802bf18ce297bff54281a66ca18e0c23d69ca14f822a83",
    )
    target = tvm.micro.testing.get_target(platform, board)

    str_io_logs = tvm.micro.testing.tune_model(
        platform, board, target, mod, params, TUNING_RUNS_PER_OPERATOR
    )
    assert isinstance(str_io_logs, StringIO)

    str_logs = str_io_logs.getvalue().rstrip().split("\n")
    logs = list(map(json.loads, str_logs))

    # Some tuning tasks don't have any config space, and will only be run once
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
    assert len(tasks) <= len(logs) <= len(tasks) * TUNING_RUNS_PER_OPERATOR

    # Check we tested both operators
    op_names = list(map(lambda x: x["input"][1], logs))
    assert op_names[0] == op_names[1] == "conv2d_nhwc_spatial_pack.arm_cpu"

    # Make sure we tested different code. != does deep comparison in Python 3
    assert logs[0]["config"]["index"] != logs[1]["config"]["index"]
    assert logs[0]["config"]["entity"] != logs[1]["config"]["entity"]

    # Compile the best model with AOT and connect to it
    str_io_logs.seek(0)
    with tvm.micro.testing.create_aot_session(
        platform,
        board,
        target,
        mod,
        params,
        build_dir=tmp_path,
        tune_logs=str_io_logs,
    ) as session:
        aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
        samples = (
            np.random.randint(low=-127, high=128, size=(1, 1960), dtype=np.int8) for x in range(3)
        )

        # Validate performance across random runs (typo fixed: "perforance")
        runtimes = [
            runtime
            for _, runtime in tvm.micro.testing.predict_labels_aot(
                session, aot_executor, samples, runs_per_sample=20
            )
        ]

        # `time` is the average time taken to execute model inference on the
        # device, measured in seconds. It does not include the time to upload
        # the input data via RPC. On slow boards like the Arduino Due, time
        # is around 0.12 (120 ms), so this gives us plenty of buffer.
        assert np.median(runtimes) < 1
if __name__ == "__main__":
tvm.testing.main()
| 3,756 | 36.949495 | 166 | py |
tvm | tvm-main/tests/micro/common/test_tvmc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import subprocess
import shlex
import sys
import logging
import tempfile
import pathlib
import sys
import os
import shutil
import tvm
import tvm.testing
from tvm.contrib.download import download_testdata
TVMC_COMMAND = [sys.executable, "-m", "tvm.driver.tvmc"]
MODEL_URL = "https://github.com/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite"
MODEL_FILE = "micro_speech.tflite"
executor = tvm.testing.parameter("aot", "graph")
use_local_path = tvm.testing.parameter(True, False)
# TODO(mehrdadh): replace this with _main from tvm.driver.tvmc.main
# Issue: https://github.com/apache/tvm/issues/9612
def _run_tvmc(cmd_args: list, *args, **kwargs):
    """Invoke ``python -m tvm.driver.tvmc`` with *cmd_args*; return the exit status.

    Extra positional/keyword arguments are forwarded to ``subprocess.check_call``.
    """
    full_cmd = TVMC_COMMAND + cmd_args
    if "cwd" in kwargs:
        cwd_note = f" (in cwd: {kwargs['cwd']})"
    else:
        cwd_note = ""
    quoted = " ".join(shlex.quote(part) for part in full_cmd)
    logging.debug("run%s: %s", cwd_note, quoted)
    return subprocess.check_call(full_cmd, *args, **kwargs)
def create_project_command(project_path: str, mlf_path: str, platform: str, board: str) -> list:
    """Build the argument list for ``tvmc micro create-project``."""
    args = ["micro", "create-project", project_path, mlf_path, platform]
    args += ["--project-option", "project_type=host_driven", f"board={board}"]
    if platform == "zephyr":
        # TODO: 4096 is driven by experiment on nucleo_l4r5zi. We should cleanup this after we have
        # better memory management.
        args.append("config_main_stack_size=4096")
    return args
def compile_command(
    model_path: str, target: tvm.target.Target, tar_path: pathlib.Path, executor: str
):
    """Build the argument list for ``tvmc compile``.

    Parameters
    ----------
    model_path : str
        Path to the model file to compile.
    target : tvm.target.Target
        Compilation target, interpolated into ``--target``.
    tar_path : pathlib.Path
        Output location for the Model Library Format archive.
    executor : str
        Executor kind; "graph" gets extra graph-executor flags.

    Returns
    -------
    list
        Arguments suitable for passing to :func:`_run_tvmc`.
    """
    runtime = "crt"
    cmd = [
        "compile",
        model_path,
        f"--target={target}",
        f"--runtime={runtime}",
        "--runtime-crt-system-lib",
        str(1),
        f"--executor={executor}",
    ]
    if executor == "graph":
        cmd += [
            "--executor-graph-link-params",
            str(0),
        ]
    cmd += [
        "--output",
        str(tar_path),
        "--output-format",
        "mlf",
        "--pass-config",
        "tir.disable_vectorize=1",
    ]
    if executor == "graph":
        # NOTE(review): presumably AlterOpLayout must stay off so the graph
        # executor path keeps its expected layouts — confirm.
        cmd += ["--disabled-pass=AlterOpLayout"]
    # Fixed: removed a dead loop that concatenated `cmd` into an unused
    # `cmd_str` variable (quadratic string building with no effect).
    return cmd
def get_workspace_dir(use_local_path: bool) -> pathlib.Path:
    """Return a fresh workspace directory.

    With *use_local_path* set, a ``tvmc_relative_path_test`` directory is
    (re)created under the current working directory; otherwise a TVM
    temporary-directory object is returned.
    """
    if not use_local_path:
        return tvm.contrib.utils.tempdir()
    workspace = pathlib.Path(os.path.abspath("./tvmc_relative_path_test"))
    if os.path.isdir(workspace):
        shutil.rmtree(workspace)
    os.mkdir(workspace)
    return workspace
@tvm.testing.requires_micro
def test_tvmc_exist(platform, board):
    """The ``tvmc micro`` sub-command should be available and exit cleanly."""
    assert _run_tvmc(["micro", "-h"]) == 0
@tvm.testing.requires_micro
def test_tvmc_model_build_only(platform, board, executor, use_local_path):
    """Compile a TFLite model with tvmc, create a micro project from it, and build."""
    target = tvm.micro.testing.get_target(platform, board)
    workspace = get_workspace_dir(use_local_path)
    model_path = download_testdata(MODEL_URL, MODEL_FILE, module="model")
    tar_path = str(workspace / "model.tar")
    project_dir = str(workspace / "project")

    assert (
        _run_tvmc(compile_command(model_path, target, tar_path, executor)) == 0
    ), "tvmc failed in step: compile"
    assert (
        _run_tvmc(create_project_command(project_dir, tar_path, platform, board)) == 0
    ), "tvmc micro failed in step: create-project"
    assert (
        _run_tvmc(["micro", "build", project_dir, platform]) == 0
    ), "tvmc micro failed in step: build"

    if use_local_path:
        shutil.rmtree(workspace)
@pytest.mark.skip("Flaky, https://github.com/apache/tvm/issues/14004")
@pytest.mark.requires_hardware
@tvm.testing.requires_micro
@pytest.mark.skip_boards(
    ["nucleo_l4r5zi", "nucleo_f746zg", "stm32f746g_disco", "nrf5340dk_nrf5340_cpuapp"]
)
def test_tvmc_model_run(platform, board, executor, use_local_path):
    """Full tvmc flow on hardware: compile, create-project, build, flash, run.

    The model is executed with randomly generated input; only the exit
    status of each tvmc step is checked.
    """
    target = tvm.micro.testing.get_target(platform, board)
    output_dir = get_workspace_dir(use_local_path)

    # Fixed: the original had a duplicated assignment (`model_path = model_path = ...`).
    # NOTE(review): the build-only test caches this download under module="model";
    # confirm whether module="data" here is intentional.
    model_path = download_testdata(MODEL_URL, MODEL_FILE, module="data")

    tar_path = str(output_dir / "model.tar")
    project_dir = str(output_dir / "project")

    cmd_result = _run_tvmc(compile_command(model_path, target, tar_path, executor))
    assert cmd_result == 0, "tvmc failed in step: compile"

    cmd_result = _run_tvmc(create_project_command(project_dir, tar_path, platform, board))
    assert cmd_result == 0, "tvmc micro failed in step: create-project"

    build_cmd = ["micro", "build", project_dir, platform]
    cmd_result = _run_tvmc(build_cmd)
    assert cmd_result == 0, "tvmc micro failed in step: build"

    flash_cmd = ["micro", "flash", project_dir, platform]
    cmd_result = _run_tvmc(flash_cmd)
    assert cmd_result == 0, "tvmc micro failed in step: flash"

    run_cmd = [
        "run",
        "--device",
        "micro",
        project_dir,
    ]
    run_cmd += ["--fill-mode", "random"]
    cmd_result = _run_tvmc(run_cmd)
    assert cmd_result == 0, "tvmc micro failed in step: run"

    if use_local_path:
        shutil.rmtree(output_dir)
if __name__ == "__main__":
tvm.testing.main()
| 6,245 | 31.030769 | 165 | py |
tvm | tvm-main/tests/micro/common/test_mlperftiny.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import logging
from urllib.parse import urlparse
import struct
import pytest
import tensorflow as tf
import numpy as np
import tarfile
import tempfile
import pathlib
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.backend import Executor, Runtime
from tvm.micro.project_api import server
from tvm.contrib.download import download_testdata
from tvm.micro import export_model_library_format
from tvm.micro.model_library_format import generate_c_interface_header
from tvm.micro.testing import create_aot_session, predict_labels_aot
from tvm.micro.testing.utils import (
create_header_file,
mlf_extract_workspace_size_bytes,
)
from tvm.micro.testing.utils import aot_transport_find_message
MLPERF_TINY_MODELS = {
"kws": {
"name": "Keyword Spotting",
"index": 1,
"url": "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite",
"sample": "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
"sample_label": 6,
},
"vww": {
"name": "Visual Wake Words",
"index": 2,
"url": "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite",
"sample": "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/visual_wake_word_int8_1.npy",
"sample_label": 1,
},
# Note: The reason we use quantized model with float32 I/O is
# that TVM does not handle the int8 I/O correctly and accuracy
# would drop significantly.
"ad": {
"name": "Anomaly Detection",
"index": 3,
"url": "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/anomaly_detection/trained_models/ToyCar/baseline_tf23/model/model_ToyCar_quant_fullint_micro.tflite",
"sample": "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/anomaly_detection_normal_id_01.npy",
# This model takes in a (1, 640) vector, so it must be called 40 times -
# once for each time slice.
},
"ic": {
"name": "Image Classification",
"index": 4,
"url": "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite",
"sample": "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/image_classification_int8_0.npy",
"sample_label": 0,
},
}
MLPERFTINY_READY_MSG = "m-ready"
MLPERFTINY_RESULT_MSG = "m-results"
MLPERFTINY_NAME_MSG = "m-name"
def mlperftiny_get_module(model_name: str):
    """Download an MLPerf Tiny model and convert it to a Relay module.

    Parameters
    ----------
    model_name : str
        Key into MLPERF_TINY_MODELS ("kws", "vww", "ad", "ic").

    Returns
    -------
    (relay_mod, params, model_info) where model_info records the model's
    single input/output name, shape, and dtype, plus (for all models except
    "ad") the output quantization scale and zero point.
    """
    model_url = MLPERF_TINY_MODELS[model_name]["url"]
    url = urlparse(model_url)
    model_path = download_testdata(model_url, os.path.basename(url.path), module="model")

    # Fixed: read via a context manager so the file handle is closed
    # (the original used `open(...).read()` and leaked the handle).
    with open(model_path, "rb") as model_file:
        tflite_model_buf = model_file.read()

    try:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        # Older tflite packages expose the parser under tflite.Model.Model.
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

    # Use the TFLite interpreter only to introspect I/O tensor metadata.
    interpreter = tf.lite.Interpreter(model_path=str(model_path))
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    model_info = {
        "input_name": input_details[0]["name"],
        "input_shape": tuple(input_details[0]["shape"]),
        "input_dtype": np.dtype(input_details[0]["dtype"]).name,
        "output_name": output_details[0]["name"],
        "output_shape": tuple(output_details[0]["shape"]),
        "output_dtype": np.dtype(output_details[0]["dtype"]).name,
    }

    # The AD model uses float32 I/O (see MLPERF_TINY_MODELS note), so it has
    # no output quantization parameters.
    if model_name != "ad":
        model_info["quant_output_scale"] = output_details[0]["quantization_parameters"]["scales"][0]
        model_info["quant_output_zero_point"] = output_details[0]["quantization_parameters"][
            "zero_points"
        ][0]

    relay_mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={model_info["input_name"]: model_info["input_shape"]},
        dtype_dict={model_info["input_name"]: model_info["input_dtype"]},
    )
    return relay_mod, params, model_info
def get_test_data(model_name: str, project_type: str) -> list:
    """Download one input sample (and its label, when known) for *model_name*.

    Returns ([sample], [label]); label is None for models without a
    reference label (e.g. "ad").
    """
    entry = MLPERF_TINY_MODELS[model_name]
    parsed = urlparse(entry["sample"])
    sample_path = download_testdata(entry["sample"], os.path.basename(parsed.path), module="data")
    sample = np.load(sample_path)

    # The standalone mlperftiny project consumes raw uint8 tensors
    # (except the float-I/O anomaly-detection model).
    if project_type == "mlperftiny" and model_name != "ad":
        sample = sample.astype(np.uint8)

    label = MLPERF_TINY_MODELS[model_name].get("sample_label")
    return [sample], [label]
def predict_ad_labels_aot(session, aot_executor, input_data, runs_per_sample=1):
    """A special version of tvm/micro/testing/evaluation.py's predict_labels_aot.
    The runtime returned for each sample is the median of the runtimes for all slices
    in that sample."""
    assert runs_per_sample > 0
    # The AD model exposes exactly one input tensor and one output tensor.
    assert aot_executor.get_num_inputs() == 1
    assert aot_executor.get_num_outputs() == 1
    sample_counter = 0  # NOTE(review): incremented but never read; looks vestigial.
    for sample in input_data:
        # Device reconstruction of the sample, filled one time slice per iteration.
        output_fp32 = np.empty_like(sample)
        slice_runtimes = []
        for i, time_slice in enumerate(sample):
            # Each row of the sample is fed as a (1, 640) vector.
            aot_executor.get_input(0).copyfrom(time_slice.reshape((1, 640)))
            # time_evaluator returns a callable; invoking it runs inference
            # `runs_per_sample` times and reports timing statistics.
            result = aot_executor.module.time_evaluator(
                "run", session.device, number=runs_per_sample
            )()
            slice_runtimes.append(result.mean)
            output_fp32[i, :] = aot_executor.get_output(0).numpy()
        sample_counter += 1
        # Per-slice mean squared reconstruction error; yield its mean as the
        # sample's anomaly score along with the median per-slice runtime.
        errors = np.mean(np.square(sample - output_fp32), axis=1)
        yield np.mean(errors), np.median(slice_runtimes)
def _mlperftiny_get_name(device_transport) -> str:
    """Query the device for its MLPerfTiny name and extract it from the reply."""
    device_transport.write(b"name%", timeout_sec=5)
    reply = aot_transport_find_message(device_transport, MLPERFTINY_NAME_MSG, timeout_sec=5)
    match = re.search(r"\[([A-Za-z0-9_]+)\]", reply)
    return match.group(1)
def _mlperftiny_infer(transport, warmup: int, infer: int, timeout: int):
"""Send MLPerfTiny infer command."""
cmd = f"infer {warmup} {infer}%".encode("UTF-8")
transport.write(cmd, timeout_sec=timeout)
def _mlperftiny_write_sample(device_transport, data: list, timeout: int):
    """Stream one sample to the device using the MLPerfTiny ``db`` protocol.

    Floats are sent as four little-endian bytes; integer items must already
    fit in a single byte (two hex digits), otherwise ValueError is raised.
    """

    def _send(command: bytes):
        # Write one command and wait for the device's ready marker.
        logging.debug(f"transport write: {command}")
        device_transport.write(command, timeout)
        aot_transport_find_message(device_transport, MLPERFTINY_READY_MSG, timeout_sec=timeout)

    _send(f"db load {len(data)}%".encode("UTF-8"))
    for item in data:
        if isinstance(item, float):
            packed = struct.pack("<f", item)
            hex_pairs = [f"{byte:02x}" for byte in packed]
        else:
            digits = format(item, "x")
            # make sure hex value is in HH format
            if len(digits) < 2:
                digits = "0" + digits
            elif len(digits) > 2:
                raise ValueError(f"Hex value not in HH format: {digits}")
            hex_pairs = [digits]
        for pair in hex_pairs:
            _send(f"db {pair}%".encode("UTF-8"))
def _mlperftiny_test_dataset(device_transport, dataset, timeout):
    """Run test dataset compatible with MLPerfTiny format.

    Parameters
    ----------
    device_transport : transport object used to talk to the device.
    dataset : (samples, labels) pair of equal-length sequences.
    timeout : per-command timeout in seconds.

    Returns
    -------
    float
        Top-1 accuracy over the dataset in [0, 1].
    """
    samples, labels = dataset
    num_correct = 0
    total = 0
    # Fixed idiom: pair samples with labels via enumerate(zip(...)) instead
    # of manually indexing `labels` with a hand-maintained counter.
    for i_counter, (sample, label) in enumerate(zip(samples, labels)):
        logging.info(f"Writing Sample {i_counter}")
        _mlperftiny_write_sample(device_transport, sample.flatten().tolist(), timeout)
        _mlperftiny_infer(device_transport, 1, 0, timeout)
        results = aot_transport_find_message(
            device_transport, MLPERFTINY_RESULT_MSG, timeout_sec=timeout
        )
        # The device replies "m-results-[s0,s1,...]": one score per class.
        m = re.search(r"m\-results\-\[([A-Za-z0-9_,.]+)\]", results)
        scores = np.array([float(x) for x in m.group(1).split(",")])
        if np.argmax(scores) == label:
            num_correct += 1
        total += 1
    return float(num_correct / total)
def _mlperftiny_test_dataset_ad(device_transport, dataset, timeout):
    """Run test dataset compatible with MLPerfTiny format for AD model.

    The AD sample is fed one (1, 640) time slice at a time; the device's
    per-slice scores are averaged into a single anomaly score.
    """
    samples, _ = dataset
    # Fixed: iterate over the sample's actual slice count (the result buffer
    # was already sized from it) instead of a hard-coded range(0, 40), and
    # stop shadowing the builtin `slice`.
    num_slices = samples[0].shape[0]  # 40 for the reference sample
    result_output = np.zeros(num_slices)
    for slice_idx in range(num_slices):
        _mlperftiny_write_sample(
            device_transport, samples[0][slice_idx, :].flatten().tolist(), timeout
        )
        _mlperftiny_infer(device_transport, 1, 0, timeout)
        results = aot_transport_find_message(
            device_transport, MLPERFTINY_RESULT_MSG, timeout_sec=timeout
        )
        m = re.search(r"m\-results\-\[([A-Za-z0-9_,.]+)\]", results)
        results_val = [float(x) for x in m.group(1).split(",")]
        result_output[slice_idx] = np.array(results_val)
    return np.average(result_output)
@pytest.mark.parametrize("model_name", ["kws", "vww", "ad", "ic"])
@pytest.mark.parametrize("project_type", ["host_driven", "mlperftiny"])
@tvm.testing.requires_micro
@pytest.mark.skip_boards(
    ["mps2_an521", "mps3_an547", "stm32f746g_disco", "nucleo_f746zg", "nrf5340dk_nrf5340_cpuapp"]
)
def test_mlperftiny_models(platform, board, workspace_dir, serial_number, model_name, project_type):
    """MLPerfTiny models test.
    Testing MLPerfTiny models using host_driven project. In this case one input sample is used
    to verify the end to end execution. Accuracy is not checked in this test.
    Also, this test builds each model in standalone mode that can be used for MLPerfTiny submissions.
    """
    if platform != "zephyr":
        pytest.skip(reason="Other platforms are not supported yet.")
    # Hard-coded off; flip to exercise the CMSIS-NN branches below.
    use_cmsis_nn = False
    relay_mod, params, model_info = mlperftiny_get_module(model_name)
    target = tvm.micro.testing.get_target(platform, board)
    project_options = {"config_main_stack_size": 4000, "serial_number": serial_number}
    if use_cmsis_nn:
        project_options["cmsis_path"] = os.getenv("CMSIS_PATH")
    # The AD model needs the per-slice prediction helper defined above.
    if model_name == "ad":
        predictor = predict_ad_labels_aot
    else:
        predictor = predict_labels_aot
    samples, labels = get_test_data(model_name, project_type)
    if project_type == "host_driven":
        # Host-driven mode: run one sample over RPC and report average runtime.
        with create_aot_session(
            platform,
            board,
            target,
            relay_mod,
            params,
            build_dir=workspace_dir,
            # The longest models take ~5 seconds to infer, but running them
            # ten times (with NUM_TESTING_RUNS_PER_SAMPLE) makes that 50
            timeout_override=server.TransportTimeouts(
                session_start_retry_timeout_sec=300,
                session_start_timeout_sec=150,
                session_established_timeout_sec=150,
            ),
            project_options=project_options,
            use_cmsis_nn=use_cmsis_nn,
        ) as session:
            aot_executor = tvm.runtime.executor.aot_executor.AotModule(
                session.create_aot_executor()
            )
            args = {
                "session": session,
                "aot_executor": aot_executor,
                "input_data": samples,
                "runs_per_sample": 10,
            }
            predicted_labels, runtimes = zip(*predictor(**args))
            # Report in milliseconds.
            avg_runtime = float(np.mean(runtimes) * 1000)
            print(f"Model {model_name} average runtime: {avg_runtime}")
    elif project_type == "mlperftiny":
        # Standalone mode: build a flashable MLPerfTiny-API firmware image.
        runtime = Runtime("crt")
        executor = Executor(
            "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 8}
        )
        config = {"tir.disable_vectorize": True}
        if use_cmsis_nn:
            from tvm.relay.op.contrib import cmsisnn

            config["relay.ext.cmsisnn.options"] = {"mcpu": target.mcpu}
            relay_mod = cmsisnn.partition_for_cmsisnn(relay_mod, params, mcpu=target.mcpu)
        with tvm.transform.PassContext(opt_level=3, config=config):
            module = tvm.relay.build(
                relay_mod, target=target, params=params, runtime=runtime, executor=executor
            )
        temp_dir = tvm.contrib.utils.tempdir()
        model_tar_path = temp_dir / "model.tar"
        export_model_library_format(module, model_tar_path)
        workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
        # Extra tar: generated C interface header + zeroed output buffer header.
        extra_tar_dir = tvm.contrib.utils.tempdir()
        extra_tar_file = extra_tar_dir / "extra.tar"
        with tarfile.open(extra_tar_file, "w:gz") as tf:
            with tempfile.TemporaryDirectory() as tar_temp_dir:
                model_files_path = os.path.join(tar_temp_dir, "include")
                os.mkdir(model_files_path)
                header_path = generate_c_interface_header(
                    module.libmod_name,
                    [model_info["input_name"]],
                    [model_info["output_name"]],
                    [],
                    {},
                    [],
                    0,
                    model_files_path,
                    {},
                    {},
                )
                tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))
            create_header_file(
                "output_data",
                np.zeros(
                    shape=model_info["output_shape"],
                    dtype=model_info["output_dtype"],
                ),
                "include/tvm",
                tf,
            )
        # Total element count of the input tensor (in bytes for float input).
        input_total_size = 1
        input_shape = model_info["input_shape"]
        for i in range(len(input_shape)):
            input_total_size *= input_shape[i]
        # float input
        if model_name == "ad":
            input_total_size *= 4
        template_project_path = pathlib.Path(tvm.micro.get_microtvm_template_projects(platform))
        project_options.update(
            {
                "extra_files_tar": str(extra_tar_file),
                "project_type": project_type,
                "board": board,
                "compile_definitions": [
                    f"-DWORKSPACE_SIZE={workspace_size + 512}",  # Memory workspace size, 512 is a temporary offset
                    # since the memory calculation is not accurate.
                    f'-DTARGET_MODEL={MLPERF_TINY_MODELS[model_name]["index"]}',  # Sets the model index for project compilation.
                    f"-DTH_MODEL_VERSION=EE_MODEL_VERSION_{model_name.upper()}01",  # Sets model version. This is required by MLPerfTiny API.
                    f"-DMAX_DB_INPUT_SIZE={input_total_size}",  # Max size of the input data array.
                ],
            }
        )
        # Quantization parameters only exist for the non-AD (int8-output) models.
        if model_name != "ad":
            project_options["compile_definitions"].append(
                f'-DOUT_QUANT_SCALE={model_info["quant_output_scale"]}'
            )
            project_options["compile_definitions"].append(
                f'-DOUT_QUANT_ZERO={model_info["quant_output_zero_point"]}'
            )
        project = tvm.micro.project.generate_project_from_mlf(
            template_project_path, workspace_dir / "project", model_tar_path, project_options
        )
        project.build()
        project.flash()
        with project.transport() as transport:
            aot_transport_find_message(transport, MLPERFTINY_READY_MSG, timeout_sec=200)
            print(f"Testing {model_name} on {_mlperftiny_get_name(transport)}.")
            assert _mlperftiny_get_name(transport) == "microTVM"
            if model_name != "ad":
                accuracy = _mlperftiny_test_dataset(transport, [samples, labels], 100)
                print(f"Model {model_name} accuracy: {accuracy}")
            else:
                mean_error = _mlperftiny_test_dataset_ad(transport, [samples, None], 100)
                print(
                    f"""Model {model_name} mean error: {mean_error}.
                    Note that this is not the final accuracy number.
                    To calculate that, you need to use sklearn.metrics.roc_auc_score function."""
                )
if __name__ == "__main__":
tvm.testing.main()
| 17,396 | 40.03066 | 215 | py |
tvm | tvm-main/tests/micro/common/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
tvm | tvm-main/tests/micro/project_api/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.relay.backend import Runtime
from tvm.micro.testing import get_target
def build_project_api(platform: str):
    """Build a relay module end to end through the Project API template project.

    Constructs a small elementwise function (x * x + 1 over int8[10]),
    compiles it for *platform*'s default test board, and generates + builds
    the platform's host_driven template project.

    Parameters
    ----------
    platform : str
        Either "arduino" or "zephyr".

    Raises
    ------
    ValueError
        If *platform* is not one of the supported names.
    """
    shape = (10,)
    dtype = "int8"

    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)

    if platform == "arduino":
        board = "due"
    elif platform == "zephyr":
        board = "qemu_x86"
    else:
        # Fail fast instead of hitting a NameError on `board` below.
        raise ValueError(f"Unsupported platform: {platform}")

    runtime = Runtime("crt", {"system-lib": True})
    target = get_target(platform, board)

    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime)

    project_options = {
        "project_type": "host_driven",
        "board": board,
    }
    temp_dir = tvm.contrib.utils.tempdir()
    project = tvm.micro.generate_project(
        tvm.micro.get_microtvm_template_projects(platform),
        mod,
        temp_dir / "project",
        project_options,
    )
    project.build()
| 1,984 | 32.644068 | 88 | py |
tvm | tvm-main/tests/micro/project_api/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Project API in different platforms infrastructure."""
| 848 | 43.684211 | 62 | py |
tvm | tvm-main/tests/micro/project_api/test_project_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import tvm
from tvm import relay
from tvm.micro.project_api import server
from tvm.relay.backend import Runtime
from tvm.micro.testing import get_target
from tvm.relay.backend import Runtime
import tvm.micro.testing
from .utils import build_project_api
API_GENERATE_PROJECT = "generate_project"
API_BUILD = "build"
API_FLASH = "flash"
API_OPEN_TRANSPORT = "open_transport"
PLATFORM_ARDUINO = "arduino"
PLATFORM_ZEPHYR = "zephyr"
platform = tvm.testing.parameter(PLATFORM_ARDUINO, PLATFORM_ZEPHYR)
@tvm.testing.requires_micro
def test_default_options_exist(platform):
    """Every default Project API option must be exposed by the platform template.

    Builds a trivial relay module, generates the template project, and
    checks that each option from server.default_project_options() appears
    in the project's reported option list.
    """
    board = "qemu_x86" if platform == "zephyr" else "due"
    x = relay.var("x", relay.TensorType(shape=(10,), dtype="int8"))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=(10,), dtype="int8")))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)

    with tvm.transform.PassContext(opt_level=3):
        mod = tvm.relay.build(
            ir_mod, target=tvm.micro.testing.get_target("crt"), runtime=Runtime("crt")
        )

    temp_dir = tvm.contrib.utils.tempdir()
    project = tvm.micro.generate_project(
        str(tvm.micro.get_microtvm_template_projects(platform)),
        mod,
        temp_dir / "project",
        {
            "board": board,
            "project_type": "host_driven",
        },
    )

    platform_options = project._info["project_options"]
    default_options = server.default_project_options()

    # Fixed idiom: set comprehension + O(1) membership instead of the
    # original append loop and O(n) list scans.
    option_names = {option["name"] for option in platform_options}
    for option in default_options:
        assert option.name in option_names
@tvm.testing.requires_micro
def test_project_minimal_options(platform):
    """Generate and build a template project passing only the minimum
    project options (board and project_type) via build_project_api."""
    build_project_api(platform)
if __name__ == "__main__":
tvm.testing.main()
| 2,677 | 29.431818 | 86 | py |
tvm | tvm-main/tests/micro/project_api/test_zephyr_microtvm_api_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from .utils import build_project_api
@tvm.testing.requires_micro
def test_option_cmsis_path():
    """Test project API without CMSIS_PATH environment variable.

    Temporarily removes CMSIS_PATH, builds the Zephyr template project, and
    restores the variable afterwards — even if the build raises.
    """
    # Fixed: `del os.environ[...]` raised KeyError when the variable was
    # unset; `pop` with a default is safe.
    cmsis_path = os.environ.pop("CMSIS_PATH", None)
    try:
        build_project_api("zephyr")
    finally:
        # Fixed: restore only when it was set (assigning None raised
        # TypeError), and do so even on build failure.
        if cmsis_path is not None:
            os.environ["CMSIS_PATH"] = cmsis_path
| 1,133 | 34.4375 | 67 | py |
tvm | tvm-main/tests/micro/zephyr/test_zephyr.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import pathlib
import logging
import pytest
import numpy as np
import onnx
from PIL import Image
import tvm
import tvm.testing
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from tvm.relay.testing import byoc
from tvm.micro.project_api import server
from tvm.contrib import utils
from tvm.micro.testing.utils import check_tune_log
from . import utils
_LOG = logging.getLogger(__name__)
def _make_sess_from_op(
    temp_dir,
    board,
    op_name,
    sched,
    arg_bufs,
    build_config,
    use_fvp,
    serial_number,
):
    """Compile one TE operator for *board* and open a device session for it."""
    zephyr_target = tvm.micro.testing.get_target("zephyr", board)
    target = tvm.target.Target(target=zephyr_target, host=zephyr_target)
    crt_runtime = Runtime("crt", {"system-lib": True})
    pass_config = {"tir.disable_vectorize": True}
    with tvm.transform.PassContext(opt_level=3, config=pass_config):
        mod = tvm.build(sched, arg_bufs, target=target, runtime=crt_runtime, name=op_name)
    return _make_session(temp_dir, board, mod, build_config, use_fvp, serial_number)
def _make_session(temp_dir, board, mod, build_config, use_fvp, serial_number):
    """Generate, build, and flash a host-driven Zephyr project for *mod*.

    Returns an open ``tvm.micro.Session`` wrapping the project's transport.
    The caller is responsible for using it as a context manager.
    """
    config_main_stack_size = None
    if utils.ZEPHYR_BOARDS[board]["is_qemu"]:
        # QEMU targets need a larger main stack than the Zephyr default.
        config_main_stack_size = 1536
    project_options = {
        "project_type": "host_driven",
        "verbose": bool(build_config.get("debug")),
        "board": board,
        "arm_fvp_path": "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55",
        "use_fvp": bool(use_fvp),
        "serial_number": serial_number,
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size
    project = tvm.micro.generate_project(
        str(utils.TEMPLATE_PROJECT_DIR),
        mod,
        temp_dir / "project",
        project_options,
    )
    # Build the firmware and flash it (or start the emulator) before opening the RPC transport.
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())
def _make_add_sess(temp_dir, board, build_config, use_fvp, serial_number, dtype="int8"):
    """Open a device session running a tiny ``C[i] = A[i] + B[0]`` kernel named "add"."""
    lhs = tvm.te.placeholder((2,), dtype=dtype)
    rhs = tvm.te.placeholder((1,), dtype=dtype)
    out = tvm.te.compute(lhs.shape, lambda i: lhs[i] + rhs[0], name="C")
    schedule = tvm.te.create_schedule(out.op)
    return _make_sess_from_op(
        temp_dir,
        board,
        "add",
        schedule,
        [lhs, rhs, out],
        build_config,
        use_fvp,
        serial_number,
    )
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_add_uint(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test compiling the on-device runtime."""
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        # Upload the int8 operands and confirm the host<->device round trip is lossless.
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()
        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        # Kernel computes C[i] = A[i] + B[0] -> [2+4, 3+4].
        assert (C_data.numpy() == np.array([6, 7])).all()
    with _make_add_sess(workspace_dir, board, build_config, use_fvp, serial_number) as sess:
        test_basic_add(sess)
# The same test code can be executed on both the QEMU simulation and on real hardware.
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_add_float(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test compiling the on-device runtime."""
    # float32 add requires hardware FPU support; skip boards without one.
    if not utils.ZEPHYR_BOARDS[board]["fpu"]:
        pytest.skip(f"FPU not enabled for {board}")
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2.5, 3.5], dtype="float32"), device=sess.device)
        assert (A_data.numpy() == np.array([2.5, 3.5])).all()
        B_data = tvm.nd.array(np.array([4.5], dtype="float32"), device=sess.device)
        assert (B_data.numpy() == np.array([4.5])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="float32"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()
        system_lib = sess.get_system_lib()
        system_lib.get_function("add")(A_data, B_data, C_data)
        # C[i] = A[i] + B[0] -> [2.5+4.5, 3.5+4.5].
        assert (C_data.numpy() == np.array([7, 8])).all()
    with _make_add_sess(
        workspace_dir,
        board,
        build_config,
        use_fvp,
        serial_number,
        dtype="float32",
    ) as sess:
        test_basic_add(sess)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_platform_timer(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test compiling the on-device runtime."""
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_basic_add(sess):
        A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), device=sess.device)
        assert (A_data.numpy() == np.array([2, 3])).all()
        B_data = tvm.nd.array(np.array([4], dtype="int8"), device=sess.device)
        assert (B_data.numpy() == np.array([4])).all()
        C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.array([0, 0])).all()
        system_lib = sess.get_system_lib()
        # Exercise the device-side timer by benchmarking the kernel instead of
        # calling it directly; a positive mean proves the platform timer works.
        time_eval_f = system_lib.time_evaluator(
            "add", sess.device, number=20, repeat=3, min_repeat_ms=40
        )
        result = time_eval_f(A_data, B_data, C_data)
        assert (C_data.numpy() == np.array([6, 7])).all()
        assert result.mean > 0
        assert len(result.results) == 3
    with _make_add_sess(workspace_dir, board, build_config, use_fvp, serial_number) as sess:
        test_basic_add(sess)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_relay(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Testing a simple relay graph"""
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"
    # Construct Relay program: z = x * x + 1 (elementwise).
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)
    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime)
    with _make_session(workspace_dir, board, mod, build_config, use_fvp, serial_number) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**mod.get_params())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        graph_mod.run(x=x_in)
        result = graph_mod.get_output(0).numpy()
        # Verify both the echoed input and the computed x*x + 1 against numpy.
        tvm.testing.assert_allclose(graph_mod.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_onnx(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Testing a simple ONNX model."""
    build_config = {"debug": microtvm_debug}
    this_dir = pathlib.Path(os.path.dirname(__file__))
    mnist_testdata = this_dir.parent / "testdata" / "mnist"
    # Prepare two MNIST sample images as (1, 28, 28) float32 batches.
    digit_2 = Image.open(mnist_testdata / "digit-2.jpg").resize((28, 28))
    digit_2 = np.asarray(digit_2).astype("float32")
    digit_2 = np.expand_dims(digit_2, axis=0)
    digit_9 = Image.open(mnist_testdata / "digit-9.jpg").resize((28, 28))
    digit_9 = np.asarray(digit_9).astype("float32")
    digit_9 = np.expand_dims(digit_9, axis=0)
    # Load ONNX model and convert to Relay.
    onnx_model = onnx.load(mnist_testdata / "mnist-8.onnx")
    shape = {"Input3": (1, 1, 28, 28)}
    relay_mod, params = relay.frontend.from_onnx(onnx_model, shape=shape, freeze_params=True)
    relay_mod = relay.transform.DynamicToStatic()(relay_mod)
    # We add the link-params=True option to ensure the model parameters are compiled in.
    # There is currently a bug preventing the host_driven environment from receiving
    # the model weights when set using graph_mod.set_input().
    # See: https://github.com/apache/tvm/issues/7567
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        executor = Executor("graph", {"link-params": True})
        runtime = Runtime("crt", {"system-lib": True})
        lowered = relay.build(relay_mod, target, params=params, executor=executor, runtime=runtime)
        graph = lowered.get_graph_json()
    with _make_session(
        workspace_dir, board, lowered, build_config, use_fvp, serial_number
    ) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            graph, session.get_system_lib(), session.device
        )
        # Send the digit-2 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_2))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 2
        # Send the digit-9 image and confirm that the correct result is returned.
        graph_mod.set_input("Input3", tvm.nd.array(digit_9))
        graph_mod.run()
        result = graph_mod.get_output(0).numpy()
        assert np.argmax(result) == 9
def check_result(
    temp_dir,
    relay_mod,
    board,
    map_inputs,
    out_shape,
    result,
    build_config,
    use_fvp,
    serial_number,
):
    """Build *relay_mod* for the board, run it on-device, and compare outputs.

    Parameters mirror the test fixtures: *map_inputs* maps input names to numpy
    arrays, while *out_shape*/*result* may each be a single value or a list for
    multi-output graphs. Raises via ``tvm.testing.assert_allclose`` on mismatch.
    """
    TOL = 1e-5
    runtime = Runtime("crt", {"system-lib": True})
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime)
    with _make_session(temp_dir, board, mod, build_config, use_fvp, serial_number) as session:
        rt_mod = tvm.micro.create_local_graph_executor(
            mod.get_graph_json(), session.get_system_lib(), session.device
        )
        # Set the compiled parameters once, then the per-test inputs.
        # (The original code redundantly called set_input(**mod.get_params()) twice.)
        rt_mod.set_input(**mod.get_params())
        for name, data in map_inputs.items():
            rt_mod.set_input(name, data)
        rt_mod.run()
        # Normalize single-output graphs to the list form before comparing.
        out_shapes = out_shape if isinstance(out_shape, list) else [out_shape]
        results = result if isinstance(result, list) else [result]
        for idx, shape in enumerate(out_shapes):
            out = tvm.nd.empty(shape, device=session.device)
            out = rt_mod.get_output(idx, out)
            tvm.testing.assert_allclose(out.numpy(), results[idx], rtol=TOL, atol=TOL)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_byoc_microtvm(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """This is a simple test case to check BYOC capabilities of microTVM"""
    build_config = {"debug": microtvm_debug}
    x = relay.var("x", shape=(10, 10))
    w0 = relay.var("w0", shape=(10, 10))
    w1 = relay.var("w1", shape=(10, 10))
    w2 = relay.var("w2", shape=(10, 10))
    w3 = relay.var("w3", shape=(10, 10))
    w4 = relay.var("w4", shape=(10, 10))
    w5 = relay.var("w5", shape=(10, 10))
    w6 = relay.var("w6", shape=(10, 10))
    w7 = relay.var("w7", shape=(10, 10))
    # Subgraphs annotated for the external C compiler (offloaded via BYOC).
    z0 = relay.add(x, w0)
    p0 = relay.subtract(z0, w1)
    q0 = relay.multiply(p0, w2)
    z1 = relay.add(x, w3)
    p1 = relay.subtract(z1, w4)
    q1 = relay.multiply(p1, w5)
    # Other parts on TVM
    z2 = relay.add(x, w6)
    q2 = relay.subtract(z2, w7)
    r = relay.concatenate((q0, q1, q2), axis=0)
    f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r)
    mod = tvm.IRModule()
    # Annotate and partition so the marked subgraphs go to the external codegen.
    ann = byoc.CcompilerAnnotator()
    mod["main"] = ann.visit(f)
    mod = tvm.relay.transform.PartitionGraph()(mod)
    mod = tvm.relay.transform.InferType()(mod)
    x_data = np.random.rand(10, 10).astype("float32")
    w_data = []
    for _ in range(8):
        w_data.append(np.random.rand(10, 10).astype("float32"))
    map_inputs = {"w{}".format(i): w_data[i] for i in range(8)}
    map_inputs["x"] = x_data
    # Expected result mirrors the three concatenated subgraphs computed in numpy.
    check_result(
        temp_dir=workspace_dir,
        relay_mod=mod,
        map_inputs=map_inputs,
        out_shape=(30, 10),
        result=np.concatenate(
            (
                ((x_data + w_data[0]) - w_data[1]) * w_data[2],
                ((x_data + w_data[3]) - w_data[4]) * w_data[5],
                x_data + w_data[6] - w_data[7],
            ),
            axis=0,
        ),
        board=board,
        build_config=build_config,
        use_fvp=use_fvp,
        serial_number=serial_number,
    )
def _make_add_sess_with_shape(temp_dir, board, shape, build_config, use_fvp, serial_number):
    """Open a device session running ``C[i] = A[i] + A[i]`` over an int8 tensor of *shape*."""
    operand = tvm.te.placeholder(shape, dtype="int8")
    doubled = tvm.te.compute(operand.shape, lambda i: operand[i] + operand[i], name="C")
    schedule = tvm.te.create_schedule(doubled.op)
    return _make_sess_from_op(
        temp_dir,
        board,
        "add",
        schedule,
        [operand, doubled],
        build_config,
        use_fvp,
        serial_number,
    )
@pytest.mark.parametrize(
    "shape,",
    [
        pytest.param((1 * 1024,), id="(1*1024)"),
        pytest.param((4 * 1024,), id="(4*1024)"),
        pytest.param((16 * 1024,), id="(16*1024)"),
    ],
)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_rpc_large_array(workspace_dir, board, microtvm_debug, shape, use_fvp, serial_number):
    """Test large RPC array transfer."""
    build_config = {"debug": microtvm_debug}
    # NOTE: run test in a nested function so cPython will delete arrays before closing the session.
    def test_tensors(sess):
        # Only the host<->device transfer is checked here; the kernel is never invoked.
        a_np = np.random.randint(low=-128, high=127, size=shape, dtype="int8")
        A_data = tvm.nd.array(a_np, device=sess.device)
        assert (A_data.numpy() == a_np).all()
        C_data = tvm.nd.array(np.zeros(shape, dtype="int8"), device=sess.device)
        assert (C_data.numpy() == np.zeros(shape)).all()
    with _make_add_sess_with_shape(
        workspace_dir, board, shape, build_config, use_fvp, serial_number
    ) as sess:
        test_tensors(sess)
@pytest.mark.xfail(strict=False, reason="See https://github.com/apache/tvm/issues/10297")
@tvm.testing.requires_micro
def test_autotune_conv2d(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test AutoTune for microTVM Zephyr"""
    if board != "qemu_x86":
        pytest.xfail(f"Autotune fails on {board}.")
    runtime = Runtime("crt", {"system-lib": True})
    build_config = {"debug": microtvm_debug}
    # Create a Relay model
    data_shape = (1, 3, 16, 16)
    weight_shape = (8, 3, 5, 5)
    data = relay.var("data", relay.TensorType(data_shape, "float32"))
    weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        kernel_layout="OIHW",
        out_dtype="float32",
    )
    f = relay.Function([data, weight], y)
    mod = tvm.IRModule.from_expr(f)
    mod = relay.transform.InferType()(mod)
    data_sample = np.random.rand(data_shape[0], data_shape[1], data_shape[2], data_shape[3]).astype(
        "float32"
    )
    weight_sample = np.random.rand(
        weight_shape[0], weight_shape[1], weight_shape[2], weight_shape[3]
    ).astype("float32")
    params = {mod["main"].params[1].name_hint: weight_sample}
    target = tvm.micro.testing.get_target("zephyr", board)
    pass_context = tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True})
    # Extract tunable tasks from the conv2d workload.
    with pass_context:
        tasks = tvm.autotvm.task.extract_from_program(mod["main"], {}, target)
    assert len(tasks) > 0
    config_main_stack_size = None
    if utils.ZEPHYR_BOARDS[board]["is_qemu"]:
        config_main_stack_size = 1536
    project_options = {
        "board": board,
        "verbose": 1,
        "project_type": "host_driven",
        "use_fvp": bool(use_fvp),
        "serial_number": serial_number,
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size
    # The module loader regenerates/flashes a Zephyr project per measurement.
    module_loader = tvm.micro.AutoTvmModuleLoader(
        template_project_dir=utils.TEMPLATE_PROJECT_DIR,
        project_options=project_options,
    )
    timeout = 200
    builder = tvm.autotvm.LocalBuilder(
        timeout=timeout,
        n_parallel=1,
        build_kwargs={"build_option": {"tir.disable_vectorize": True}},
        do_fork=True,
        build_func=tvm.micro.autotvm_build_func,
        runtime=runtime,
    )
    runner = tvm.autotvm.LocalRunner(
        number=1, repeat=1, timeout=timeout, module_loader=module_loader
    )
    measure_option = tvm.autotvm.measure_option(builder=builder, runner=runner)
    # Start from a fresh tuning log on every run.
    log_path = pathlib.Path("zephyr_autotune.log")
    if log_path.exists():
        log_path.unlink()
    n_trial = 10
    for task in tasks:
        tuner = tvm.autotvm.tuner.GATuner(task)
        tuner.tune(
            n_trial=n_trial,
            measure_option=measure_option,
            callbacks=[
                tvm.autotvm.callback.log_to_file(str(log_path)),
                tvm.autotvm.callback.progress_bar(n_trial, si_prefix="M"),
            ],
            si_prefix="M",
        )
        assert tuner.best_flops > 0
    check_tune_log(log_path)
    # Build without tuning
    with pass_context:
        lowered = tvm.relay.build(mod, target=target, runtime=runtime, params=params)
    temp_dir = utils.tempdir()
    with _make_session(temp_dir, board, lowered, build_config, use_fvp, serial_number) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            lowered.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**lowered.get_params())
        graph_mod.run(data=data_sample)
        expected_output = graph_mod.get_output(0).numpy()
        del graph_mod
    # Build using autotune logs
    with tvm.autotvm.apply_history_best(str(log_path)):
        with pass_context:
            lowered_tuned = tvm.relay.build(mod, target=target, runtime=runtime, params=params)
    temp_dir = utils.tempdir()
    with _make_session(
        temp_dir, board, lowered_tuned, build_config, use_fvp, serial_number
    ) as session:
        graph_mod = tvm.micro.create_local_graph_executor(
            lowered_tuned.get_graph_json(), session.get_system_lib(), session.device
        )
        graph_mod.set_input(**lowered_tuned.get_params())
        graph_mod.run(data=data_sample)
        output = graph_mod.get_output(0).numpy()
        del graph_mod
    # The tuned build must agree numerically with the untuned baseline.
    tvm.testing.assert_allclose(output, expected_output, rtol=1e-4, atol=1e-5)
@tvm.testing.requires_micro
@pytest.mark.skip(reason="due to https://github.com/apache/tvm/issues/13856")
def test_schedule_build_with_cmsis_dependency(workspace_dir, board, microtvm_debug, use_fvp):
    """Test Relay schedule with CMSIS dependency. This test shows if microTVM Auto tuning
    with Zephyr breaks if CMSIS dependency was required for a schedule.
    """
    build_config = {"debug": microtvm_debug}
    target = tvm.target.target.micro(
        utils.ZEPHYR_BOARDS[board]["model"], options=["-keys=arm_cpu,cpu"]
    )
    if not target.features.has_dsp:
        pytest.skip(f"ISA does not support DSP. target: {target}")
    # Create a Relay conv2d in NHWC/HWOI int8 form, which the arm_cpu DSP schedules handle.
    data_shape = (1, 16, 16, 3)
    weight_shape = (5, 5, 8, 3)
    data = relay.var("data", relay.TensorType(data_shape, "int8"))
    weight = relay.var("weight", relay.TensorType(weight_shape, "int8"))
    y = relay.nn.conv2d(
        data,
        weight,
        padding=(2, 2),
        kernel_size=(5, 5),
        data_layout="NHWC",
        kernel_layout="HWOI",
        out_dtype="int32",
    )
    func = relay.Function([data, weight], y)
    ir_mod = tvm.IRModule.from_expr(func)
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime)
    project_options = {
        "project_type": "host_driven",
        "verbose": bool(build_config.get("debug")),
        "board": board,
        "cmsis_path": os.getenv("CMSIS_PATH"),
        "use_fvp": bool(use_fvp),
    }
    project_dir = workspace_dir / "project"
    project = tvm.micro.generate_project(
        str(utils.TEMPLATE_PROJECT_DIR),
        mod,
        project_dir,
        project_options,
    )
    project.build()
    with open(project_dir / "CMakeLists.txt", "r") as cmake_f:
        cmake_content = cmake_f.read()
    # The generated build must pick up the CMSIS DSP and NN include paths.
    # (A duplicated "CMSIS/DSP/Include" assertion was removed here.)
    assert "CMSIS/DSP/Include" in cmake_content
    assert "CMSIS/DSP/Include/dsp" in cmake_content
    assert "CMSIS-NN/Include" in cmake_content
@tvm.testing.requires_micro
def test_debugging_enabled(workspace_dir):
    """Test debugging enabled for LED. `verbose=True` in project option enables
    debugging. For this test a physical board(nucleo_l4r5zi) is used instead of
    QEMU since LED config is not available on QEMU.
    """
    board = "nucleo_l4r5zi"
    project_options = {
        "project_type": "host_driven",
        "board": board,
        "verbose": True,
    }
    # Simple z = x * x + 1 model, built with the AOT executor.
    shape = (10,)
    dtype = "int8"
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)
    runtime = Runtime("crt", {"system-lib": True})
    executor = Executor("aot")
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime, executor=executor)
    project = tvm.micro.generate_project(
        str(utils.TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        project_options,
    )
    # Only a successful build is asserted; the project is never flashed or run.
    project.build()
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521", "mps3_an547"])
def test_qemu_make_fail(workspace_dir, board, microtvm_debug, serial_number):
    """Testing QEMU make fail.

    Deletes a generated ninja build file after a successful build, then expects
    the transport open to fail with a "QEMU setup failed" JSON-RPC error.
    """
    if not utils.ZEPHYR_BOARDS[board]["is_qemu"]:
        # NOTE: pytest.skip's `msg` keyword was deprecated in pytest 7 and
        # removed in pytest 8 — pass the reason positionally.
        pytest.skip("Only for QEMU targets.")
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "float32"
    # Construct Relay program: z = x * x + 1 (elementwise).
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)
    target = tvm.micro.testing.get_target("zephyr", board)
    executor = Executor("aot")
    runtime = Runtime("crt", {"system-lib": True})
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered = relay.build(ir_mod, target, executor=executor, runtime=runtime)
    project_options = {
        "project_type": "host_driven",
        "verbose": bool(build_config.get("debug")),
        "board": board,
    }
    project = tvm.micro.generate_project(
        str(utils.TEMPLATE_PROJECT_DIR),
        lowered,
        workspace_dir / "project",
        project_options,
    )
    project.build()
    file_path = workspace_dir / "project" / "build" / "build.ninja"
    assert file_path.is_file(), f"[{file_path}] does not exist."
    # Remove a file to create make failure.
    os.remove(file_path)
    project.flash()
    with pytest.raises(server.JSONRPCError) as excinfo:
        project.transport().open()
    assert "QEMU setup failed" in str(excinfo.value)
# Allow running this test file directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
| 25,396 | 35.281429 | 110 | py |
tvm | tvm-main/tests/micro/zephyr/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Pull in the shared microTVM fixtures (board, workspace_dir, microtvm_debug, ...).
pytest_plugins = [
    "tvm.micro.testing.pytest_plugin",
]
import pytest
def pytest_addoption(parser):
    """Register the --use-fvp flag that switches tests onto the FVP emulator."""
    fvp_option = {
        "action": "store_true",
        "default": False,
        "help": "If set true, use the FVP emulator to run the test",
    }
    parser.addoption("--use-fvp", **fvp_option)
@pytest.fixture
def use_fvp(request):
    """Expose the --use-fvp command line flag as a boolean fixture."""
    flag_value = request.config.getoption("--use-fvp")
    return flag_value
@pytest.fixture(autouse=True)
def xfail_on_fvp(request, use_fvp):
    """mark the tests as xfail if running on fvp."""
    has_marker = request.node.get_closest_marker("xfail_on_fvp") is not None
    if has_marker and use_fvp:
        xfail_marker = pytest.mark.xfail(reason="checking corstone300 reliability on CI")
        request.node.add_marker(xfail_marker)
def pytest_configure(config):
    """Register the custom xfail_on_fvp marker so pytest does not warn about it."""
    marker_doc = "xfail_on_fvp(): mark test as xfail on fvp"
    config.addinivalue_line("markers", marker_doc)
| 1,619 | 29 | 82 | py |
tvm | tvm-main/tests/micro/zephyr/test_zephyr_armv7m.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import pytest
import numpy as np
import tvm
import tvm.rpc
import tvm.micro
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.backend import Executor, Runtime
from . import utils
def _open_tflite_model():
    """Download the quantized MNIST TFLite model and convert it to a Relay module.

    Returns the (relay_mod, params) pair produced by the TFLite frontend.
    """
    # Import TFLite model
    model_url = "https://github.com/tlc-pack/web-data/raw/b2f3c02427b67267a00fd968ba1fce28fc833028/testdata/microTVM/model/mnist_model_quant.tflite"
    model_path = download_testdata(model_url, "mnist_model_quant.tflite", module="model")
    tflite_model_buf = open(model_path, "rb").read()
    # Newer tflite packages expose Model at the package root; older ones nest it
    # under tflite.Model — fall back when the attribute is missing.
    try:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    relay_mod, params = relay.frontend.from_tflite(tflite_model)
    return relay_mod, params
def _get_test_data(testdata_dir):
    """Load the MNIST digit-2 sample as a flat uint8 vector plus the model output shape."""
    from PIL import Image
    sample = None
    for file_name in ["digit-2.jpg"]:
        image = Image.open(testdata_dir / file_name).resize((28, 28))
        pixels = np.asarray(image).astype("uint8")
        sample = np.reshape(pixels, -1)
    return sample, (1, 10)
def _apply_desired_layout_simd(relay_mod):
    """Convert conv2d ops to the NHWC/HWOI layout used by the arm_cpu SIMD schedules."""
    layout_map = {"qnn.conv2d": ["NHWC", "HWOI"], "nn.conv2d": ["NHWC", "HWOI"]}
    pass_list = [
        relay.transform.RemoveUnusedFunctions(),
        relay.transform.ConvertLayout(layout_map),
    ]
    with tvm.transform.PassContext(opt_level=3):
        return tvm.transform.Sequential(pass_list)(relay_mod)
def _apply_desired_layout_no_simd(relay_mod):
    """Convert conv2d ops to NHWC/HWIO, a layout the SIMD schedules do not accept."""
    layout_map = {"qnn.conv2d": ["NHWC", "HWIO"], "nn.conv2d": ["NHWC", "HWIO"]}
    pass_list = [
        relay.transform.RemoveUnusedFunctions(),
        relay.transform.ConvertLayout(layout_map),
    ]
    with tvm.transform.PassContext(opt_level=3):
        return tvm.transform.Sequential(pass_list)(relay_mod)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(
    ["mps2_an521", "stm32f746g_disco", "nucleo_f746zg", "nucleo_l4r5zi", "nrf5340dk_nrf5340_cpuapp"]
)
@pytest.mark.xfail(reason="due https://github.com/apache/tvm/issues/12619")
def test_armv7m_intrinsic(workspace_dir, board, microtvm_debug, serial_number):
    """Testing a ARM v7m SIMD extension.

    Builds the same MNIST model twice — once with arm_cpu SIMD schedules and
    once without — and checks both the prediction and that SIMD is faster.
    """
    build_config = {"debug": microtvm_debug}
    this_dir = pathlib.Path(os.path.dirname(__file__))
    testdata_dir = this_dir.parent / "testdata" / "mnist"
    relay_mod, params = _open_tflite_model()
    sample, output_shape = _get_test_data(testdata_dir)
    relay_mod_simd = _apply_desired_layout_simd(relay_mod)
    # kernel layout "HWIO" is not supported by arm_cpu SIMD extension (see tvm\python\relay\op\strategy\arm_cpu.py)
    relay_mod_no_simd = _apply_desired_layout_no_simd(relay_mod)
    # -keys=arm_cpu,cpu selects the SIMD-capable schedules; -keys=cpu the generic ones.
    target = tvm.target.target.micro(utils.ZEPHYR_BOARDS[board]["model"], options=["-keys=cpu"])
    target_simd = tvm.target.target.micro(
        utils.ZEPHYR_BOARDS[board]["model"], options=["-keys=arm_cpu,cpu"]
    )
    executor = Executor("aot", {"unpacked-api": True, "interface-api": "c"})
    runtime = Runtime("crt")
    workspace_dir_simd = workspace_dir / "simd"
    workspace_dir_no_simd = workspace_dir / "nosimd"
    os.makedirs(workspace_dir_simd, exist_ok=True)
    os.makedirs(workspace_dir_no_simd, exist_ok=True)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered_simd = relay.build(
            relay_mod_simd, target_simd, params=params, runtime=runtime, executor=executor
        )
        lowered_no_simd = relay.build(
            relay_mod_no_simd, target, params=params, runtime=runtime, executor=executor
        )
    simd_project, _ = utils.generate_project(
        workspace_dir_simd,
        board,
        lowered_simd,
        build_config,
        sample,
        output_shape,
        "float32",
        True,
        serial_number,
    )
    result_simd, time_simd = utils.run_model(simd_project)
    no_simd_project, _ = utils.generate_project(
        workspace_dir_no_simd,
        board,
        lowered_no_simd,
        build_config,
        sample,
        output_shape,
        "float32",
        False,
        serial_number,
    )
    result_no_simd, time_no_simd = utils.run_model(no_simd_project)
    # Both builds must classify the digit-2 sample identically.
    assert result_no_simd == result_simd == 2
    # Time performance measurements on QEMU emulator are always equal to zero.
    if board not in [
        "mps2_an521",
        "mps3_an547",
    ]:
        assert time_no_simd > time_simd
# Allow running this test file directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
| 5,534 | 30.271186 | 148 | py |
tvm | tvm-main/tests/micro/zephyr/test_zephyr_aot_exec.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm
import tvm.testing
import tvm.micro.testing
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from . import utils
def _make_session(workspace_dir, board, mod, build_config, use_fvp, serial_number):
    """Generate, build, and flash a host-driven Zephyr project for *mod*.

    Returns an open ``tvm.micro.Session``; AOT execution needs a larger main
    stack than the graph-executor tests, hence the per-platform sizes below.
    """
    config_main_stack_size = None
    if utils.ZEPHYR_BOARDS[board]["is_qemu"]:
        # fyi: qemu_riscv64 seems to be the greediest stack user
        config_main_stack_size = 4096
    else:
        # increase stack size for HW platforms
        config_main_stack_size = 2048
    project_options = {
        "project_type": "host_driven",
        "verbose": bool(build_config.get("debug")),
        "board": board,
        "arm_fvp_path": "/opt/arm/FVP_Corstone_SSE-300/models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55",
        "use_fvp": bool(use_fvp),
        "serial_number": serial_number,
    }
    if config_main_stack_size is not None:
        project_options["config_main_stack_size"] = config_main_stack_size
    project = tvm.micro.generate_project(
        str(utils.TEMPLATE_PROJECT_DIR),
        mod,
        workspace_dir / "project",
        project_options,
    )
    project.build()
    project.flash()
    return tvm.micro.Session(project.transport())
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_relay(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Testing a simple relay graph"""
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"
    # Construct Relay program: z = x * x + 1 (elementwise).
    x = relay.var("x", relay.TensorType(shape=shape, dtype=dtype))
    xx = relay.multiply(x, x)
    z = relay.add(xx, relay.const(np.ones(shape=shape, dtype=dtype)))
    func = relay.Function([x], z)
    ir_mod = tvm.IRModule.from_expr(func)
    runtime = Runtime("crt", {"system-lib": True})
    executor = Executor("aot")
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(ir_mod, target=target, runtime=runtime, executor=executor)
    with _make_session(workspace_dir, board, mod, build_config, use_fvp, serial_number) as session:
        # Run through the AOT executor rather than the graph executor.
        aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
        x_in = np.random.randint(10, size=shape[0], dtype=dtype)
        aot_executor.run(x=x_in)
        result = aot_executor.get_output(0).numpy()
        tvm.testing.assert_allclose(aot_executor.get_input(0).numpy(), x_in)
        tvm.testing.assert_allclose(result, x_in * x_in + 1)
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521"])
@pytest.mark.xfail_on_fvp()
def test_aot_executor(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test use of the AOT executor with microTVM.

    Exercises the AOT executor introspection API (input indices/counts) and
    input-buffer aliasing, not just end-to-end inference.
    """
    build_config = {"debug": microtvm_debug}
    shape = (10,)
    dtype = "int8"
    # Construct Relay program.
    # NOTE: a leftover debug print (mislabeled "test_relay") was removed here.
    relay_mod = tvm.relay.fromtext(
        """
      #[version = "0.0.5"]
      def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) {
          %0 = %a + %b;
          %0
      }"""
    )
    runtime = Runtime("crt", {"system-lib": True})
    executor = Executor("aot")
    target = tvm.micro.testing.get_target("zephyr", board)
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        mod = tvm.relay.build(relay_mod, target=target, runtime=runtime, executor=executor)
    def do_test():
        aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
        # Introspection: named inputs resolve to stable indices and counts.
        assert aot_executor.get_input_index("a") == 0
        assert aot_executor.get_input_index("b") == 1
        assert aot_executor.get_num_inputs() == 2
        assert aot_executor.get_num_outputs() == 1
        A_np = np.array([[2, 3]], dtype="uint8")
        B_np = np.array([[4, 7]], dtype="uint8")
        A_data = aot_executor.get_input("a").copyfrom(A_np)
        B_data = aot_executor.get_input("b").copyfrom(B_np)
        aot_executor.run()
        out = aot_executor.get_output(0)
        assert (out.numpy() == np.array([6, 10])).all()
        # set_input must write through to the previously-fetched input buffer.
        B_np_new = np.array([[5, 8]])
        aot_executor.set_input("b", B_np_new)
        assert (B_data.numpy() == B_np_new).all()
    with _make_session(workspace_dir, board, mod, build_config, use_fvp, serial_number) as session:
        do_test()
# Allow running this test file directly, outside of pytest collection.
if __name__ == "__main__":
    tvm.testing.main()
| 5,331 | 33.849673 | 110 | py |
tvm | tvm-main/tests/micro/zephyr/test_ms_tuning.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
from types import MappingProxyType
import pathlib
import json
import tvm
from tvm import relay
import tvm.micro.testing
from tvm.relay.backend import Executor
from tvm.contrib import graph_executor
from tvm import meta_schedule as ms
from tvm.contrib.micro.meta_schedule.local_builder_micro import get_local_builder_micro
from tvm.contrib.micro.meta_schedule.rpc_runner_micro import get_rpc_runner_micro
def create_relay_module():
    """Build a small single-conv2d Relay module for tuning tests.

    Returns a tuple ``(mod, params, model_info)`` where ``mod`` is the
    type-inferred IRModule, ``params`` maps the weight parameter name to a
    random float32 sample, and ``model_info`` describes the input tensor
    (name, shape, dtype) for callers that feed data into the model.
    """
    input_shape = (1, 3, 16, 16)
    kernel_shape = (8, 3, 5, 5)
    input_var = relay.var("data", relay.TensorType(input_shape, "float32"))
    kernel_var = relay.var("weight", relay.TensorType(kernel_shape, "float32"))
    conv = relay.nn.conv2d(
        input_var,
        kernel_var,
        padding=(2, 2),
        kernel_size=(5, 5),
        kernel_layout="OIHW",
        out_dtype="float32",
    )
    func = relay.Function([input_var, kernel_var], conv)
    module = relay.transform.InferType()(tvm.IRModule.from_expr(func))
    # Random weights, keyed by the name Relay assigned to the second parameter.
    kernel_sample = np.random.rand(*kernel_shape).astype("float32")
    params = {module["main"].params[1].name_hint: kernel_sample}
    model_info = {
        "in_tensor": "data",
        "in_shape": input_shape,
        "in_dtype": "float32",
    }
    return module, params, model_info
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521", "mps3_an547", "nucleo_f746zg", "stm32f746g_disco"])
def test_ms_tuning_conv2d(workspace_dir, board, microtvm_debug, use_fvp, serial_number):
    """Test meta-schedule tuning for microTVM Zephyr.

    End-to-end flow: tune the conv2d model with meta-schedule over an RPC
    runner on the target board (or QEMU), compile with the tuning database,
    run on device via AoT, and compare the output against an untuned CRT
    reference build executed on the host CPU.
    """
    mod, params, model_info = create_relay_module()
    input_name = model_info["in_tensor"]
    input_shape = model_info["in_shape"]
    input_dtype = model_info["in_dtype"]
    data_sample = np.random.rand(*input_shape).astype(input_dtype)
    platform = "zephyr"
    project_options = {
        "board": board,
        "verbose": microtvm_debug,
        "project_type": "host_driven",
        "use_fvp": bool(use_fvp),
        "serial_number": serial_number,
        "config_main_stack_size": 4096,
    }
    # The RPC runner can drive several devices; normalize serial_number into
    # a list for the runner while the project API gets a single string.
    if isinstance(serial_number, list):
        project_options["serial_number"] = serial_number[0]  # project_api expects an string.
        serial_numbers = serial_number
    else:
        if serial_number is not None:  # use a single device in tuning
            serial_numbers = [serial_number]
        else:  # use two dummy serial numbers (for testing with QEMU)
            serial_numbers = [str(i) for i in range(2)]
    # NOTE(review): `boards` is loaded here but not used afterwards in the
    # visible body — possibly leftover; confirm before removing.
    boards_file = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr")) / "boards.json"
    with open(boards_file) as f:
        boards = json.load(f)
    target = tvm.micro.testing.get_target("zephyr", board)
    runtime = relay.backend.Runtime("crt", {"system-lib": True})
    executor = Executor("aot", {"link-params": True})
    # This line is necessary for link-params to take effect during
    # task extraction and relay.build(...).
    mod = mod.with_attr("executor", executor)
    builder = get_local_builder_micro()
    with ms.Profiler() as profiler:
        with get_rpc_runner_micro(
            platform=platform,
            options=project_options,
            session_timeout_sec=120,
            serial_numbers=serial_numbers,
        ) as runner:
            db: ms.Database = ms.relay_integration.tune_relay(
                mod=mod,
                params=params,
                target=target,
                builder=builder,
                runner=runner,
                strategy="evolutionary",
                num_trials_per_iter=2,
                max_trials_per_task=10,
                max_trials_global=100,
                work_dir=str(workspace_dir),
                module_equality="ignore-ndarray",
            )
        # Build model using meta_schedule logs
        opt_mod, opt_params = relay.optimize(mod, target)
        ms_mod: tvm.runtime.Module = ms.relay_integration.compile_relay(
            database=db,
            mod=opt_mod,
            target=target,
            params=opt_params,
            pass_config=MappingProxyType(
                {
                    "relay.backend.use_meta_schedule": True,
                    "relay.backend.tir_converter": "default",
                    "tir.disable_vectorize": True,
                }
            ),
            executor=executor,
            runtime=runtime,
        )
    print(profiler.table())
    # Flash the tuned model to the board and run one inference via AoT.
    project = tvm.micro.generate_project(
        str(tvm.micro.get_microtvm_template_projects(platform)),
        ms_mod,
        str(workspace_dir / "project"),
        options=project_options,
    )
    project.build()
    project.flash()
    with tvm.micro.Session(project.transport()) as session:
        aot_executor = tvm.runtime.executor.aot_executor.AotModule(session.create_aot_executor())
        aot_executor.get_input(0).copyfrom(data_sample)
        result = aot_executor.module.time_evaluator("run", session.device, number=3)()
        output = aot_executor.get_output(0).numpy()
    # Build reference model (without tuning)
    dev = tvm.cpu()
    target = tvm.micro.testing.get_target("crt")
    with tvm.transform.PassContext(
        opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["AlterOpLayout"]
    ):
        ref_mod = relay.build(
            mod,
            target=target,
            params=params,
            runtime=runtime,
        )
    ref_mod.export_library(workspace_dir / "compiled_lib2.so")
    mod2: tvm.runtime.Module = tvm.runtime.load_module(workspace_dir / "compiled_lib2.so")
    graph_mod = graph_executor.GraphModule(mod2["default"](dev))
    graph_mod.set_input(input_name, data_sample)
    graph_mod.run()
    ref_output = graph_mod.get_output(0).numpy()
    # Device output must match the untuned host reference.
    assert np.allclose(output, ref_output, rtol=1e-4, atol=2e-4), "FAILED"


if __name__ == "__main__":
    tvm.testing.main()
| 6,678 | 35.298913 | 98 | py |
tvm | tvm-main/tests/micro/zephyr/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import json
import pathlib
import tarfile
import tempfile
import logging
import numpy as np
from urllib.request import urlopen, urlretrieve
from urllib.error import HTTPError
import json
import requests
import tvm.micro
from tvm.micro import export_model_library_format
from tvm.micro.testing.utils import create_header_file
from tvm.micro.testing.utils import (
mlf_extract_workspace_size_bytes,
aot_transport_init_wait,
aot_transport_find_message,
)
TEMPLATE_PROJECT_DIR = pathlib.Path(tvm.micro.get_microtvm_template_projects("zephyr"))
_LOG = logging.getLogger(__name__)
def zephyr_boards() -> dict:
    """Load and return the Zephyr board property table from ``boards.json``."""
    boards_path = TEMPLATE_PROJECT_DIR / "boards.json"
    with open(boards_path) as boards_file:
        return json.load(boards_file)


# Board property table, loaded once at import time.
ZEPHYR_BOARDS = zephyr_boards()
def build_project(
    temp_dir, zephyr_board, mod, build_config, serial_number, simd=False, extra_files_tar=None
):
    """Export ``mod`` in Model Library Format and build a Zephyr project from it.

    Parameters
    ----------
    temp_dir : path-like
        Directory in which the ``project`` subdirectory is created.
    zephyr_board : str
        Zephyr board identifier.
    mod : built relay module
        The lowered model to export.
    build_config : dict
        Only the ``"debug"`` key is read here (enables verbose builds).
    serial_number : str or None
        Device serial number forwarded to the project API.
    simd : bool
        When True, enlarges the main stack (used by the CMSIS/SIMD path).
    extra_files_tar : str or None
        Optional tarball of extra files to inject into the project.

    Returns
    -------
    (project, project_dir) tuple.
    """
    project_dir = temp_dir / "project"
    with tempfile.TemporaryDirectory() as tar_temp_dir:
        model_tar_path = pathlib.Path(tar_temp_dir) / "model.tar"
        export_model_library_format(mod, model_tar_path)
        workspace_size = mlf_extract_workspace_size_bytes(model_tar_path)
        project_options = {
            "extra_files_tar": extra_files_tar,
            "project_type": "aot_standalone_demo",
            "verbose": bool(build_config.get("debug")),
            "board": zephyr_board,
            "serial_number": serial_number,
            "compile_definitions": [
                # TODO(mehrdadh): It fails without offset.
                f"-DWORKSPACE_SIZE={workspace_size + 128}",
            ],
        }
        if simd:
            project_options["config_main_stack_size"] = 1536
        project = tvm.micro.project.generate_project_from_mlf(
            str(TEMPLATE_PROJECT_DIR), project_dir, model_tar_path, project_options
        )
        project.build()
    return project, project_dir
# TODO move CMSIS integration to microtvm_api_server.py
# see https://discuss.tvm.apache.org/t/tvm-capturing-dependent-libraries-of-code-generated-tir-initially-for-use-in-model-library-format/11080
def loadCMSIS(temp_dir):
    """Download the CMSIS DSP/NN include headers from GitHub into ``temp_dir``.

    Uses the GitHub trees API to enumerate the header files under the
    ``CMSIS/DSP/Include``, ``CMSIS/DSP/Include/dsp`` and ``CMSIS/NN/Include``
    directories and fetches each one via raw.githubusercontent.com.
    """
    REPO_PATH = "ARM-software/CMSIS_5"
    BRANCH = "master"
    API_PATH_URL = f"https://api.github.com/repos/{REPO_PATH}/git/trees"
    RAW_PATH_URL = f"https://raw.githubusercontent.com/{REPO_PATH}/{BRANCH}"

    url = "https://api.github.com/repos/ARM-software/CMSIS_5/git/trees/master?recursive=1"
    r = requests.get(url)
    # FIX: fail fast with a clear error on an unsuccessful API response
    # (e.g. GitHub rate limiting). Previously res["tree"] below raised a
    # confusing KeyError when the request failed.
    r.raise_for_status()
    res = r.json()

    # Map each include directory we care about to its git tree sha.
    include_trees = {}
    for file in res["tree"]:
        if file["path"] in {"CMSIS/DSP/Include", "CMSIS/DSP/Include/dsp", "CMSIS/NN/Include"}:
            include_trees.update({file["path"]: file["sha"]})

    for path, sha in include_trees.items():
        url = f"{API_PATH_URL}/{sha}"
        content = json.load(urlopen(url))
        # The "dsp" subdirectory keeps its relative layout under temp_dir.
        temp_path = f"{temp_dir}"
        if path == "CMSIS/DSP/Include/dsp":
            temp_path = f"{temp_dir}/dsp"
        if not os.path.isdir(temp_path):
            os.makedirs(temp_path)
        for item in content["tree"]:
            if item["type"] == "blob":
                file_name = item["path"]
                file_url = f"{RAW_PATH_URL}/{path}/{file_name}"
                print(file_name, " ", file_url)
                try:
                    urlretrieve(file_url, f"{temp_path}/{file_name}")
                except HTTPError as e:
                    print(f"Failed to download {file_url}: {e}")
def run_model(project):
    """Flash the built project and run one inference over the AoT transport.

    Sends the ``infer%`` command to the device, waits for a line of the form
    ``result:<class>:<time_ms>`` and returns ``(result, time)`` as ints.
    """
    project.flash()
    with project.transport() as transport:
        aot_transport_init_wait(transport)
        transport.write(b"infer%", timeout_sec=5)
        result_line = aot_transport_find_message(transport, "result", timeout_sec=60)

    # Parse "...:<result>:<time>" — field 1 is the predicted class,
    # field 2 the inference time in ms.
    result_line = result_line.strip("\n")
    result_line = result_line.split(":")
    result = int(result_line[1])
    time = int(result_line[2])
    _LOG.info(f"Result: {result}\ttime: {time} ms")
    return result, time
def generate_project(
    temp_dir,
    board,
    lowered,
    build_config,
    sample,
    output_shape,
    output_type,
    load_cmsis,
    serial_number,
):
    """Package test data (and optionally CMSIS headers) and build a project.

    Creates a gzipped tar containing generated C headers for the input
    sample and a zeroed output buffer, plus the CMSIS include tree when
    ``load_cmsis`` is True, then hands it to ``build_project`` as
    ``extra_files_tar``. Returns ``(project, project_dir)``.
    """
    with tempfile.NamedTemporaryFile() as tar_temp_file:
        with tarfile.open(tar_temp_file.name, "w:gz") as tf:
            with tempfile.TemporaryDirectory() as tar_temp_dir:
                model_files_path = pathlib.Path(tar_temp_dir) / "include"
                model_files_path.mkdir(parents=True)
                if load_cmsis:
                    # Download CMSIS headers and add them under "include/".
                    loadCMSIS(model_files_path)
                    tf.add(
                        model_files_path, arcname=os.path.relpath(model_files_path, tar_temp_dir)
                    )
            # Input sample and an all-zeros output placeholder as C headers.
            create_header_file("input_data", sample, "include/tvm", tf)
            create_header_file(
                "output_data", np.zeros(shape=output_shape, dtype=output_type), "include/tvm", tf
            )
        # Build while the NamedTemporaryFile still exists on disk.
        project, project_dir = build_project(
            temp_dir,
            board,
            lowered,
            build_config,
            serial_number,
            simd=load_cmsis,
            extra_files_tar=tar_temp_file.name,
        )
    return project, project_dir
| 6,011 | 32.775281 | 142 | py |
tvm | tvm-main/tests/micro/zephyr/test_zephyr_aot_exec_standalone.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import numpy as np
import tvm
import tvm.testing
import tvm.micro.testing
import tvm.relay as relay
from tvm.relay.backend import Executor, Runtime
from tvm.contrib.download import download_testdata
from . import utils
@tvm.testing.requires_micro
@pytest.mark.skip_boards(["mps2_an521", "mps3_an547"])
def test_tflite(workspace_dir, board, microtvm_debug, serial_number):
    """Testing a TFLite model.

    Downloads the MLPerf Tiny keyword-spotting reference model, builds it
    with the AoT executor for the given Zephyr board, runs one int8 sample
    on the device and checks the predicted class.
    """
    input_shape = (1, 49, 10, 1)
    output_shape = (1, 12)
    build_config = {"debug": microtvm_debug}

    model_url = "https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite"
    model_path = download_testdata(model_url, "kws_ref_model.tflite", module="model")

    # Import TFLite model
    tflite_model_buf = open(model_path, "rb").read()
    try:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

    # Load TFLite model and convert to Relay.
    # FIX: the dtype_dict key used to be "input_1 " (trailing space), which
    # never matched the "input_1" tensor named in shape_dict, so the "int8"
    # dtype entry was silently ignored by the frontend.
    relay_mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict={"input_1": input_shape}, dtype_dict={"input_1": "int8"}
    )

    target = tvm.micro.testing.get_target("zephyr", board)
    executor = Executor(
        "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 4}
    )
    runtime = Runtime("crt")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered = relay.build(relay_mod, target, params=params, runtime=runtime, executor=executor)

    # A pre-quantized int8 sample with known label 6.
    sample_url = "https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy"
    sample_path = download_testdata(sample_url, "keyword_spotting_int8_6.pyc.npy", module="data")
    sample = np.load(sample_path)

    project, _ = utils.generate_project(
        workspace_dir,
        board,
        lowered,
        build_config,
        sample,
        output_shape,
        "int8",
        False,
        serial_number,
    )
    result, _ = utils.run_model(project)
    assert result == 6


if __name__ == "__main__":
    tvm.testing.main()
| 3,070 | 34.298851 | 168 | py |
tvm | tvm-main/tests/micro/zephyr/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Testing infrastructure for microTVM Zephyr """
| 837 | 43.105263 | 62 | py |
tvm | tvm-main/tests/micro/stm32/test_code_emitter.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import struct
import sys
import numpy as np
import tensorflow as tf
import tvm
import tvm.relay as relay
from tvm.micro.contrib import stm32
from tvm.contrib.download import download_testdata
from tvm import testing
import conftest
NUM_ITERATIONS = 10
# =========================================================
# get_data
# =========================================================
def get_data(in_data_shapes, in_data_dtypes):
"""Generate a uint8 image."""
assert len(in_data_shapes) == 1, "Only single input models are supported."
in_data = OrderedDict()
for shape_name, shape in in_data_shapes.items():
for dtype_name, dtype in in_data_dtypes.items():
if dtype_name == shape_name:
in_data[shape_name] = np.random.uniform(size=shape).astype(dtype)
in_data = np.random.uniform(size=shape).astype("uint8")
break
if shape_name not in in_data.keys():
raise ValueError("Shape and dtype dictionaries do not fit.")
return in_data
# ==================================================================
# dump_image
# ==================================================================
def dump_image(filename, image):
# Flatten image
image_data = image.flatten()
outputRaw = []
# Raw binary format
for i in range(0, len(image_data)):
outputRaw.append(struct.pack("<B", int(image_data[i]) & 0xFF))
# Dump image in raw binary format
f = open(filename, "wb")
for i in range(0, len(outputRaw)):
f.write(outputRaw[i])
f.close()
# ==================================================================
# scale_input_data
# ==================================================================
def scale_input_data(input_details, data):
if input_details["dtype"] == np.uint8 or input_details["dtype"] == np.int8:
input_scale, input_zero_point = input_details["quantization"]
print(
"== TFLite input quantization: scale={}, zero={}".format(input_scale, input_zero_point)
)
data = data / input_scale + input_zero_point
data = data.astype(input_details["dtype"])
return data
# ==================================================================
# scale_output_data
# ==================================================================
def scale_output_data(output_details, data):
if output_details["dtype"] == np.uint8 or output_details["dtype"] == np.int8:
output_scale, output_zero_point = output_details["quantization"]
print(
"== TFLite output quantization: scale={}, zero={}".format(
output_scale, output_zero_point
)
)
data = data.astype(np.float32)
data = (data - output_zero_point) * output_scale
return data
# ========================================================
# get_tflite_model
# ========================================================
def get_tflite_model(model_path):
#
# Load TFLite model and allocate tensors.
#
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
#
# Get input and output tensors.
#
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
#
# Figure out shapes and
#
shape_dict = {}
dtype_dict = {}
for input in input_details:
input_name = input["name"]
input_shape = input["shape"].tolist()
input_dtype = str(np.dtype(input["dtype"]))
shape_dict[input_name] = input_shape
dtype_dict[input_name] = input_dtype
#
# Save the model
#
#
# Load the TFLite Model for TVM:
#
# https://docs.tvm.ai/tutorials/frontend/from_tflite.html
# https://jackwish.net/tflite/docs/
model_buf = open(model_path, "rb").read()
#
# Get TFLite model from buffer
#
try:
import tflite
model = tflite.Model.GetRootAsModel(model_buf, 0)
assert isinstance(model, tflite.Model)
except AttributeError:
import tflite.Model
model = tflite.Model.Model.GetRootAsModel(model_buf, 0)
assert isinstance(model, tflite.Model.Model)
print("TVM: Importing a TFLite model ...")
return model, shape_dict, dtype_dict
# ========================================================
# extract_tflite_quantization
# ========================================================
def _make_qnn_params(quantization):
qnn_params = {}
qnn_params["min"] = quantization.MinAsNumpy()
qnn_params["max"] = quantization.MaxAsNumpy()
qnn_params["scale"] = quantization.ScaleAsNumpy()
qnn_params["zero_point"] = quantization.ZeroPointAsNumpy()
qnn_params["dim"] = quantization.QuantizedDimension()
# print(" Quantization: ({}, {}), s={}, z={}, dim={}".format(min, max, scale, zero_point, dim))
return qnn_params
def extract_tflite_quantization(model):
    """Collect quantization params for a model's input and output tensors.

    Returns a dict mapping the stm32 emitter's tensor names to the qnn
    params produced by ``_make_qnn_params``.
    """
    assert model.SubgraphsLength() == 1, "only support one subgraph (main subgraph)"
    subgraph = model.Subgraphs(0)
    quantization_info = {}
    # model inputs / outputs
    model_inputs = subgraph.InputsAsNumpy()
    model_outputs = subgraph.OutputsAsNumpy()
    for node_id in model_inputs:
        tensor = subgraph.Tensors(node_id)
        tensor_name = tensor.Name().decode("utf-8")
        tensor_type = tensor.Type()
        dl_tensor_name = stm32.get_input_tensor_name(tensor_name)
        quantization = tensor.Quantization()
        if quantization is not None:
            qnn_params = _make_qnn_params(quantization)
            quantization_info[dl_tensor_name] = qnn_params
    for node_id in model_outputs:
        tensor = subgraph.Tensors(node_id)
        tensor_name = tensor.Name().decode("utf-8")
        tensor_type = tensor.Type()
        #
        # TODO: TVM does not preserve the output tensor names.
        # Eventually, we should be able to form a valid name.
        #
        dl_tensor_name = stm32.get_output_tensor_name(tensor_name, 0)
        quantization = tensor.Quantization()
        if quantization is not None:
            qnn_params = _make_qnn_params(quantization)
            quantization_info[dl_tensor_name] = qnn_params
    return quantization_info
# ========================================================
# run_tflite_model
# ========================================================
def run_tflite_model(model_path, image_data):
#
# Load TFLite model and allocate tensors.
#
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
#
# Get input and output tensors.
#
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
#
# Run test images
#
tf_results = np.empty(shape=[NUM_ITERATIONS, 10], dtype=np.float)
for i, image in enumerate(image_data):
#
# Normalize the input data
#
image = image / 255.0
image = scale_input_data(input_details, image)
interpreter.set_tensor(input_details["index"], image)
interpreter.invoke()
tf_results[i] = interpreter.get_tensor(output_details["index"])
tf_results[i] = scale_output_data(output_details, tf_results[i])
print(f"== [{i}] TFLite Output:")
print(tf_results[i])
return tf_results
# ========================================================
# run_tvm_model
# ========================================================
def run_tvm_model(build_dir, model_name, target_dir, image_path):
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
tvm_results_name = os.path.join(build_dir, "tvm_results.txt")
#
# Build the model
#
tvm_dir = os.path.join(curr_path, "..", "..", "..")
test_dir = os.path.join(tvm_dir, "tests", "crt", "contrib", "stm32")
command = f"make -f {test_dir}/Makefile TVM_PATH={tvm_dir} MODEL_PATH={target_dir} BUILD_PATH={build_dir} IMAGE_PATH={image_path}"
print(f"{command}")
os.system(command)
#
# Run
#
command = f"{target_dir}/{model_name}.exe"
print(f"{command}")
os.system(command)
tvm_results = np.loadtxt(tvm_results_name)
print(f"== TVM Output:\n {tvm_results}")
#
# Clean temporary image files
#
if os.path.exists(tvm_results_name):
os.remove(tvm_results_name)
return tvm_results
# ========================================================
# check_network
# ========================================================
def check_network(build_dir, target_name, model_path, image_path):
model_name = "network"
model, shape_dict, dtype_dict = get_tflite_model(model_path)
#
# Generate random input data
#
image_data = []
for i in range(NUM_ITERATIONS):
assert len(shape_dict) == 1, "Only single input models are supported."
image_shape = list(shape_dict.values())[0]
in_data = np.random.randint(0, 255, size=image_shape).astype("uint8")
# Write raw data for using with the TVM implementation
filename = os.path.join(image_path, "{:02d}.raw".format(i))
dump_image(filename, in_data)
image_data.append(in_data)
mod, params = relay.frontend.from_tflite(model, shape_dict, dtype_dict)
#
# Build a TVM C module for the ARM CPU (without compiling the kernels
# library to the object code form):
#
with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
rt_module = relay.build(mod, target="c -device=arm_cpu", params=params)
#
# Export model library format
#
target_dir = os.path.join(build_dir, target_name + "_gen")
if os.path.exists(target_dir):
print(f'Removing existing "{target_dir}" directory')
try:
shutil.rmtree(target_dir)
except OSError as err:
raise ValueError(f"emit_code.Error: {target_dir} : {err.strerror}")
mlf_tar_path = os.path.join(build_dir, target_name + "_lib.tar")
import tvm.micro as micro
micro.export_model_library_format(rt_module, mlf_tar_path)
emitter = stm32.CodeEmitter()
quantization = extract_tflite_quantization(model)
emitter.parse_library_format(mlf_tar_path, quantization)
emitter.emit_code(target_dir, model_name)
#
# Results
#
tf_results = run_tflite_model(model_path, image_data)
tvm_results = run_tvm_model(build_dir, model_name, target_dir, image_path)
check_result(tf_results, tvm_results)
# ========================================================
# check_result
# ========================================================
def check_result(tflite_results, tvm_results):
"""Helper function to verify results"""
#
# MNIST quantized uint8 results in one single difference of
# ~ 0.004 so just escape this
#
ATOL = 1e-3
RTOL = 0.5
tvm.testing.assert_allclose(tflite_results, tvm_results, rtol=RTOL, atol=ATOL)
# ========================================================
# test_mnist
# ========================================================
def test_mnist():
DEBUG = False
tempdir_root = None
if DEBUG:
tempdir_root = os.path.join(
curr_path,
f"workspace",
"test_mnist",
datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S"),
)
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
build_dir = tvm.contrib.utils.tempdir(tempdir_root)
model_url = "https://storage.googleapis.com/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite"
model_path = download_testdata(model_url, "mnist.tflite", module="model")
check_network(build_dir.path, "mnist", model_path, build_dir.path)
if __name__ == "__main__":
tvm.testing.main()
| 12,694 | 31.139241 | 134 | py |
tvm | tvm-main/tests/micro/stm32/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm.target.target
| 825 | 40.3 | 62 | py |
tvm | tvm-main/vta/tutorials/matrix_multiply.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _basic-mat-mult:
Simple Matrix Multiply
======================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
In this tutorial, we will build on top of the :ref:`vta-get-started` tutorial
and introduce additional concepts required to implement matrix multiplication
on VTA with the TVM workflow.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime
# as we did in the VTA introductory tutorial.
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
# (the defaults below are only a fallback when the variables are unset)
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq" or env.TARGET == "de10nano":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
elif env.TARGET in ["sim", "tsim"]:
    # `remote` is used below for uploading and running the compiled module.
    remote = rpc.LocalSession()
######################################################################
# Computation Declaration
# -----------------------
# In this example we describe a simple matrix multiplication addition, which
# requires multiple computation stages, as shown in the dataflow diagram below.
# First we describe the input tensors :code:`A` and :code:`B` that are living
# in main memory.
# Second, we need to declare intermediate tensors :code:`A_buf` and
# :code:`B_buf`, which will live in VTA's on-chip buffers.
# Having this extra computational stage allows us to explicitly
# stage cached reads and writes.
# Third, we describe the matrix multiplication computation over
# :code:`A_buf` and :code:`B_buf` to produce the product matrix :code:`C_buf`.
# The last operation is a cast and copy back to DRAM, into results tensor
# :code:`C`.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/gemm_dataflow.png
# :align: center
######################################################################
# Data Layout
# ~~~~~~~~~~~
# We describe the placeholder tensors :code:`A`, and :code:`B` in a tiled data
# format to match the data layout requirements imposed by the VTA tensor core.
######################################################################
# .. note::
#
# **Data Tiling**
#
# One source of complexity when targeting accelerators is to make sure
# that the data layout matches the layout imposed by the accelerator design.
# VTA is designed around a *tensor core* that performs, one matrix-matrix
# operation per cycle between an activation matrix and a weight matrix,
# adding the result matrix to an accumulator matrix, as shown in the
# figure below.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/tensor_core.png
# :align: center
# :width: 480px
#
# The dimensions of that matrix-matrix multiplication are specified in
# the :code:`vta_config.json` configuration file.
# The activation matrix has a :code:`(BATCH, BLOCK_IN)` shape
# and the transposed weight matrix has a :code:`(BLOCK_OUT, BLOCK_IN)` shape,
# thus inferring that the resulting output matrix has a
# :code:`(BATCH, BLOCK_OUT)` shape.
# Consequently input and output tensors processed by VTA need to be
# tiled according to these aforementioned dimensions.
#
# The diagram below shows the impact of data tiling on a matrix that is
# originally of shape (4, 8).
# Tiling by a (2, 2) tile shape ensures that data within each tile is
# contiguous.
# The resulting tiled tensor has a shape of (2, 4, 2, 2).
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/data_tiling.png
# :align: center
# :width: 480px
#
# We first define the variables :code:`m`, :code:`n`, :code:`o` to represent
# the shape of the matrix multiplication. These variables are multiplicative
# factors over the :code:`BLOCK_OUT`, :code:`BLOCK_IN`, and :code:`BATCH`
# tensor dimensions respectively.
# By default, the configuration file sets :code:`BATCH`, :code:`BLOCK_IN`, and
# :code:`BLOCK_OUT` to be 1, 16 and 16 respectively (:code:`BATCH` being set to
# 1 implies that our compute building block is vector-matrix multiply).
#
######################################################################
# .. note::
#
# **Data Types**
#
# It's important to not only match the inner-tile
# dimension of VTA's tensor core, but also to match the specific data types
# expected by VTA.
# VTA for now only supports fixed point data types, whose integer width is
# specified in the :code:`vta_config.json` file by :code:`INP_WIDTH` and
# :code:`WGT_WIDTH` for the activations and weights data types respectively.
# In addition, the accumulator data type integer width is specified by
# :code:`ACC_WIDTH`.
#
# By default, the configuration file sets :code:`INP_WIDTH`
# and :code:`WGT_WIDTH` to 8.
# The accumulator width :code:`ACC_WIDTH` is set to 32, in order to avoid
# overflow during accumulation.
# As a result, :code:`env.inp_dtype` and :code:`env.wgt_dtype` are all
# narrow 8-bit integers, while :code:`env.acc_dtype` is a standard 32-bit
# integer.
# Output channel factor m - total 16x16=256 output channels
m = 16
# Input channel factor n - total 16x16=256 input channels
n = 16
# Batch factor o (we use single batch inference)
o = 1
# A placeholder tensor in tiled data format
A = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="A", dtype=env.inp_dtype)
# B placeholder tensor in tiled data format
B = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="B", dtype=env.wgt_dtype)
# A copy buffer: an identity compute stage that is later mapped to a DMA
# load into VTA's on-chip SRAM (see the scheduling section of this tutorial)
A_buf = te.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: A(*i), "A_buf")
# B copy buffer (same identity/staging technique as A_buf)
B_buf = te.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: B(*i), "B_buf")
######################################################################
# Matrix Multiplication
# ~~~~~~~~~~~~~~~~~~~~~
# Now we're ready to describe the matrix multiplication result tensor :code:`C`,
# with another compute operation.
# The compute function takes the shape of the tensor, as well as a lambda
# function that describes the computation rule for each position of the tensor.
#
# In order to implement matrix multiplication, the lambda function needs to
# include a reduction formula over the input channel dimension axes.
# To create a reduction formula, we can declare a reduction axis using
# :code:`te.reduce_axis`, which takes in the range of reductions.
# :code:`te.sum` takes in the expression to be reduced as well as
# the reduction axes to compute the sum of value over all k in the declared
# ranges.
#
# Note that the reduction needs to be performed over 32-bit :code:`env.acc_dtype`
# accumulator data types.
#
# No computation happens during this phase, as we are only declaring how
# the computation should be done.
# Outer input feature reduction axis (over the n input-channel tiles)
ko = te.reduce_axis((0, n), name="ko")
# Inner input feature reduction axis (within a single BLOCK_IN-wide tile)
ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
# Describe the in-VTA matrix multiplication: both operands are cast up to the
# wider env.acc_dtype so the accumulation does not overflow
C_buf = te.compute(
    (o, m, env.BATCH, env.BLOCK_OUT),
    lambda bo, co, bi, ci: te.sum(
        A_buf[bo, ko, bi, ki].astype(env.acc_dtype) * B_buf[co, ko, ci, ki].astype(env.acc_dtype),
        axis=[ko, ki],
    ),
    name="C_buf",
)
######################################################################
# Casting the Results
# ~~~~~~~~~~~~~~~~~~~
# After the computation is done, we'll need to send the results computed by VTA
# back to main memory.
######################################################################
# .. note::
#
# **Memory Store Restrictions**
#
# One specificity of VTA is that it only supports DRAM stores in the narrow
# :code:`env.inp_dtype` data type format.
# This lets us reduce the data footprint for memory transfers, but also lets
# us quantize the wide accumulator data type down to a data format that
# matches the input activation data type.
# This means that in the context of neural network inference, the outputs
# of a given layer after activation can be consumed directly by the next
# layer.
#
# We perform one last typecast operation to the narrow
# input activation data format.
# Cast to output type, and send to main memory (VTA only supports DRAM stores
# in the narrow env.inp_dtype format, per the note above)
C = te.compute(
    (o, m, env.BATCH, env.BLOCK_OUT), lambda *i: C_buf(*i).astype(env.inp_dtype), name="C"
)
######################################################################
# This concludes the computation declaration part of this tutorial.
######################################################################
# Scheduling the Computation
# --------------------------
# While the above lines describe the computation rule, we can obtain
# :code:`C` in many ways.
# TVM asks the user to provide an implementation of the computation called
# *schedule*.
#
# A schedule is a set of transformations to an original computation that
# transforms the implementation of the computation without affecting
# correctness.
# This simple VTA programming tutorial aims to demonstrate basic schedule
# transformations that will map the original schedule down to VTA hardware
# primitives.
######################################################################
# Default Schedule
# ~~~~~~~~~~~~~~~~
# After we construct the schedule, by default the schedule computes
# :code:`C` in the following way:
# Let's take a look at the generated (default) schedule, before any
# VTA-specific transformations are applied
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Although this schedule makes sense, it won't compile to VTA.
# In order to obtain correct code generation, we need to apply scheduling
# primitives and code annotation that will transform the schedule into
# one that can be directly lowered onto VTA hardware intrinsics.
# Those include:
#
# - DMA copy operations which will take globally-scoped tensors and copy
# those into locally-scoped tensors.
# - Tensor operations that will perform the matrix multiplication.
######################################################################
# Buffer Scopes
# ~~~~~~~~~~~~~
# First, we set the scope of the buffers to tell TVM that these buffers
# will be living in the VTA's on-chip SRAM caches.
# Below, we tell TVM that :code:`A_buf`, :code:`B_buf`, :code:`C_buf`
# will respectively live in VTA's on-chip input, weight and accumulator
# memory.
######################################################################
# .. note::
#
# **VTA's On-Chip SRAMs**
#
# VTA has three different memory scopes, each corresponding to different
# on-chip SRAM buffers.
#
# - :code:`env.inp_scope`: Input buffer, which is a read-only SRAM buffer
# that stores input matrices of shape :code:`(env.BATCH, env.BLOCK_IN)`
# of type :code:`env.inp_dtype`. The input buffer contains
# `2 ^ LOG_INP_BUFF_SIZE` matrix elements (as specified in the
# :code:`vta_config.json` file).
# - :code:`env.wgt_scope`: Weight buffer, which is a read-only SRAM buffer
# that stores weight matrices of shape :code:`(env.BLOCK_OUT, env.BLOCK_IN)`
# of type :code:`env.wgt_dtype`. The weight buffer contains
# `2 ^ LOG_WGT_BUFF_SIZE` matrix elements.
# - :code:`env.acc_scope`: Accumulator buffer, which is a read/write SRAM
# buffer that stores accumulator matrices of shape
# :code:`(env.BATCH, env.BLOCK_OUT)` of type :code:`env.acc_dtype`.
# The accumulator buffer is VTA's general purpose register file: it holds
# both intermediate results of convolutions and matrix multiplications
# as well as intermediate results of pooling, batch normalization, and
# activation layers. The accumulator buffer contains
# `2 ^ LOG_ACC_BUFF_SIZE` matrix elements.
# Set the intermediate tensors' scope to VTA's on-chip buffers:
# input buffer for A, weight buffer for B, accumulator buffer for C
s[A_buf].set_scope(env.inp_scope)
s[B_buf].set_scope(env.wgt_scope)
s[C_buf].set_scope(env.acc_scope)
######################################################################
# DMA Transfers
# ~~~~~~~~~~~~~
# We need to schedule DMA transfers to move data living in DRAM to
# and from the VTA on-chip buffers.
# This can be achieved using the :code:`compute_at` schedule primitive
# which nests the copying of the buffers into the computation loop
# that performs the matrix multiplication.
#
# We insert :code:`dma_copy` pragmas to indicate to the compiler
# that the copy operations will be performed in bulk via DMA,
# which is common in hardware accelerators.
# Finally, we print the temporary schedule to observe the effects of
# moving the copy operations into the matrix multiplication loop.
# Move buffer copy into matrix multiply loop, so the loads are nested inside
# the outer reduction axis ko
s[A_buf].compute_at(s[C_buf], ko)
s[B_buf].compute_at(s[C_buf], ko)
# Tag the buffer copies with the DMA pragma to insert a DMA transfer
s[A_buf].pragma(s[A_buf].op.axis[0], env.dma_copy)
s[B_buf].pragma(s[B_buf].op.axis[0], env.dma_copy)
s[C].pragma(s[C].op.axis[0], env.dma_copy)
# Let's take a look at the transformed schedule
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Tensorization
# ~~~~~~~~~~~~~
# The last step of the schedule transformation consists in applying
# *tensorization* to our schedule.
# Tensorization is analogous to vectorization, but extends the concept
# to a higher-dimensional unit of computation.
# Consequently, tensorization imposes data layout constraints as discussed
# earlier when declaring the data layout input placeholders.
# We've already arranged our tensors in a tiled format, so the next thing
# we need to perform is loop reordering to accommodate for tensorization.
#
# Here we choose to move the outermost reduction axis all the way out.
# This dictates that we first iterate over input channels, then batch
# dimensions, and finally output channels.
# Lastly, we apply the tensorization scheduling primitive :code:`tensorize`
# along the outer axis of the inner-most matrix matrix multiplication tensor
# block.
# We print the finalized schedule that is ready for code-generation
# by the VTA runtime JIT compiler.
# Move the outer reduction axis ko all the way out, then tensorize the inner
# (BATCH, BLOCK_OUT) tile computation onto VTA's GEMM hardware intrinsic
s[C_buf].reorder(
    ko, s[C_buf].op.axis[0], s[C_buf].op.axis[1], s[C_buf].op.axis[2], s[C_buf].op.axis[3], ki
)
s[C_buf].tensorize(s[C_buf].op.axis[2], env.gemm)
# Let's take a look at the finalized schedule
print(vta.lower(s, [A, B, C], simple_mode=True))
######################################################################
# This concludes the scheduling portion of this tutorial.
######################################################################
# TVM Compilation
# ---------------
# After we have finished specifying the schedule, we can compile it
# into a TVM function.
# Build GEMM VTA kernel; "ext_dev" is the VTA external device target, and
# vta.build applies the VTA-specific lowering passes on top of tvm.build
my_gemm = vta.build(
    s, [A, B, C], tvm.target.Target("ext_dev", host=env.target_host), name="my_gemm"
)
# Write the compiled module into an object file.
temp = utils.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
# Send the executable over RPC
remote.upload(temp.relpath("gemm.o"))
# Load the compiled module; `f` is callable and is invoked below
f = remote.load_module("gemm.o")
######################################################################
# Running the Function
# --------------------
# The compiled TVM function uses a concise C API and can be invoked from
# any language.
#
# TVM provides an array API in python to aid quick testing and prototyping.
# The array API is based on `DLPack <https://github.com/dmlc/dlpack>`_ standard.
#
# - We first create a remote context (for remote execution on the Pynq).
# - Then :code:`tvm.nd.array` formats the data accordingly.
# - :code:`f()` runs the actual computation.
# - :code:`numpy()` copies the result array back in a format that can be
# interpreted.
#
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the A and B arrays randomly with ints in [-128, 128)
# (np.random.randint's upper bound is exclusive)
A_orig = np.random.randint(-128, 128, size=(o * env.BATCH, n * env.BLOCK_IN)).astype(A.dtype)
B_orig = np.random.randint(-128, 128, size=(m * env.BLOCK_OUT, n * env.BLOCK_IN)).astype(B.dtype)
# Apply packing to the A and B arrays from a 2D to a 4D packed layout
A_packed = A_orig.reshape(o, env.BATCH, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
B_packed = B_orig.reshape(m, env.BLOCK_OUT, n, env.BLOCK_IN).transpose((0, 2, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
A_nd = tvm.nd.array(A_packed, ctx)
B_nd = tvm.nd.array(B_packed, ctx)
# Output starts zeroed; the kernel writes the (o, m, BATCH, BLOCK_OUT) result
C_nd = tvm.nd.array(np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(C.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
    simulator.clear_stats()
# Invoke the module to perform the computation
f(A_nd, B_nd, C_nd)
######################################################################
# Verifying Correctness
# ---------------------
# Compute the reference result with numpy and assert that the output of the
# matrix multiplication indeed is correct
# Compute reference result with numpy: A_orig @ B_orig.T in the wide
# accumulator dtype, then cast down to the narrow output dtype like VTA does
C_ref = np.dot(A_orig.astype(env.acc_dtype), B_orig.T.astype(env.acc_dtype)).astype(C.dtype)
C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(C_ref, C_nd.numpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
    sim_stats = simulator.stats()
    print("Execution statistics:")
    for k, v in sim_stats.items():
        print("\t{:<16}: {:>16}".format(k, v))
print("Successful matrix multiply test!")
######################################################################
# Summary
# -------
# This tutorial showcases the TVM workflow to implement a simple matrix
# multiplication example on VTA.
# The general workflow includes:
#
# - Programming the FPGA with the VTA bitstream over RPC.
# - Describing matrix multiplication via a series of computations.
# - Describing how we want to perform the computation using schedule primitives.
# - Compiling the function to the VTA target.
# - Running the compiled module and verifying it against a numpy implementation.
#
| 19,550 | 40.16 | 99 | py |
tvm | tvm-main/vta/tutorials/vta_get_started.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _vta-get-started:
Get Started with VTA
====================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This is an introduction tutorial on how to use TVM to program the VTA design.
In this tutorial, we will demonstrate the basic TVM workflow to implement
a vector addition on the VTA design's vector ALU.
This process includes specific scheduling transformations necessary to lower
computation down to low-level accelerator operations.
To begin, we need to import TVM which is our deep learning optimizing compiler.
We also need to import the VTA python package which contains VTA specific
extensions for TVM to target the VTA design.
"""
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
######################################################################
# Loading in VTA Parameters
# ~~~~~~~~~~~~~~~~~~~~~~~~~
# VTA is a modular and customizable design. Consequently, the user
# is free to modify high-level hardware parameters that affect
# the hardware design layout.
# These parameters are specified in the :code:`vta_config.json` file by their
# :code:`log2` values.
# These VTA parameters can be loaded with the :code:`vta.get_env`
# function.
#
# Finally, the TVM target is also specified in the :code:`vta_config.json` file.
# When set to *sim*, execution will take place inside of a behavioral
# VTA simulator.
# If you want to run this tutorial on the Pynq FPGA development platform,
# follow the *VTA Pynq-Based Testing Setup* guide.
env = vta.get_env()
######################################################################
# FPGA Programming
# ----------------
# When targeting the Pynq FPGA development board, we need to configure
# the board with a VTA bitstream.
# We'll need the TVM RPC module and the VTA simulator module
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
# We read the Pynq RPC host IP address and port number from the OS environment
# (the defaults below are only a fallback when the variables are unset)
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq" or env.TARGET == "de10nano":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
elif env.TARGET in ("sim", "tsim", "intelfocl"):
    remote = rpc.LocalSession()
    if env.TARGET in ["intelfocl"]:
        # Program the Intel OpenCL FPGA with a pre-built aocx bitstream
        vta.program_fpga(remote, bitstream="vta.bitstream")
######################################################################
# Computation Declaration
# -----------------------
# As a first step, we need to describe our computation.
# TVM adopts tensor semantics, with each intermediate result
# represented as multi-dimensional array. The user needs to describe
# the computation rule that generates the output tensors.
#
# In this example we describe a vector addition, which requires multiple
# computation stages, as shown in the dataflow diagram below.
# First we describe the input tensors :code:`A` and :code:`B` that are living
# in main memory.
# Second, we need to declare intermediate tensors :code:`A_buf` and
# :code:`B_buf`, which will live in VTA's on-chip buffers.
# Having this extra computational stage allows us to explicitly
# stage cached reads and writes.
# Third, we describe the vector addition computation which will
# add :code:`A_buf` to :code:`B_buf` to produce :code:`C_buf`.
# The last operation is a cast and copy back to DRAM, into results tensor
# :code:`C`.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/vadd_dataflow.png
# :align: center
######################################################################
# Input Placeholders
# ~~~~~~~~~~~~~~~~~~
# We describe the placeholder tensors :code:`A`, and :code:`B` in a tiled data
# format to match the data layout requirements imposed by the VTA vector ALU.
#
# For VTA's general purpose operations such as vector adds, the tile size is
# :code:`(env.BATCH, env.BLOCK_OUT)`.
# The dimensions are specified in
# the :code:`vta_config.json` configuration file and are set by default to
# a (1, 16) vector.
#
# In addition, A and B's data types also needs to match the :code:`env.acc_dtype`
# which is set by the :code:`vta_config.json` file to be a 32-bit integer.
# Output channel factor m - total 64 x 16 = 1024 output channels
m = 64
# Batch factor o - total 1 x 1 = 1
o = 1
# A placeholder tensor in tiled data format; vector-ALU operations work on
# (env.BATCH, env.BLOCK_OUT) tiles of the wide env.acc_dtype
A = te.placeholder((o, m, env.BATCH, env.BLOCK_OUT), name="A", dtype=env.acc_dtype)
# B placeholder tensor in tiled data format (same shape and dtype as A)
B = te.placeholder((o, m, env.BATCH, env.BLOCK_OUT), name="B", dtype=env.acc_dtype)
######################################################################
# Copy Buffers
# ~~~~~~~~~~~~
# One specificity of hardware accelerators, is that on-chip memory has to be
# explicitly managed.
# This means that we'll need to describe intermediate tensors :code:`A_buf`
# and :code:`B_buf` that can have a different memory scope than the original
# placeholder tensors :code:`A` and :code:`B`.
#
# Later in the scheduling phase, we can tell the compiler that :code:`A_buf`
# and :code:`B_buf` will live in the VTA's on-chip buffers (SRAM), while
# :code:`A` and :code:`B` will live in main memory (DRAM).
# We describe A_buf and B_buf as the result of a compute
# operation that is the identity function.
# This can later be interpreted by the compiler as a cached read operation.
# A copy buffer: an identity compute stage that can later be interpreted by
# the compiler as a cached read into VTA's on-chip SRAM
A_buf = te.compute((o, m, env.BATCH, env.BLOCK_OUT), lambda *i: A(*i), "A_buf")
# B copy buffer (same identity/staging technique as A_buf)
B_buf = te.compute((o, m, env.BATCH, env.BLOCK_OUT), lambda *i: B(*i), "B_buf")
######################################################################
# Vector Addition
# ~~~~~~~~~~~~~~~
# Now we're ready to describe the vector addition result tensor :code:`C`,
# with another compute operation.
# The compute function takes the shape of the tensor, as well as a lambda
# function that describes the computation rule for each position of the tensor.
#
# No computation happens during this phase, as we are only declaring how
# the computation should be done.
# Describe the in-VTA vector addition (performed in the wide env.acc_dtype)
C_buf = te.compute(
    (o, m, env.BATCH, env.BLOCK_OUT),
    lambda *i: A_buf(*i).astype(env.acc_dtype) + B_buf(*i).astype(env.acc_dtype),
    name="C_buf",
)
######################################################################
# Casting the Results
# ~~~~~~~~~~~~~~~~~~~
# After the computation is done, we'll need to send the results computed by VTA
# back to main memory.
######################################################################
# .. note::
#
# **Memory Store Restrictions**
#
# One specificity of VTA is that it only supports DRAM stores in the narrow
# :code:`env.inp_dtype` data type format.
# This lets us reduce the data footprint for memory transfers (more on this
# in the basic matrix multiply example).
#
# We perform one last typecast operation to the narrow
# input activation data format.
# Cast to output type, and send to main memory (DRAM stores only support the
# narrow env.inp_dtype, per the note above)
C = te.compute(
    (o, m, env.BATCH, env.BLOCK_OUT), lambda *i: C_buf(*i).astype(env.inp_dtype), name="C"
)
######################################################################
# This concludes the computation declaration part of this tutorial.
######################################################################
# Scheduling the Computation
# --------------------------
# While the above lines describe the computation rule, we can obtain
# :code:`C` in many ways.
# TVM asks the user to provide an implementation of the computation called
# *schedule*.
#
# A schedule is a set of transformations to an original computation that
# transforms the implementation of the computation without affecting
# correctness.
# This simple VTA programming tutorial aims to demonstrate basic schedule
# transformations that will map the original schedule down to VTA hardware
# primitives.
######################################################################
# Default Schedule
# ~~~~~~~~~~~~~~~~
# After we construct the schedule, by default the schedule computes
# :code:`C` in the following way:
# Let's take a look at the generated (default) schedule, before any
# VTA-specific transformations are applied
s = te.create_schedule(C.op)
print(tvm.lower(s, [A, B, C], simple_mode=True))
######################################################################
# Although this schedule makes sense, it won't compile to VTA.
# In order to obtain correct code generation, we need to apply scheduling
# primitives and code annotation that will transform the schedule into
# one that can be directly lowered onto VTA hardware intrinsics.
# Those include:
#
# - DMA copy operations which will take globally-scoped tensors and copy
# those into locally-scoped tensors.
# - Vector ALU operations that will perform the vector add.
######################################################################
# Buffer Scopes
# ~~~~~~~~~~~~~
# First, we set the scope of the copy buffers to indicate to TVM that these
# intermediate tensors will be stored in the VTA's on-chip SRAM buffers.
# Below, we tell TVM that :code:`A_buf`, :code:`B_buf`, :code:`C_buf`
# will live in VTA's on-chip *accumulator buffer* which serves as
# VTA's general purpose register file.
#
# Set the intermediate tensors' scope to VTA's on-chip accumulator buffer,
# which serves as VTA's general purpose register file
s[A_buf].set_scope(env.acc_scope)
s[B_buf].set_scope(env.acc_scope)
s[C_buf].set_scope(env.acc_scope)
######################################################################
# DMA Transfers
# ~~~~~~~~~~~~~
# We need to schedule DMA transfers to move data living in DRAM to
# and from the VTA on-chip buffers.
# We insert :code:`dma_copy` pragmas to indicate to the compiler
# that the copy operations will be performed in bulk via DMA,
# which is common in hardware accelerators.
# Tag the buffer copies with the DMA pragma to map a copy loop to a
# DMA transfer operation
s[A_buf].pragma(s[A_buf].op.axis[0], env.dma_copy)
s[B_buf].pragma(s[B_buf].op.axis[0], env.dma_copy)
# The store of C back to DRAM is likewise performed as a bulk DMA transfer
s[C].pragma(s[C].op.axis[0], env.dma_copy)
######################################################################
# ALU Operations
# ~~~~~~~~~~~~~~
# VTA has a vector ALU that can perform vector operations on tensors
# in the accumulator buffer.
# In order to tell TVM that a given operation needs to be mapped to the
# VTA's vector ALU, we need to explicitly tag the vector addition loop
# with an :code:`env.alu` pragma.
# Tell TVM that the computation needs to be performed
# on VTA's vector ALU
s[C_buf].pragma(C_buf.op.axis[0], env.alu)
# Let's take a look at the finalized schedule
# (vta.lower applies the VTA-specific lowering passes before printing)
print(vta.lower(s, [A, B, C], simple_mode=True))
######################################################################
# This concludes the scheduling portion of this tutorial.
######################################################################
# TVM Compilation
# ---------------
# After we have finished specifying the schedule, we can compile it
# into a TVM function. By default TVM compiles into a type-erased
# function that can be directly called from python side.
#
# In the following line, we use :code:`tvm.build` to create a function.
# The build function takes the schedule, the desired signature of the
# function(including the inputs and outputs) as well as target language
# we want to compile to.
#
# Build the vector-add kernel for the VTA "ext_dev" target; vta.build applies
# the VTA-specific lowering passes on top of tvm.build
my_vadd = vta.build(
    s, [A, B, C], tvm.target.Target("ext_dev", host=env.target_host), name="my_vadd"
)
######################################################################
# Saving the Module
# ~~~~~~~~~~~~~~~~~
# TVM lets us save our module into a file so it can loaded back later. This
# is called ahead-of-time compilation and allows us to save some compilation
# time.
# More importantly, this allows us to cross-compile the executable on our
# development machine and send it over to the Pynq FPGA board over RPC for
# execution.
# Write the compiled module into an object file.
temp = utils.tempdir()
my_vadd.save(temp.relpath("vadd.o"))
# Send the executable over RPC
remote.upload(temp.relpath("vadd.o"))
######################################################################
# Loading the Module
# ~~~~~~~~~~~~~~~~~~
# We can load the compiled module from the file system to run the code.
# `f` is callable and is invoked below with the input/output arrays.
f = remote.load_module("vadd.o")
######################################################################
# Running the Function
# --------------------
# The compiled TVM function uses a concise C API and can be invoked from
# any language.
#
# TVM provides an array API in python to aid quick testing and prototyping.
# The array API is based on `DLPack <https://github.com/dmlc/dlpack>`_ standard.
#
# - We first create a remote context (for remote execution on the Pynq).
# - Then :code:`tvm.nd.array` formats the data accordingly.
# - :code:`f()` runs the actual computation.
# - :code:`numpy()` copies the result array back in a format that can be
# interpreted.
#
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the A and B arrays randomly with ints in [-128, 128)
# (np.random.randint's upper bound is exclusive)
A_orig = np.random.randint(-128, 128, size=(o * env.BATCH, m * env.BLOCK_OUT)).astype(A.dtype)
B_orig = np.random.randint(-128, 128, size=(o * env.BATCH, m * env.BLOCK_OUT)).astype(B.dtype)
# Apply packing to the A and B arrays from a 2D to a 4D packed layout
A_packed = A_orig.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
B_packed = B_orig.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
A_nd = tvm.nd.array(A_packed, ctx)
B_nd = tvm.nd.array(B_packed, ctx)
# Output starts zeroed; the kernel fills it in place
C_nd = tvm.nd.array(np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(C.dtype), ctx)
# Invoke the module to perform the computation
f(A_nd, B_nd, C_nd)
######################################################################
# Verifying Correctness
# ---------------------
# Compute the reference result with numpy and assert that the output of the
# vector addition indeed is correct
# Compute reference result with numpy: element-wise add in the wide
# accumulator dtype, then cast down to the narrow output dtype like VTA does
C_ref = (A_orig.astype(env.acc_dtype) + B_orig.astype(env.acc_dtype)).astype(C.dtype)
C_ref = C_ref.reshape(o, env.BATCH, m, env.BLOCK_OUT).transpose((0, 2, 1, 3))
np.testing.assert_equal(C_ref, C_nd.numpy())
print("Successful vector add test!")
######################################################################
# Summary
# -------
# This tutorial provides a walk-through of TVM for programming the
# deep learning accelerator VTA with a simple vector addition example.
# The general workflow includes:
#
# - Programming the FPGA with the VTA bitstream over RPC.
# - Describing the vector add computation via a series of computations.
# - Describing how we want to perform the computation using schedule primitives.
# - Compiling the function to the VTA target.
# - Running the compiled module and verifying it against a numpy implementation.
#
# You are more than welcome to check other examples out and tutorials
# to learn more about the supported operations, schedule primitives
# and other features supported by TVM to program VTA.
#
| 16,406 | 39.41133 | 99 | py |
tvm | tvm-main/vta/tutorials/optimize/convolution_opt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
2D Convolution Optimization
===========================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map a 2D convolution
workload efficiently on the VTA design.
We recommend covering the :ref:`vta-mat-mult-opt` tutorial first.
2D convolution is dominant in most computer vision deep neural networks.
In this tutorial, we will demonstrate TVM schedule optimizations to map
2D convolution operators in NCHW layout onto VTA.
We also introduce the notion of latency hiding, which allows us to
maximize VTA's compute and memory resource utilization.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime.
from __future__ import absolute_import, print_function
import os
import tvm
import tvm.testing
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
# `remote` is used below to upload and run the compiled module.
# NOTE(review): any TARGET other than pynq/sim/tsim leaves `remote` unset,
# which would raise a NameError later — confirm those are the only targets.
elif env.TARGET in ["sim", "tsim"]:
    remote = rpc.LocalSession()
######################################################################
# Computation Declaration
# -----------------------
# As a first step, we need to describe our 2D convolution computation
# in NCHW format.
#
# We define the 2D convolution shape by the batch size,
# spatial dimensions, input channels, output channels, kernel dimensions,
# padding dimensions, and stride dimensions.
#
# We pick the shape of the 9th convolutional layer of the ResNet-18
# architecture as our convolution workload parameters.
#
# We've added extra operators to the 2D convolution that apply
# shifting and clipping to the output in order to mimic a fixed-point
# convolution followed by a rectified linear activation.
# We describe the TVM dataflow graph of the 2D convolution layer below:
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/conv2d_dataflow.png
# :align: center
#
# This computation is intentionally too large to fit onto VTA's on-chip
# buffers all at once. Therefore in the scheduling phase we'll
# rely on computation blocking strategies to break the computation down into
# manageable chunks.
#
# .. note::
#
# *Spatial padding*
#
# Note that we'll need to import the TOPI library to apply spatial padding
# on the input feature map tensor.
# Spatial padding facilitates blocking in the context of 2D convolutions
# due to the fact that the same (x, y) spatial location of the input
# feature map of any given layer is read more than once if the convolution
# kernel window size is greater than one.
# On CPUs, and GPUs, one way to increase efficiency of memory accesses
# when parallelizing work is spatial packing, which requires data re-layout.
# VTA load DMA engine can insert padding automatically so that the original
# input feature map does not have to be re-packed in memory.
#
# We show the effect of VTA's on the fly spatial padding when data is being
# loaded from DRAM into VTA's SRAM, following a 2D strided and padded memory
# read.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/padding.png
# :align: center
# :width: 480px
from tvm import topi
# 2D convolution layer dimensions taken from ResNet-18 architecture
# (9th convolutional layer)
batch_size = 1
height = 14
width = 14
in_channels = 256
out_channels = 256
kernel_h = 3
kernel_w = 3
pad_h = 1
pad_w = 1
stride_h = 1
stride_w = 1
# The workload must tile evenly into VTA's tensor intrinsic shape
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
# Input feature map: (N, IC, H, W, n, ic)
# (outer dims are tiled by the VTA tensor shape: n = env.BATCH, ic = env.BLOCK_IN)
data_shape = (
    batch_size // env.BATCH,
    in_channels // env.BLOCK_IN,
    height,
    width,
    env.BATCH,
    env.BLOCK_IN,
)
# Kernel: (OC, IC, H, W, oc, ic)
kernel_shape = (
    out_channels // env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    kernel_h,
    kernel_w,
    env.BLOCK_OUT,
    env.BLOCK_IN,
)
# Derive output feature map dimensions with the standard
# (in + 2 * pad - kernel) // stride + 1 formula
fout_height = (height + 2 * pad_h - kernel_h) // stride_h + 1
fout_width = (width + 2 * pad_w - kernel_w) // stride_w + 1
# Output feature map: (N, OC, H, W, n, oc)
output_shape = (
    batch_size // env.BATCH,
    out_channels // env.BLOCK_OUT,
    fout_height,
    fout_width,
    env.BATCH,
    env.BLOCK_OUT,
)
# Convolution reduction axes
dy = te.reduce_axis((0, kernel_h), name="dy")
dx = te.reduce_axis((0, kernel_w), name="dx")
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name="ic")
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name="ic_tns")
# Input placeholder tensors
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
# Copy buffers:
# Apply spatial padding to input feature map (VTA's load DMA engine can
# insert this padding on the fly, so DRAM data needs no re-layout)
data_buf = topi.nn.pad(data, [0, 0, pad_h, pad_w, 0, 0], name="data_buf")
kernel_buf = te.compute(kernel_shape, lambda *i: kernel(*i), "kernel_buf")
# Declare 2D convolution
res_conv = te.compute(
    output_shape,
    lambda bo, co, i, j, bi, ci: te.sum(
        data_buf[bo, ic, i * stride_h + dy, j * stride_w + dx, bi, ic_tns].astype(env.acc_dtype)
        * kernel_buf[co, ic, dy, dx, ci, ic_tns].astype(env.acc_dtype),
        axis=[ic, dy, dx, ic_tns],
    ),
    name="res_conv",
)
# Add shift stage for fix-point normalization.
# Shift by env.INP_WIDTH (8 in the default configuration) rather than a
# hard-coded 8, so this stage always matches the numpy reference check
# further below, which also right-shifts by env.INP_WIDTH.
res_shr = te.compute(output_shape, lambda *i: res_conv(*i) >> env.INP_WIDTH, name="res_shr")
# Apply clipping between (0, input max value), i.e. the positive range
# of a signed env.INP_WIDTH-bit integer
inp_max = (1 << (env.INP_WIDTH - 1)) - 1
res_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), "res_max")
res_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), "res_min")
# Result Tensor: cast back to the narrow input data type for write-back
res = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
######################################################################
# Scheduling the Computation
# --------------------------
# We'll look at a set of schedule transformations necessary to map the
# 2D convolution onto VTA in an efficient fashion.
# Those include:
#
# - Computation blocking
# - Virtual threading to increase compute utilization
# - Lowering to VTA hardware intrinsics
# Create TVM schedule (a default, unoptimized set of nested loops)
s = te.create_schedule(res.op)
# Let's look at the default TVM schedule
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# Blocking the Computation
# ~~~~~~~~~~~~~~~~~~~~~~~~
# The 2D convolution is by default too large for activations or kernel weights
# to fit on VTA's on-chip buffers all at once.
# We apply blocking along input channels, output channels, and along
# the height spatial dimensions.
# We don't apply blocking along the width spatial dimension since it's
# the innermost dimension in the NCHW layout (and consequently to increase
# locality, it's best not to block along the innermost dimension).
# Let's define tiling sizes
# (expressed in multiples of the VTA tensor shape: env.BATCH x env.BLOCK_*)
b_block = 1 // env.BATCH  # == 1: the asserts above force env.BATCH to divide batch_size == 1
oc_block = 128 // env.BLOCK_OUT
ic_block = 16 // env.BLOCK_IN
h_block = 7
w_block = 14
# Tile the output tensor along the spatial and output channel dimensions
# (since by default we are doing single batch inference, the split along
# the batch dimension has no effect)
b, oc, y, x, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, factor=b_block)
oc_out, oc_inn = s[res].split(oc, factor=oc_block)
y_out, y_inn = s[res].split(y, factor=h_block)
x_out, x_inn = s[res].split(x, factor=w_block)
s[res].reorder(b_out, oc_out, y_out, x_out, b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns)
# Move intermediate computation into each output compute tile
s[res_conv].compute_at(s[res], x_out)
s[res_shr].compute_at(s[res], x_out)
s[res_max].compute_at(s[res], x_out)
s[res_min].compute_at(s[res], x_out)
# Apply additional loop split along reduction axis (input channel)
b_inn, oc_inn, y_inn, x_inn, b_tns, oc_tns = s[res_conv].op.axis
ic_out, ic_inn = s[res_conv].split(ic, factor=ic_block)
# Reorder axes.
# 1) Group the VTA tensor axes in the inner most position: b_tns, oc_tns, ic_tns
#    to allow TVM to tensorize.
# 2) We move the ic_out axis all the way out of the convolution loop to block
#    along the reduction axis.
# 3) Now we re-order the block axes: b_inn, oc_inn, y_inn, x_inn, ic_inn, dy, dx.
#    VTA runtime/hardware requires us to write to a different output feature map
#    location for every VTA tensor operation.
#    This restriction requires us to order one of oc_inn, y_inn or x_inn right
#    before b_tns, since they all affect output feature map indexing.
#    Therefore, we choose to bring x_inn inside as shown below.
s[res_conv].reorder(ic_out, b_inn, oc_inn, y_inn, ic_inn, dy, dx, x_inn, b_tns, oc_tns, ic_tns)
######################################################################
# Virtual Threading
# ~~~~~~~~~~~~~~~~~
# Virtual threading is a mechanism that increases task-level pipeline
# parallelism in the VTA hardware design.
# Put it another way, it increases compute resource utilization by hiding
# memory access latency.
#
# In the implementation below, virtual threading distributes work across two
# threads split along the output channel axis.
# We show how work is split when computing the 2D convolution in the figure
# below.
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/virtual_threading.png
# :align: center
# :width: 480px
# VTA only supports 2 virtual threads
v_threads = 2
# Perform virtual thread split along output channel outer axis
_, tx = s[res].split(oc_out, factor=v_threads)
s[res].reorder(tx, b_out)
# Bind the split axis to VTA's virtual thread axis ("cthread") so the two
# threads' work can be interleaved to hide memory access latency
s[res].bind(tx, te.thread_axis("cthread"))
# Let's look at the current TVM schedule after blocking and virtual threading
print(tvm.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# Lowering Copies to DMA Transfers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Next we set the buffer scopes to the corresponding on-chip VTA SRAM buffers.
# We move the load loops into the 2D convolution computation loop to stage
# memory loads such that they fit in the on-chip SRAM buffers.
# Finally we annotate the load/store loop outer axes with the DMA copy pragma
# to perform bulk memory transfers on VTA.
# Set scope of SRAM buffers: inputs, weights and accumulators each live
# in their own on-chip memory
s[data_buf].set_scope(env.inp_scope)
s[kernel_buf].set_scope(env.wgt_scope)
s[res_conv].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
# Block data and kernel cache reads: stage one reduction block's worth of
# data at a time so each load fits in the on-chip SRAM
s[data_buf].compute_at(s[res_conv], ic_out)
s[kernel_buf].compute_at(s[res_conv], ic_out)
# Use DMA copy pragma on DRAM->SRAM operations
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[kernel_buf].pragma(s[kernel_buf].op.axis[0], env.dma_copy)
# Use DMA copy pragma on SRAM->DRAM operation in each result block
# (this implies that these copies should be performed along b_inn,
# or result axis 4)
s[res].pragma(s[res].op.axis[4], env.dma_copy)
######################################################################
# Lowering Computation to VTA Compute Intrinsics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The last phase is to lower the computation loops down to VTA hardware
# intrinsics by mapping the 2D convolution to tensor intrinsics,
# and mapping the shift, and clipping computation to the vector ALU.
# Apply tensorization over the batch tensor tile axis: everything at and
# below b_tns is mapped onto VTA's GEMM hardware intrinsic
s[res_conv].tensorize(b_tns, env.gemm)
# Add an ALU pragma over the shift and clipping operations so they are
# lowered to VTA's vector ALU
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
# Let's look at the final lowered TVM schedule after lowering memory
# loads/stores down to DMA copy intrinsics, and the computation down to
# VTA compute intrinsics.
print(vta.lower(s, [data, kernel, res], simple_mode=True))
######################################################################
# TVM Compilation and Verification
# --------------------------------
# After specifying the schedule, we can compile it into a TVM function.
# We save the module so we can send it over RPC.
# We run the function and verify it against a numpy implementation to
# ensure correctness.
# This library facilitates 2D convolution testing
from tvm.topi.testing import conv2d_nchw_python
# Compile the TVM module
with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
    my_conv = vta.build(
        s, [data, kernel, res], tvm.target.Target("ext_dev", host=env.target_host), name="my_conv"
    )
temp = utils.tempdir()
my_conv.save(temp.relpath("conv2d.o"))
remote.upload(temp.relpath("conv2d.o"))
f = remote.load_module("conv2d.o")
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the data and kernel arrays randomly in the int range
# of [-128, 128) in NCHW layout (np.random.randint's high bound is exclusive)
data_np = np.random.randint(-128, 128, size=(batch_size, in_channels, height, width)).astype(
    data.dtype
)
kernel_np = np.random.randint(
    -128, 128, size=(out_channels, in_channels, kernel_h, kernel_w)
).astype(kernel.dtype)
# Apply packing to the data and kernel arrays from a 2D NCHW
# to a 4D NCHWnc packed layout
data_packed = data_np.reshape(
    batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN, height, width
).transpose((0, 2, 4, 5, 1, 3))
kernel_packed = kernel_np.reshape(
    out_channels // env.BLOCK_OUT,
    env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    env.BLOCK_IN,
    kernel_h,
    kernel_w,
).transpose((0, 2, 4, 5, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
data_nd = tvm.nd.array(data_packed, ctx)
kernel_nd = tvm.nd.array(kernel_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
    simulator.clear_stats()
# Invoke the module to perform the computation
f(data_nd, kernel_nd, res_nd)
# Verify against numpy implementation
res_ref = conv2d_nchw_python(
    data_np.astype(env.acc_dtype),
    kernel_np.astype(env.acc_dtype),
    (stride_h, stride_w),
    (pad_h, pad_w),
).astype(env.acc_dtype)
# NOTE(review): this shift amount must match the shift applied in the
# res_shr stage declared above — keep the two in sync.
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
# Repack the NCHW reference result into the NCHWnc layout used on device
res_ref = res_ref.reshape(
    (
        batch_size // env.BATCH,
        env.BATCH,
        out_channels // env.BLOCK_OUT,
        env.BLOCK_OUT,
        fout_height,
        fout_width,
    )
).transpose((0, 2, 4, 5, 1, 3))
tvm.testing.assert_allclose(res_ref, res_nd.numpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
    sim_stats = simulator.stats()
    print("Execution statistics:")
    for k, v in sim_stats.items():
        print("\t{:<16}: {:>16}".format(k, v))
print("Successful 2D convolution test!")
######################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can be used to
# lower 2D convolution onto hardware accelerator intrinsics, making
# use of hardware specific optimizations, such as latency hiding with
# virtual threading.
#
| 16,886 | 35.79085 | 103 | py |
tvm | tvm-main/vta/tutorials/optimize/matrix_multiply_opt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _vta-mat-mult-opt:
Matrix Multiply Blocking
========================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an overview on how to use TVM to map matrix
multiplication efficiently on the VTA design.
We recommend covering the :ref:`basic-mat-mult` tutorial first.
In this tutorial, we will demonstrate TVM schedule optimizations to break large
neural network operators down onto smaller blocks to achieve computation within
limited hardware accelerator resources.
"""
######################################################################
# RPC Setup
# ---------
# We start by programming the Pynq's FPGA and building its RPC runtime.
from __future__ import absolute_import, print_function
import os
import tvm
from tvm import te
import vta
import numpy as np
from tvm import rpc
from tvm.contrib import utils
from vta.testing import simulator
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# We read the Pynq RPC host IP address and port number from the OS environment
host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
# We configure both the bitstream and the runtime system on the Pynq
# to match the VTA configuration specified by the vta_config.json file.
if env.TARGET == "pynq":
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    remote = rpc.connect(host, port)
    # Reconfigure the JIT runtime
    vta.reconfig_runtime(remote)
    # Program the FPGA with a pre-compiled VTA bitstream.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    vta.program_fpga(remote, bitstream=None)
# In simulation mode, host the RPC server locally.
# `remote` is used below to upload and run the compiled module.
elif env.TARGET in ["sim", "tsim"]:
    remote = rpc.LocalSession()
######################################################################
# Computation Declaration
# -----------------------
# As a first step, we need to describe our matrix multiplication computation.
# We define the matrix multiplication as the computation one would find in a
# fully connected layer, defined by its batch size, input channels, and output
# channels.
# These have to be integer multiples of the VTA tensor shape:
# :code:`BATCH`, :code:`BLOCK_IN`, and :code:`BLOCK_OUT` respectively.
#
# We've added extra operators to the matrix multiplication that apply
# shifting and clipping to the output in order to mimic a fixed-point
# matrix multiplication followed by a rectified linear activation.
# We describe the TVM dataflow graph of the fully connected layer below:
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/fc_dataflow.png
# :align: center
#
# This computation is intentionally too large to fit onto VTA's on-chip
# buffers all at once. Therefore in the scheduling phase we'll
# rely on computation blocking strategies to break the computation down into
# manageable chunks.
# Fully connected layer dimensions: 1024 x 1024
batch_size = 1
in_channels = 1024
out_channels = 1024
# The workload must tile evenly into VTA's tensor intrinsic shape
assert batch_size % env.BATCH == 0
assert in_channels % env.BLOCK_IN == 0
assert out_channels % env.BLOCK_OUT == 0
# Let's derive the tiled input tensor shapes
data_shape = (batch_size // env.BATCH, in_channels // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
weight_shape = (
    out_channels // env.BLOCK_OUT,
    in_channels // env.BLOCK_IN,
    env.BLOCK_OUT,
    env.BLOCK_IN,
)
output_shape = (batch_size // env.BATCH, out_channels // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT)
# Total operation count: 2 ops (multiply + accumulate) per MAC
num_ops = in_channels * out_channels * batch_size * 2
# Reduction axes
ic = te.reduce_axis((0, in_channels // env.BLOCK_IN), name="ic")
ic_tns = te.reduce_axis((0, env.BLOCK_IN), name="ic_tns")
# Input placeholder tensors
data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
weight = te.placeholder(weight_shape, name="weight", dtype=env.wgt_dtype)
# Copy buffers
data_buf = te.compute(data_shape, lambda *i: data(*i), "data_buf")
weight_buf = te.compute(weight_shape, lambda *i: weight(*i), "weight_buf")
# Declare matrix multiply computation
res_gemm = te.compute(
    output_shape,
    lambda bo, co, bi, ci: te.sum(
        data_buf[bo, ic, bi, ic_tns].astype(env.acc_dtype)
        * weight_buf[co, ic, ci, ic_tns].astype(env.acc_dtype),
        axis=[ic, ic_tns],
    ),
    # Label typo fixed: name the stage after the `res_gemm` variable
    # (was "res_gem") so printed schedules are easier to follow
    name="res_gemm",
)
# Add shift stage for fix-point normalization
res_shr = te.compute(output_shape, lambda *i: res_gemm(*i) >> env.INP_WIDTH, name="res_shr")
# Apply clipping between (0, input max value), i.e. the positive range
# of a signed env.INP_WIDTH-bit integer
inp_max = (1 << (env.INP_WIDTH - 1)) - 1
res_max = te.compute(output_shape, lambda *i: tvm.te.max(res_shr(*i), 0), "res_max")
res_min = te.compute(output_shape, lambda *i: tvm.te.min(res_max(*i), inp_max), "res_min")
# Apply typecast to input data type before sending results back
res = te.compute(output_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")
######################################################################
# Scheduling the Computation
# --------------------------
# We'll look at a set of schedule transformations necessary to map the
# matrix multiplications onto VTA in an efficient fashion.
# Those include:
#
# - Computation blocking
# - Lowering to VTA hardware intrinsics
# Create TVM schedule (a default, unoptimized set of nested loops)
s = te.create_schedule(res.op)
# Let's look at the default TVM schedule
print(tvm.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# Blocking the Computation
# ~~~~~~~~~~~~~~~~~~~~~~~~
# The matrix multiplication is by default too large for activations or weights
# to fit on VTA's on-chip buffers all at once.
# We block the (1, 1024) by (1024, 1024) matrix multiplication into
# smaller (1, 256) by (256, 256) matrix multiplications so the intermediate
# tensors can fit on the accelerator's on-chip SRAM.
# This approach is similar to blocking techniques applied to CPUs and GPUs in
# order to increase cache hit rate.
#
# We perform blocking along each axis (the batch axis being untouched since
# we are performing single-batch inference).
# We also leave the inner-most tensorization axes as-is in order to allow
# TVM to pattern-match tensorization.
# We show the outcome of blocking on the computation schedule in the diagram
# below:
#
# .. image:: https://raw.githubusercontent.com/uwsampl/web-data/main/vta/tutorial/blocking.png
# :align: center
# :width: 480px
#
# .. note::
#
# The code after loop splitting and reordering is equivalent to the following
# pseudo-code. We ignore the batch axis since we are only performing single-batch
# inference in this example:
#
# .. code-block:: c
#
# for (int oc_out = 0; oc_out < 4; ++oc_out) {
# // Initialization loop
# for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {
# for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {
# int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;
# C[0][j] = 0;
# }
# }
# for (int ic_out = 0; ic_out < 4; ++ic_out) {
# // Block loop
# for (int oc_inn = 0; oc_inn < 16; ++oc_inn) {
# for (int ic_inn = 0; ic_inn < 16; ++ic_inn) {
# // Tensorization loop
# for (int oc_tns = 0; oc_tns < 16; ++oc_tns) {
# for (int ic_tns = 0; ic_tns < 16; ++ic_tns) {
# int i = (ic_out * 16 + ic_inn) * 16 + ic_tns;
# int j = (oc_out * 16 + oc_inn) * 16 + oc_tns;
# C[0][i] = C[0][i] + A[0][i] * B[j][i];
# }
# }
# }
# }
# }
# }
# }
# Let's define tiling sizes (expressed in multiples of VTA tensor shape size)
b_block = 1 // env.BATCH  # == 1: the asserts above force env.BATCH to divide batch_size == 1
i_block = 256 // env.BLOCK_IN
o_block = 256 // env.BLOCK_OUT
# Tile the output tensor along the batch and output channel dimensions
# (since by default we are doing single batch inference, the split along
# the batch dimension has no effect)
b, oc, b_tns, oc_tns = s[res].op.axis
b_out, b_inn = s[res].split(b, b_block)
oc_out, oc_inn = s[res].split(oc, o_block)
s[res].reorder(b_out, oc_out, b_inn, oc_inn)
# Move intermediate computation into each output compute tile
s[res_gemm].compute_at(s[res], oc_out)
s[res_shr].compute_at(s[res], oc_out)
s[res_max].compute_at(s[res], oc_out)
s[res_min].compute_at(s[res], oc_out)
# Apply additional loop split along reduction axis (input channel)
b_inn, oc_inn, b_tns, oc_tns = s[res_gemm].op.axis
ic_out, ic_inn = s[res_gemm].split(ic, i_block)
# Reorder axes. We move the ic_out axis all the way out of the GEMM
# loop to block along the reduction axis; the tensor axes (b_tns, oc_tns,
# ic_tns) stay innermost so TVM can pattern-match tensorization
s[res_gemm].reorder(ic_out, b_inn, oc_inn, ic_inn, b_tns, oc_tns, ic_tns)
# Let's look at the current TVM schedule after blocking
print(tvm.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# Lowering Copies to DMA Transfers
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Next we set the buffer scopes to the corresponding on-chip VTA SRAM buffers.
# We move the load loops into the matrix multiply computation loop to stage
# memory loads such that they fit in the on-chip SRAM buffers.
# Finally we annotate the load/store loop outer axes with the DMA copy pragma
# to perform bulk memory transfers on VTA.
# Set scope of SRAM buffers: inputs, weights and accumulators each live
# in their own on-chip memory
s[data_buf].set_scope(env.inp_scope)
s[weight_buf].set_scope(env.wgt_scope)
s[res_gemm].set_scope(env.acc_scope)
s[res_shr].set_scope(env.acc_scope)
s[res_min].set_scope(env.acc_scope)
s[res_max].set_scope(env.acc_scope)
# Block data and weight cache reads: stage one reduction block's worth of
# data at a time so each load fits in the on-chip SRAM
s[data_buf].compute_at(s[res_gemm], ic_out)
s[weight_buf].compute_at(s[res_gemm], ic_out)
# Use DMA copy pragma on DRAM->SRAM operations
s[data_buf].pragma(s[data_buf].op.axis[0], env.dma_copy)
s[weight_buf].pragma(s[weight_buf].op.axis[0], env.dma_copy)
# Use DMA copy pragma on SRAM->DRAM operation
# (this implies that these copies should be performed along b_inn,
# or result axis 2)
s[res].pragma(s[res].op.axis[2], env.dma_copy)
######################################################################
# Lowering Computation to VTA Compute Intrinsics
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The last phase is to lower the computation loops down to VTA hardware
# intrinsics by mapping the matrix multiplication to tensor intrinsics,
# and mapping the shift, and clipping computation to the vector ALU.
# Apply tensorization over the batch tensor tile axis: everything at and
# below b_tns is mapped onto VTA's GEMM hardware intrinsic
s[res_gemm].tensorize(b_tns, env.gemm)
# Add an ALU pragma over the shift and clipping operations so they are
# lowered to VTA's vector ALU
s[res_shr].pragma(s[res_shr].op.axis[0], env.alu)
s[res_min].pragma(s[res_min].op.axis[0], env.alu)
s[res_max].pragma(s[res_max].op.axis[0], env.alu)
# Let's look at the final lowered TVM schedule after lowering memory
# loads/stores down to DMA copy intrinsics, and the computation down to
# VTA compute intrinsics.
print(vta.lower(s, [data, weight, res], simple_mode=True))
######################################################################
# TVM Compilation and Verification
# --------------------------------
# After specifying the schedule, we can compile it into a TVM function.
# We save the module so we can send it over RPC.
# We run the function and verify it against a numpy implementation to
# ensure correctness.
# Compile the TVM module
my_gemm = vta.build(
    s, [data, weight, res], tvm.target.Target("ext_dev", host=env.target_host), name="my_gemm"
)
temp = utils.tempdir()
my_gemm.save(temp.relpath("gemm.o"))
remote.upload(temp.relpath("gemm.o"))
f = remote.load_module("gemm.o")
# Get the remote device context
ctx = remote.ext_dev(0)
# Initialize the data and weight arrays randomly in the int range of
# [-128, 128) (np.random.randint's high bound is exclusive)
data_np = np.random.randint(-128, 128, size=(batch_size, in_channels)).astype(data.dtype)
weight_np = np.random.randint(-128, 128, size=(out_channels, in_channels)).astype(weight.dtype)
# Apply packing to the data and weight arrays from a 2D to a 4D packed layout
data_packed = data_np.reshape(
    batch_size // env.BATCH, env.BATCH, in_channels // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
weight_packed = weight_np.reshape(
    out_channels // env.BLOCK_OUT, env.BLOCK_OUT, in_channels // env.BLOCK_IN, env.BLOCK_IN
).transpose((0, 2, 1, 3))
# Format the input/output arrays with tvm.nd.array to the DLPack standard
data_nd = tvm.nd.array(data_packed, ctx)
weight_nd = tvm.nd.array(weight_packed, ctx)
res_nd = tvm.nd.array(np.zeros(output_shape).astype(res.dtype), ctx)
# Clear stats
if env.TARGET in ["sim", "tsim"]:
    simulator.clear_stats()
# Invoke the module to perform the computation
f(data_nd, weight_nd, res_nd)
# Verify against numpy implementation, applying the same shift/clip/cast
# post-processing as the res_shr/res_max/res_min/res stages above
res_ref = np.dot(data_np.astype(env.acc_dtype), weight_np.T.astype(env.acc_dtype))
res_ref = res_ref >> env.INP_WIDTH
res_ref = np.clip(res_ref, 0, inp_max)
res_ref = res_ref.astype(res.dtype)
# Repack the 2D reference result into the 4D packed layout used on device
res_ref = res_ref.reshape(
    batch_size // env.BATCH, env.BATCH, out_channels // env.BLOCK_OUT, env.BLOCK_OUT
).transpose((0, 2, 1, 3))
np.testing.assert_equal(res_ref, res_nd.numpy())
# Print stats
if env.TARGET in ["sim", "tsim"]:
    sim_stats = simulator.stats()
    print("Execution statistics:")
    for k, v in sim_stats.items():
        print("\t{:<16}: {:>16}".format(k, v))
print("Successful blocked matrix multiply test!")
######################################################################
# Summary
# -------
# This tutorial demonstrates how TVM scheduling primitives can achieve
# computation blocking for a matrix multiplication example.
# This allows us to map arbitrarily large computation onto limited
# hardware accelerator resources.
#
| 14,466 | 37.578667 | 97 | py |
tvm | tvm-main/vta/tutorials/frontend/deploy_classification.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy Pretrained Vision Model from MxNet on VTA
================================================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an end-to-end demo, on how to run ImageNet classification
inference onto the VTA accelerator design to perform ImageNet classification tasks.
It showcases Relay as a front end compiler that can perform quantization (VTA
only supports int8/32 inference) as well as graph packing (in order to enable
tensorization in the core) to massage the compute graph for the hardware target.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user mxnet requests "Pillow<7"
#
# Now return to the python code. Import packages.
from __future__ import absolute_import, print_function
import argparse, json, os, requests, sys, time
from io import BytesIO
from os.path import join, isfile
from PIL import Image
from mxnet.gluon.model_zoo import vision
import numpy as np
from matplotlib import pyplot as plt
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
from tvm.relay import transform
import vta
from vta.testing import simulator
from vta.top import graph_pack
# Make sure that TVM was compiled with RPC=1
assert tvm.runtime.enabled("rpc")
######################################################################
# Define the platform and model targets
# -------------------------------------
# Execute on CPU vs. VTA, and define the model.

# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()

# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
if device == "vta":
    target = env.target
else:
    target = env.target_vta_cpu

# Dictionary lookup for when to start/end bit packing.
# Every supported ResNet variant is packed over the same region: from the
# first max pool to the global average pool.
_PACK_BOUNDARIES = ["nn.max_pool2d", "nn.global_avg_pool2d"]
pack_dict = {
    name: list(_PACK_BOUNDARIES)
    for name in (
        "resnet18_v1",
        "resnet34_v1",
        "resnet18_v2",
        "resnet34_v2",
        "resnet50_v2",
        "resnet101_v2",
    )
}

# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
model = "resnet18_v1"
assert model in pack_dict
######################################################################
# Obtain an execution remote
# --------------------------
# When target is 'pynq', reconfigure FPGA and runtime.
# Otherwise, if target is 'sim', execute locally.
if env.TARGET not in ["sim", "tsim", "intelfocl"]:
    # Hardware target: open an RPC session to a physical board.
    # Get remote from tracker node if environment variable is set.
    # To set up the tracker, you'll need to follow the "Auto-tuning
    # a convolutional network for VTA" tutorial.
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    # Otherwise if you have a device you want to program directly from
    # the host, make sure you've set the variables below to the IP of
    # your board.
    device_host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
    device_port = os.environ.get("VTA_RPC_PORT", "9091")
    if not tracker_host or not tracker_port:
        # No tracker configured: connect straight to the board.
        remote = rpc.connect(device_host, int(device_port))
    else:
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )
    # Reconfigure the JIT runtime and FPGA.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    reconfig_start = time.time()
    vta.reconfig_runtime(remote)
    vta.program_fpga(remote, bitstream=None)
    reconfig_time = time.time() - reconfig_start
    print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
# In simulation mode, host the RPC server locally.
else:
    remote = rpc.LocalSession()
    if env.TARGET in ["intelfocl"]:
        # program intelfocl aocx
        vta.program_fpga(remote, bitstream="vta.bitstream")
# Get execution context from remote: the VTA extension device when running
# on the accelerator, otherwise the remote CPU.
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
######################################################################
# Build the inference graph executor
# ----------------------------------
# Grab vision model from Gluon model zoo and compile with Relay.
# The compilation steps are:
#
# 1. Front end translation from MxNet into Relay module.
# 2. Apply 8-bit quantization: here we skip the first conv layer,
#    and dense layer which will both be executed in fp32 on the CPU.
# 3. Perform graph packing to alter the data layout for tensorization.
# 4. Perform constant folding to reduce number of operators (e.g. eliminate batch norm multiply).
# 5. Perform relay build to object file.
# 6. Load the object file onto remote (FPGA device).
# 7. Generate graph executor, `m`.
#
# Load pre-configured AutoTVM schedules
with autotvm.tophub.context(target):
    # Populate the shape and data type dictionary for ImageNet classifier input
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    # Measure build start time
    build_start = time.time()
    # Start front end compilation
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Update shape and type dictionary with the parameter tensors as well
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
    if target.device_name == "vta":
        # Perform quantization in Relay
        # Note: We set opt_level to 3 in order to fold batch norm
        with tvm.transform.PassContext(opt_level=3):
            with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
                mod = relay.quantize.quantize(mod, params=params)
        # Perform graph packing and constant folding for VTA target
        assert env.BLOCK_IN == env.BLOCK_OUT
        # do device annotation if target is intelfocl or sim
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=pack_dict[model][0],
            stop_name=pack_dict[model][1],
            device_annot=(env.TARGET == "intelfocl"),
        )
    else:
        # Non-VTA target: keep the unpacked main function.
        relay_prog = mod["main"]
    # Compile Relay program with AlterOpLayout disabled
    if target.device_name != "vta":
        with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
            graph, lib, params = relay.build(
                relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
            )
    else:
        if env.TARGET == "intelfocl":
            # multiple targets to run both on cpu and vta (heterogeneous build)
            target = {"cpu": env.target_vta_cpu, "ext_dev": target}
        with vta.build_config(
            opt_level=3, disabled_pass={"AlterOpLayout", "tir.CommonSubexprElimTIR"}
        ):
            graph, lib, params = relay.build(
                relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
            )
    # Measure Relay build time
    build_time = time.time() - build_start
    print(model + " inference graph built in {0:.2f}s!".format(build_time))
    # Send the inference library over to the remote RPC server
    temp = utils.tempdir()
    lib.export_library(temp.relpath("graphlib.tar"))
    remote.upload(temp.relpath("graphlib.tar"))
    lib = remote.load_module("graphlib.tar")
    if env.TARGET == "intelfocl":
        # Heterogeneous execution needs both the accelerator and CPU contexts.
        ctxes = [remote.ext_dev(0), remote.cpu(0)]
        m = graph_executor.create(graph, lib, ctxes)
    else:
        # Graph runtime
        m = graph_executor.create(graph, lib, ctx)
######################################################################
# Perform image classification inference
# --------------------------------------
# We run classification on an image sample from ImageNet
# We just need to download the categories files, `synset.txt`
# and an input test image.
# Download ImageNet categories
import ast

categ_url = "https://github.com/uwsampl/web-data/raw/main/vta/models/"
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
# The synset file holds a Python dict literal mapping class index -> label.
# Parse it with ast.literal_eval rather than eval(): literal_eval only
# accepts literal syntax, so a tampered download cannot execute arbitrary
# code. The context manager also guarantees the file handle is closed.
with open(categ_fn) as f:
    synset = ast.literal_eval(f.read())
# Download test image
image_url = "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg"
image_fn = "cat.png"
download.download(image_url, image_fn)
# Prepare test image for inference
image = Image.open(image_fn).resize((224, 224))
plt.imshow(image)
plt.show()
# Per-channel mean subtraction and scaling (ImageNet-style normalization).
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
# Reorder from HWC to CHW, then add the batch dimension and replicate the
# sample to fill the VTA batch size.
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
image = np.repeat(image, env.BATCH, axis=0)
# Set the network parameters and inputs
m.set_input(**params)
m.set_input("data", image)
# Perform inference and gather execution statistics
# More on: :py:method:`tvm.runtime.Module.time_evaluator`
num = 4  # number of times we run module for a single measurement
rep = 3  # number of measurements (we derive std dev from this)
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
    # Simulator targets: report cycle-accurate statistics instead of wall time.
    simulator.clear_stats()
    timer()
    sim_stats = simulator.stats()
    print("\nExecution statistics:")
    for k, v in sim_stats.items():
        # Since we execute the workload many times, we need to normalize stats
        # Note that there is always one warm up run
        # Therefore we divide the overall stats by (num * rep + 1)
        print("\t{:<16}: {:>16}".format(k, v // (num * rep + 1)))
else:
    # Hardware targets: report wall-clock latency in milliseconds.
    tcost = timer()
    std = np.std(tcost.results) * 1000
    mean = tcost.mean * 1000
    print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
    print("Average per sample inference time: %.2fms" % (mean / env.BATCH))
# Get classification results
tvm_output = m.get_output(0, tvm.nd.empty((env.BATCH, 1000), "float32", remote.cpu(0)))
for b in range(env.BATCH):
    # argsort is ascending, so the highest-scoring classes are at the end.
    top_categories = np.argsort(tvm_output.numpy()[b])
    # Report top-5 classification results
    print("\n{} prediction for sample {}".format(model, b))
    print("\t#1:", synset[top_categories[-1]])
    print("\t#2:", synset[top_categories[-2]])
    print("\t#3:", synset[top_categories[-3]])
    print("\t#4:", synset[top_categories[-4]])
    print("\t#5:", synset[top_categories[-5]])
    # This just checks that one of the 5 top categories
    # is one variety of cat; this is by no means an accurate
    # assessment of how quantization affects classification
    # accuracy but is meant to catch changes to the
    # quantization pass that would affect accuracy in the CI.
    cat_detected = False
    for k in top_categories[-5:]:
        if "cat" in synset[k]:
            cat_detected = True
    assert cat_detected
| 12,219 | 38.546926 | 97 | py |
tvm | tvm-main/vta/tutorials/frontend/deploy_detection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy Pretrained Vision Detection Model from Darknet on VTA
============================================================
**Author**: `Hua Jiang <https://github.com/huajsj>`_
This tutorial provides an end-to-end demo, on how to run Darknet YoloV3-tiny
inference onto the VTA accelerator design to perform Image detection tasks.
It showcases Relay as a front end compiler that can perform quantization (VTA
only supports int8/32 inference) as well as graph packing (in order to enable
tensorization in the core) to massage the compute graph for the hardware target.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install "Pillow<7"
#
# The YOLO-V3-tiny model with Darknet parsing depends on the CFFI and CV2
# libraries, so we need to install CFFI and CV2 before executing this script.
#
# .. code-block:: bash
#
# pip3 install cffi
# pip3 install opencv-python
#
# Now return to the python code. Import packages.
from __future__ import absolute_import, print_function
import sys
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import tvm
import vta
from tvm import rpc, autotvm, relay
from tvm.relay.testing import yolo_detection, darknet
from tvm.relay.testing.darknet import __darknetffi__
from tvm.contrib import graph_executor, utils
from tvm.contrib.download import download_testdata
from vta.testing import simulator
from vta.top import graph_pack
# Make sure that TVM was compiled with RPC=1
assert tvm.runtime.enabled("rpc")
##############################################################################
# Download yolo net configure file, weight file, darknet library file based on
# Model Name
# ----------------------------------------------------------------------------
MODEL_NAME = "yolov3-tiny"
REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/"
# Network definition (.cfg) and pretrained weights come from the upstream
# darknet project; the shared library comes from the dmlc web-data mirror.
cfg_path = download_testdata(
    "https://github.com/pjreddie/darknet/blob/master/cfg/" + MODEL_NAME + ".cfg" + "?raw=true",
    MODEL_NAME + ".cfg",
    module="darknet",
)
weights_path = download_testdata(
    "https://pjreddie.com/media/files/" + MODEL_NAME + ".weights" + "?raw=true",
    MODEL_NAME + ".weights",
    module="darknet",
)
if sys.platform in ["linux", "linux2"]:
    darknet_lib_path = download_testdata(
        REPO_URL + "lib/" + "libdarknet2.0.so" + "?raw=true", "libdarknet2.0.so", module="darknet"
    )
elif sys.platform == "darwin":
    darknet_lib_path = download_testdata(
        REPO_URL + "lib_osx/" + "libdarknet_mac2.0.so" + "?raw=true",
        "libdarknet_mac2.0.so",
        module="darknet",
    )
else:
    # Only Linux and macOS prebuilt darknet libraries are available.
    raise NotImplementedError("Darknet lib is not supported on {} platform".format(sys.platform))
##################################################
# Download yolo categories and illustration font.
# ------------------------------------------------
coco_path = download_testdata(
    REPO_URL + "data/" + "coco.names" + "?raw=true", "coco.names", module="data"
)
font_path = download_testdata(
    REPO_URL + "data/" + "arial.ttf" + "?raw=true", "arial.ttf", module="data"
)
# One COCO class name per line.
with open(coco_path) as f:
    content = f.readlines()
names = [x.strip() for x in content]
########################################
# Define the platform and model targets.
# --------------------------------------
# Execute on CPU vs. VTA, and define the model.
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
pack_dict = {
    "yolov3-tiny": ["nn.max_pool2d", "cast", 4, 186],
}
# Name of Darknet model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
# The number 4 indicates that the ``start_pack`` index is 4, and the
# number 186 indicates that the ``stop_pack`` index is 186. By using
# both a name and an index number we can locate the correct place
# to start/end when there are multiple ``nn.max_pool2d``
# or ``cast`` operators; print(mod.astext(show_meta_data=False)) can help
# to find operator name and index information.
assert MODEL_NAME in pack_dict
#############################
# Obtain an execution remote.
# ---------------------------
# When target is 'pynq' or other FPGA backend, reconfigure FPGA and runtime.
# Otherwise, if target is 'sim', execute locally.
if env.TARGET not in ["sim", "tsim"]:
    # Hardware target: open an RPC session to a physical board.
    # Get remote from tracker node if environment variable is set.
    # To set up the tracker, you'll need to follow the "Auto-tuning
    # a convolutional network for VTA" tutorial.
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    # Otherwise if you have a device you want to program directly from
    # the host, make sure you've set the variables below to the IP of
    # your board.
    device_host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
    device_port = os.environ.get("VTA_RPC_PORT", "9091")
    if not tracker_host or not tracker_port:
        # No tracker configured: connect straight to the board.
        remote = rpc.connect(device_host, int(device_port))
    else:
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )
    # Reconfigure the JIT runtime and FPGA.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    reconfig_start = time.time()
    vta.reconfig_runtime(remote)
    vta.program_fpga(remote, bitstream=None)
    reconfig_time = time.time() - reconfig_start
    print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
# In simulation mode, host the RPC server locally.
else:
    remote = rpc.LocalSession()
# Get execution context from remote: the VTA extension device when running
# on the accelerator, otherwise the remote CPU.
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
#####################################
# Build the inference graph executor.
# -----------------------------------
# Using Darknet library load downloaded vision model and compile with Relay.
# The compilation steps are:
#
# 1. Front end translation from Darknet into Relay module.
# 2. Apply 8-bit quantization: here we skip the first conv layer,
#    and dense layer which will both be executed in fp32 on the CPU.
# 3. Perform graph packing to alter the data layout for tensorization.
# 4. Perform constant folding to reduce number of operators (e.g. eliminate batch norm multiply).
# 5. Perform relay build to object file.
# 6. Load the object file onto remote (FPGA device).
# 7. Generate graph executor, `m`.
# Load pre-configured AutoTVM schedules
with autotvm.tophub.context(target):
    # Load the network definition and weights through the darknet C library.
    net = __darknetffi__.dlopen(darknet_lib_path).load_network(
        cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0
    )
    # Input shape: (batch, channels, height, width) taken from the network.
    dshape = (env.BATCH, net.c, net.h, net.w)
    dtype = "float32"
    # Measure build start time
    build_start = time.time()
    # Start front end compilation
    mod, params = relay.frontend.from_darknet(net, dtype=dtype, shape=dshape)
    if target.device_name == "vta":
        # Perform quantization in Relay
        # Note: We set opt_level to 3 in order to fold batch norm
        with tvm.transform.PassContext(opt_level=3):
            with relay.quantize.qconfig(
                global_scale=23.0,
                skip_conv_layers=[0],
                store_lowbit_output=True,
                round_for_shift=True,
            ):
                mod = relay.quantize.quantize(mod, params=params)
        # Perform graph packing and constant folding for VTA target
        mod = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=pack_dict[MODEL_NAME][0],
            stop_name=pack_dict[MODEL_NAME][1],
            start_name_idx=pack_dict[MODEL_NAME][2],
            stop_name_idx=pack_dict[MODEL_NAME][3],
        )
    else:
        # Non-VTA target: keep the unpacked main function.
        mod = mod["main"]
    # Compile Relay program with AlterOpLayout disabled
    with vta.build_config(disabled_pass={"AlterOpLayout", "tir.CommonSubexprElimTIR"}):
        lib = relay.build(
            mod, target=tvm.target.Target(target, host=env.target_host), params=params
        )
    # Measure Relay build time
    build_time = time.time() - build_start
    print(MODEL_NAME + " inference graph built in {0:.2f}s!".format(build_time))
    # Send the inference library over to the remote RPC server
    temp = utils.tempdir()
    lib.export_library(temp.relpath("graphlib.tar"))
    remote.upload(temp.relpath("graphlib.tar"))
    lib = remote.load_module("graphlib.tar")
    # Graph executor
    m = graph_executor.GraphModule(lib["default"](ctx))
####################################
# Perform image detection inference.
# ----------------------------------
# We run detect on an downloaded image
# Download test image
[neth, netw] = dshape[2:]
test_image = "person.jpg"
img_url = REPO_URL + "data/" + test_image + "?raw=true"
img_path = download_testdata(img_url, test_image, "data")
data = darknet.load_image(img_path, neth, netw).transpose(1, 2, 0)
# Prepare test image for inference
plt.imshow(data)
plt.show()
# Reorder from HWC to CHW, add the batch dimension, and replicate the
# sample to fill the VTA batch size.
data = data.transpose((2, 0, 1))
data = data[np.newaxis, :]
data = np.repeat(data, env.BATCH, axis=0)
# Set the network parameters and inputs
m.set_input("data", data)
# Perform inference and gather execution statistics
# More on: :py:method:`tvm.runtime.Module.time_evaluator`
num = 4  # number of times we run module for a single measurement
rep = 3  # number of measurements (we derive std dev from this)
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
    # Simulator targets: report cycle-accurate statistics instead of wall time.
    simulator.clear_stats()
    timer()
    sim_stats = simulator.stats()
    print("\nExecution statistics:")
    for k, v in sim_stats.items():
        # Since we execute the workload many times, we need to normalize stats
        # Note that there is always one warm up run
        # Therefore we divide the overall stats by (num * rep + 1)
        print("\t{:<16}: {:>16}".format(k, v // (num * rep + 1)))
else:
    # Hardware targets: report wall-clock latency in milliseconds.
    tcost = timer()
    std = np.std(tcost.results) * 1000
    mean = tcost.mean * 1000
    print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
    print("Average per sample inference time: %.2fms" % (mean / env.BATCH))
# Get detection results from out
thresh = 0.5
nms_thresh = 0.45
tvm_out = []
# YOLOv3-tiny has two yolo output heads; each head contributes four outputs
# (feature map, mask, biases, attributes) laid out at stride 4.
for i in range(2):
    layer_out = {}
    layer_out["type"] = "Yolo"
    # Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total)
    layer_attr = m.get_output(i * 4 + 3).numpy()
    layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
    layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
    out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
    layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
    layer_out["classes"] = layer_attr[4]
    tvm_out.append(layer_out)
# NOTE: overrides the initial 0.5 threshold before boxes are filled below.
thresh = 0.560
# Show detection results
img = darknet.load_image_color(img_path)
_, im_h, im_w = img.shape
dets = yolo_detection.fill_network_boxes((netw, neth), (im_w, im_h), thresh, 1, tvm_out)
last_layer = net.layers[net.n - 1]
yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh)
yolo_detection.draw_detections(font_path, img, dets, thresh, names, last_layer.classes)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
| 12,542 | 37.832817 | 98 | py |
tvm | tvm-main/vta/tutorials/autotvm/tune_alu_vta.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a ALU fused op on VTA
---------------------------------
"""
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm import record
import vta
from vta.testing import simulator
from vta.top import graph_pack
import copy
#################################################################
# Compile network
# ---------------
# Perform vta-specific compilation with Relay from a Gluon model
def compile_network(env, target, model, start_pack, stop_pack):
    """Quantize and (for VTA targets) pack a Gluon vision model with Relay.

    Parameters
    ----------
    env : vta.Environment
        VTA configuration (provides BATCH, BLOCK_IN/BLOCK_OUT, WGT_WIDTH).
    target : tvm.target.Target
        Compilation target; graph packing is applied only when
        ``target.device_name == "vta"``.
    model : str
        Name of the Gluon model-zoo network to fetch (e.g. "resnet50_v2").
    start_pack, stop_pack : str
        Operator names delimiting the region offloaded to VTA.

    Returns
    -------
    relay_prog : tvm.relay.Function
        The quantized (and, for VTA, packed) main function.
    params : dict
        The network parameters.
    """
    # Populate the shape and data type dictionary for the classifier input
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}

    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)

    # Update shape and type dictionary with the parameter tensors as well
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})

    # Perform quantization in Relay.
    # Note: We set opt_level to 3 in order to fold batch norm.
    # tvm.transform.PassContext replaces the deprecated relay.build_config
    # and matches the other VTA tutorials in this directory.
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params=params)

    # Perform graph packing and constant folding for VTA target
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=start_pack,
            stop_name=stop_pack,
        )
    else:
        # Previously ``relay_prog`` was unbound on non-VTA targets, raising
        # UnboundLocalError on return; fall back to the unpacked main function.
        relay_prog = mod["main"]
    return relay_prog, params
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations.
# Here we use a Pynq-Z1 board as an example.
# Tracker host and port can be set by your environment
tracker_host = os.environ.get("TVM_TRACKER_HOST", "0.0.0.0")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
# Load VTA parameters from the vta/config/vta_config.json file
env = vta.get_env()
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
network = "resnet50_v2"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"
# Tuning options: log file name encodes the device and network being tuned.
log_file = "%s.alu.%s.log" % (device, network)
tuning_option = {
    "log_filename": log_file,
    "tuner": "random",
    "n_trial": 1000,
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(n_parallel=1),
        runner=autotvm.RPCRunner(
            env.TARGET,
            host=tracker_host,
            port=tracker_port,
            number=5,
            timeout=60,
            # check_correctness=True, # TODO: re-enable when check_correctness works again.
        ),
    ),
}
def log_to_file(file_out, protocol="json"):
    """Create a tuning callback that appends measurement records to a file.

    Each record is serialized with ``autotvm.record.encode``.  When a task's
    lhs and rhs arguments are identical, an extra record with an empty rhs
    shape is written as well.

    Parameters
    ----------
    file_out : str
        Path of the log file to append to.
    protocol : str, optional
        The log protocol. Can be 'json' or 'pickle'.

    Returns
    -------
    callable
        Callback suitable for passing to ``tuner.tune(callbacks=[...])``.
    """

    def _write_records(_, measure_inputs, measure_results):
        with open(file_out, "a") as log:
            for measure_inp, measure_res in zip(measure_inputs, measure_results):
                log.write(record.encode(measure_inp, measure_res, protocol) + "\n")
                # we only consider task with same lhs and rhs
                lhs = measure_inp.task.args[0]
                if lhs == measure_inp.task.args[1]:
                    new_args = list(measure_inp.task.args)
                    new_args[1] = (lhs[0], (), lhs[2])
                    inp_clone = copy.deepcopy(measure_inp)
                    inp_clone.task.args = tuple(new_args)
                    log.write(record.encode(inp_clone, measure_res, protocol) + "\n")

    return _write_records
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=10,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of AutoTVM tasks and write the best records to a log file.

    Results are accumulated in ``log_filename + ".tmp"`` while tuning and the
    best record per workload is distilled into ``log_filename`` at the end.
    """
    # (loss_type, feature_type) for every XGBoost-based tuner variant;
    # a feature type of None means "use the tuner's default features".
    xgb_variants = {
        "xgb": ("reg", None),
        "xgb_knob": ("reg", "knob"),
        "xgb_itervar": ("reg", "itervar"),
        "xgb_curve": ("reg", "curve"),
        "xgb_rank": ("rank", None),
        "xgb_rank_knob": ("rank", "knob"),
        "xgb_rank_itervar": ("rank", "itervar"),
        "xgb_rank_curve": ("rank", "curve"),
        "xgb_rank_binary": ("rank-binary", None),
        "xgb_rank_binary_knob": ("rank-binary", "knob"),
        "xgb_rank_binary_itervar": ("rank-binary", "itervar"),
        "xgb_rank_binary_curve": ("rank-binary", "curve"),
    }

    def _make_tuner(task):
        # Instantiate the tuner selected by the ``tuner`` string.
        if tuner in xgb_variants:
            loss_type, feature_type = xgb_variants[tuner]
            if feature_type is None:
                return XGBTuner(task, loss_type=loss_type)
            return XGBTuner(task, loss_type=loss_type, feature_type=feature_type)
        if tuner == "ga":
            return GATuner(task, pop_size=50)
        if tuner == "random":
            return RandomTuner(task)
        if tuner == "gridsearch":
            return GridSearchTuner(task)
        raise ValueError("Invalid tuner: " + tuner)

    # create tmp log file
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))
        tuner_obj = _make_tuner(tsk)

        # Warm-start from records gathered so far in this run.
        if use_transfer_learning and os.path.isfile(tmp_log_file):
            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # do tuning; never exceed the size of the task's config space
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                log_to_file(tmp_log_file),
            ],
        )

    # pick best records to a cache file
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
########################################################################
# Register VTA-specific tuning tasks
def register_vta_tuning_tasks():
    """Register the VTA 'add.vta' and 'multiply.vta' AutoTVM templates.

    Registration happens as a side effect of the ``@autotvm.template``
    decorators below; this function returns nothing.
    """
    from tvm.autotvm.task import TaskExtractEnv
    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x
    # init autotvm env to register VTA operator
    TaskExtractEnv()
    @autotvm.template("add.vta")
    def _topi_add(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        A, B = args[:2]
        with tvm.target.vta():
            # Packed add followed by clip to int8 range and cast.
            res = vta.top.op.add_packed(*args, **kwargs)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.op.schedule_add_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, B, res]
    @autotvm.template("multiply.vta")
    def _topi_multiply(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        A, B = args[:2]
        with tvm.target.vta():
            # Packed multiply followed by clip to int8 range and cast.
            res = vta.top.op.multiply_packed(*args, **kwargs)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.op.schedule_multiply_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, B, res]
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Extract packed ALU tasks from the network and tune them.

    Only runs for the 'intelfocl' VTA target; prints progress to stdout.
    """
    if env.TARGET != "intelfocl":
        print("ALU only op only available for intelfocl target")
        return
    # Register VTA tuning tasks
    register_vta_tuning_tasks()
    # Perform task extraction on Relay program
    print("Extract tasks...")
    relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
    mod = tvm.IRModule.from_expr(relay_prog)
    tasks = autotvm.task.extract_from_program(
        mod,
        params=params,
        ops=(
            relay.op.get("add"),
            relay.op.get("multiply"),
        ),
        target=tvm.target.Target(target, host=env.target_host),
    )
    # filter out non-packed alu task
    tasks = list(filter(lambda t: len(t.args[0][1]) > 4, tasks))
    # filter out float alu task
    tasks = list(filter(lambda t: t.args[0][2] != "float32", tasks))
    # Deduplicate the extracted ALU tasks by (name, args).
    tasks_set = {}
    print("Extracted {} alu tasks:".format(len(tasks)))
    for tsk in tasks:
        print("tsk = ", tsk)
        # Normalize tasks whose rhs shape is empty by copying the lhs args.
        if len(tsk.args[1][1]) == 0:
            args = list(tsk.args)
            args[1] = args[0]
            tsk.args = tuple(args)
        if (tsk.name, tsk.args) in tasks_set:
            print("task {} already exists".format(tsk))
        tasks_set[(tsk.name, tsk.args)] = tsk
    tasks = list(tasks_set.values())
    print("After merged, final #tasks={}, tasks = {}".format(len(tasks), tasks))
    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)
# Entry point: run the tuning flow with the options defined above.
tune_and_evaluate(tuning_option)
| 11,793 | 33.58651 | 95 | py |
tvm | tvm-main/vta/tutorials/autotvm/tune_relay_vta.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a convolutional network on VTA
==========================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
Auto-tuning for a specific accelerator design is critical for getting the best
performance for any given operator. This tutorial showcases how to tune a
whole convolutional network on VTA.
The operator implementation for VTA in TVM is written in template form.
The template has many tunable knobs (tile factor, virtual threads, etc).
We will tune all convolution operators in the neural network. After tuning,
we produce a log file which stores the best schedule parameters for all tuned
operators. When the TVM compiler compiles these operators, it will query this
log file to get the best knob parameters.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado mxnet requests "Pillow<7" cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of TVM. In the root directory of TVM, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import vta
from vta.testing import simulator
from vta.top import graph_pack
#################################################################
# Compile network
# ---------------
# Perform vta-specific compilation with Relay from a Gluon model
def compile_network(env, target, model, start_pack, stop_pack):
    """Compile a Gluon model into a quantized (and, for VTA, graph-packed) Relay program.

    Parameters
    ----------
    env : vta.Environment
        VTA hardware environment; supplies BATCH, BLOCK_IN/BLOCK_OUT and WGT_WIDTH.
    target : tvm.target.Target
        Compilation target. Graph packing is applied only when
        ``target.device_name == "vta"``.
    model : str
        Name of the Gluon model-zoo model (e.g. ``"resnet18_v1"``).
    start_pack, stop_pack : str
        Relay operator names that delimit the region offloaded to VTA.

    Returns
    -------
    (relay.Function, dict)
        The Relay program and its parameter dictionary.
    """
    # Populate the shape and data type dictionary
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Update shape and type dictionary
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
    # Perform quantization in Relay
    # Note: We set opt_level to 3 in order to fold batch norm
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params=params)
    # Perform graph packing and constant folding for VTA target
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=start_pack,
            stop_name=stop_pack,
        )
    else:
        # Bug fix: `relay_prog` was previously left unbound for non-VTA
        # targets, raising UnboundLocalError at the return below. For a CPU
        # target no packing is needed, so return the quantized main function.
        relay_prog = mod["main"]
    return relay_prog, params
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses an RPC session to communicate with Pynq boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up tuning, TVM uses an RPC Tracker to manage multiple devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 Pynq boards, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is:
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the Pynq devices.
#
# Follow :ref:`vta-index`
# to build the TVM runtime on the device. Then register the device to the tracker with:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=pynq
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# After registering devices, we can confirm it by querying the rpc_tracker:
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 6 Pynq boards and 11 Raspberry Pi 3B,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# pynq 6 6 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations.
# Here we use a Pynq-Z1 board as an example.
# Tracker host and port can be set by your environment
tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
network = "resnet18_v1"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"
# Tuning option
log_file = "%s.%s.log" % (device, network)
tuning_option = {
    "log_filename": log_file,  # file that accumulates the best schedules found
    "tuner": "random",  # tuner algorithm; see tune_tasks() for valid names
    "n_trial": 1000,  # upper bound on configurations tried per task
    "early_stopping": None,  # stop a task early after this many trials without improvement
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.RPCRunner(
            env.TARGET,
            host=tracker_host,
            port=tracker_port,
            number=5,  # number of runs averaged per measurement
            timeout=60,
            module_loader=vta.module_loader(),
            # check_correctness=True, # TODO: re-enable when check_correctness works again.
        ),
    ),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping`
# to larger values, makes the tuning run for longer.
# If your device is under-powered or your conv2d operators are large, consider
# setting a longer timeout.
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
#
# Given that the tuning will be done on Pynq FPGA boards, make sure that
# the ``TARGET`` entry in the ``vta_config.json`` file is set to ``pynq``.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of autotvm tasks one after another.

    Raw tuning records accumulate in a temporary log; the best configuration
    per workload is distilled into ``log_filename`` at the end.
    """
    # Factory table mapping tuner names to constructors (equivalent to a
    # long if/elif chain over the supported tuner variants).
    tuner_factories = {
        "xgb": lambda t: XGBTuner(t, loss_type="reg"),
        "xgb_knob": lambda t: XGBTuner(t, loss_type="reg", feature_type="knob"),
        "xgb_itervar": lambda t: XGBTuner(t, loss_type="reg", feature_type="itervar"),
        "xgb_curve": lambda t: XGBTuner(t, loss_type="reg", feature_type="curve"),
        "xgb_rank": lambda t: XGBTuner(t, loss_type="rank"),
        "xgb_rank_knob": lambda t: XGBTuner(t, loss_type="rank", feature_type="knob"),
        "xgb_rank_itervar": lambda t: XGBTuner(t, loss_type="rank", feature_type="itervar"),
        "xgb_rank_curve": lambda t: XGBTuner(t, loss_type="rank", feature_type="curve"),
        "xgb_rank_binary": lambda t: XGBTuner(t, loss_type="rank-binary"),
        "xgb_rank_binary_knob": lambda t: XGBTuner(t, loss_type="rank-binary", feature_type="knob"),
        "xgb_rank_binary_itervar": lambda t: XGBTuner(
            t, loss_type="rank-binary", feature_type="itervar"
        ),
        "xgb_rank_binary_curve": lambda t: XGBTuner(
            t, loss_type="rank-binary", feature_type="curve"
        ),
        "ga": lambda t: GATuner(t, pop_size=50),
        "random": RandomTuner,
        "gridsearch": GridSearchTuner,
    }
    # Start from a clean temporary log file.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)
    task_count = len(tasks)
    for idx, task in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (idx + 1, task_count)
        # Instantiate the requested tuner for this task.
        factory = tuner_factories.get(tuner)
        if factory is None:
            raise ValueError("Invalid tuner: " + tuner)
        tuner_obj = factory(task)
        # Warm-start from records gathered for earlier tasks in this run.
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
        # Never request more trials than the size of the config space.
        task_trials = min(n_trial, len(task.config_space))
        tuner_obj.tune(
            n_trial=task_trials,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(task_trials, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # Keep only the best record per workload, then drop the raw log.
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
########################################################################
# Register VTA-specific tuning tasks
def register_vta_tuning_tasks():
    """Register the ``conv2d_packed.vta`` autotvm template used during task extraction."""
    from tvm.autotvm.task import TaskExtractEnv

    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x

    # init autotvm env to register VTA operator
    TaskExtractEnv()

    @autotvm.template("conv2d_packed.vta")
    def _topi_nn_conv2d(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        A, W = args[:2]
        # Compute conv2d, requantize (shift + clip) and cast to int8 under the
        # VTA target so tunable knobs are registered against this template.
        with tvm.target.vta():
            res = vta.top.conv2d_packed(*args, **kwargs)
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        # Use the VTA schedule on the accelerator, otherwise a default
        # CPU schedule.
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.schedule_conv2d_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Extract conv2d tasks from the network, tune them, then compile and time inference.

    Parameters
    ----------
    tuning_opt : dict
        Keyword arguments forwarded to :func:`tune_tasks` (see ``tuning_option``).
    """
    # Register VTA tuning tasks
    register_vta_tuning_tasks()
    # Perform task extraction on Relay program
    print("Extract tasks...")
    relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
    mod = tvm.IRModule.from_expr(relay_prog)
    tasks = autotvm.task.extract_from_program(
        mod,
        params=params,
        ops=(relay.op.get("nn.conv2d"),),
        target=target,
        target_host=env.target_host,
    )
    # filter out non-packed conv2d task: packed tensors have >4 dims
    tasks = list(filter(lambda t: len(t.args[0][1]) > 4 and "conv" in t.name, tasks))
    # We should have extracted 10 convolution tasks
    assert len(tasks) == 10
    print("Extracted {} conv2d tasks:".format(len(tasks)))
    for tsk in tasks:
        # Recover the logical (unpacked) workload dimensions from the packed
        # input/weight shapes for display.
        inp = tsk.args[0][1]
        wgt = tsk.args[1][1]
        batch = inp[0] * inp[4]
        in_filter = inp[1] * inp[5]
        out_filter = wgt[0] * wgt[4]
        height, width = inp[2], inp[3]
        hkernel, wkernel = wgt[2], wgt[3]
        hstride, wstride = tsk.args[2][0], tsk.args[2][1]
        hpad, wpad = tsk.args[3][0], tsk.args[3][1]
        print(
            "({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
                batch,
                height,
                width,
                in_filter,
                out_filter,
                hkernel,
                wkernel,
                hpad,
                wpad,
                hstride,
                wstride,
            )
        )
    # We do not run the tuning in our webpage server since it takes too long.
    # Comment the following line to run it by yourself.
    # NOTE: everything below this early return is intentionally skipped in docs builds.
    return
    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)
    # evaluate with tuning history
    if env.TARGET != "sim":
        # Get remote from fleet node
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, tracker_port, timeout=10000
        )
        # Reconfigure the JIT runtime and FPGA.
        vta.reconfig_runtime(remote)
        vta.program_fpga(remote, bitstream=None)
    else:
        # In simulation mode, host the RPC server locally.
        remote = rpc.LocalSession()
    # compile kernels with history best records
    with autotvm.tophub.context(target, extra_files=[log_file]):
        # Compile network
        print("Compile...")
        if target.device_name != "vta":
            with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
                lib = relay.build(
                    relay_prog, target=target, params=params, target_host=env.target_host
                )
        else:
            # VTA needs its own build_config so the custom lowering passes run.
            with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
                lib = relay.build(
                    relay_prog, target=target, params=params, target_host=env.target_host
                )
        # Export library
        print("Upload...")
        temp = utils.tempdir()
        lib.export_library(temp.relpath("graphlib.tar"))
        remote.upload(temp.relpath("graphlib.tar"))
        lib = remote.load_module("graphlib.tar")
        # Generate the graph executor
        ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
        m = graph_executor.GraphModule(lib["default"](ctx))
        # upload parameters to device
        image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
        m.set_input("data", image)
        # evaluate
        print("Evaluate inference time cost...")
        timer = m.module.time_evaluator("run", ctx, number=1, repeat=10)
        tcost = timer()
        prof_res = np.array(tcost.results) * 1000  # convert to millisecond
        print(
            "Mean inference time (std dev): %.2f ms (%.2f ms)"
            % (np.mean(prof_res), np.std(prof_res))
        )
# Run the tuning and evaluate the results
# (script entry point: everything above only defines helpers and options).
tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below.
# It takes about 2 hours on a 16T CPU, and 6 Pynq boards.
#
# .. code-block:: bash
#
# Extract tasks...
# [Warning] Invalid shape during AutoTVM task creation
# Extracted 10 conv2d tasks:
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (32, 16, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (32, 16, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (16, 8, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (16, 8, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (8, 4, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (8, 4, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (4, 4, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (4, 4, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (8, 8, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (8, 8, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (8, 4, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (8, 4, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (16, 16, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (16, 16, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (16, 8, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (16, 8, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 32, 7, 7, 1, 16), 'int8'), ('TENSOR', (32, 32, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 32, 7, 7, 1, 16, 'int8'), (32, 32, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (32, 16, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (32, 16, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Tuning...
# [Task 1/10] Current/Best: 0.72/ 23.24 GFLOPS | Progress: (480/1000) | 640.31 s Done.
# [Task 2/10] Current/Best: 0.00/ 27.69 GFLOPS | Progress: (576/1000) | 810.09 s Done.
# [Task 3/10] Current/Best: 0.00/ 22.97 GFLOPS | Progress: (1000/1000) | 1125.37 s Done.
# [Task 4/10] Current/Best: 0.00/ 31.26 GFLOPS | Progress: (1000/1000) | 1025.52 s Done.
# [Task 5/10] Current/Best: 0.00/ 15.15 GFLOPS | Progress: (1000/1000) | 1236.58 s Done.
# [Task 6/10] Current/Best: 0.00/ 22.74 GFLOPS | Progress: (1000/1000) | 906.60 s Done.
# [Task 7/10] Current/Best: 0.00/ 15.27 GFLOPS | Progress: (1000/1000) | 1056.25 s Done.
# [Task 8/10] Current/Best: 0.00/ 2.18 GFLOPS | Progress: (1000/1000) | 2275.29 s Done.
# [Task 9/10] Current/Best: 2.23/ 3.99 GFLOPS | Progress: (1000/1000) | 2527.25 s Done.
# [Task 10/10] Current/Best: 1.56/ 6.32 GFLOPS | Progress: (480/1000) | 1304.84 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 621.79 ms (0.14 ms)
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 22,597 | 41.397749 | 322 | py |
tvm | tvm-main/vta/python/vta/rpc_client.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA RPC client function"""
import os
from tvm import rpc
from vta import program_bitstream
from .environment import get_env
from .bitstream import download_bitstream, get_bitstream_path
def reconfig_runtime(remote):
    """Reconfigure remote runtime based on current hardware spec.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session
    """
    # Look up the remote reconfiguration hook and feed it the packaged
    # hardware configuration (JSON) of the active environment.
    reconfig = remote.get_function("tvm.contrib.vta.reconfig_runtime")
    reconfig(get_env().pkg.cfg_json)
def program_fpga(remote, bitstream=None):
    """Upload and program a bitstream onto the FPGA.

    Parameters
    ----------
    remote : RPCSession
        The TVM RPC session
    bitstream : str, optional
        Path to a local bitstream file. If unset, tries to download from cache server.
    """
    env = get_env()
    if bitstream:
        assert os.path.isfile(bitstream)
    else:
        # No explicit file given: resolve the cached bitstream path and
        # download it when missing (de10nano has no downloadable bitstream).
        bitstream = get_bitstream_path()
        if not os.path.isfile(bitstream):
            if env.TARGET == "de10nano":
                return
            download_bitstream()
    # Local sessions are programmed directly; remote sessions go through
    # the on-device "tvm.contrib.vta.init" RPC function.
    if isinstance(remote, rpc.LocalSession):
        program_bitstream.bitstream_program(env.TARGET, bitstream)
        return
    fprogram = remote.get_function("tvm.contrib.vta.init")
    remote.upload(bitstream)
    fprogram(os.path.basename(bitstream))
| 2,110 | 30.507463 | 85 | py |
tvm | tvm-main/vta/python/vta/build_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, invalid-name
"""VTA specific buildin for runtime."""
import tvm
from tvm.ir import register_intrin_lowering
from . import transform
from .environment import get_env, Environment
def EarlyRewrite():
    """Try to do storage rewrite in early pass."""

    def _rewrite(mod, ctx):
        # Fall back to the unmodified module when rewriting is not possible.
        try:
            rewritten = tvm.tir.transform.StorageRewrite()(mod)
        except tvm.error.TVMError:
            return mod
        return rewritten

    return tvm.transform.module_pass(_rewrite, opt_level=0, name="tir.vta.EarlyRewrite")
def build_config(debug_flag=0, **kwargs):
    """Build a build config for VTA.

    Parameters
    ----------
    debug_flag : int
        The debug flag to be passed.
    kwargs : dict
        Additional configurations. A ``config`` entry, if present, is merged
        into the VTA lowering config before constructing the PassContext.

    Returns
    -------
    build_config: tvm.transform.PassContext
        The build config that can be used in TVM.

    Example
    --------
    .. code-block:: python

      # build a vta module.
      with vta.build_config():
          vta_module = tvm.build(s, ...)
    """
    env = get_env()

    @tvm.tir.transform.prim_func_pass(opt_level=0)
    def add_debug(f, *_):
        # Prepend a VTASetDebugMode call so the runtime emits debug output.
        debug = tvm.tir.call_extern("int32", "VTASetDebugMode", env.dev.command_handle, debug_flag)
        return f.with_body(tvm.tir.stmt_seq(debug, f.body))

    # (phase, pass) pairs inserted into TVM's lowering pipeline.
    pass_list = [
        (0, transform.InjectConv2DTransposeSkip()),
        (1, transform.InjectDMAIntrin()),
        (1, transform.InjectSkipCopy()),
        (1, transform.AnnotateALUCoProcScope()),
        (1, tvm.tir.transform.LiftAttrScope("coproc_uop_scope")),
        (1, transform.LiftAllocToScopeBegin()),
        (1, tvm.tir.transform.LiftAttrScope("coproc_scope")),
        (1, transform.InjectCoProcSync()),
        (1, EarlyRewrite()),
    ]
    if debug_flag:
        pass_list.append((1, add_debug))
    pass_list.append((2, transform.InjectALUIntrin()))
    pass_list.append((3, tvm.tir.transform.LowerDeviceStorageAccessInfo()))
    pass_list.append((3, transform.FoldUopLoop()))
    pass_list.append((3, transform.CPUAccessRewrite()))
    config = {"tir.add_lower_pass": pass_list}
    # Bug fix: the original code did `config.update(kwargs[config])`, indexing
    # kwargs with the `config` dict itself — a TypeError (unhashable type)
    # whenever a user actually passed config=. pop() also removes the key so
    # PassContext does not receive `config` twice.
    user_config = kwargs.pop("config", None)
    if user_config:
        config.update(user_config)
    return tvm.transform.PassContext(config=config, **kwargs)
def lower(*args, **kwargs):
    """Thin wrapper of tvm.lower

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.lower : The original TVM's lower function
    """
    pass_ctx = tvm.transform.PassContext.current()
    # Bug fix: the lowering passes are registered under the config key
    # "tir.add_lower_pass" (see build_config and build), not "add_lower_pass";
    # the old key never matched, so a user-supplied build_config was silently
    # replaced by the default one.
    if not pass_ctx.config.get("tir.add_lower_pass"):
        with build_config():
            return tvm.lower(*args, **kwargs)
    return tvm.lower(*args, **kwargs)
def build(*args, **kwargs):
    """Thin wrapper of tvm.build

    This wrapper automatically applies VTA's build_config
    if there is no user specified build_config in context.

    See Also
    --------
    tvm.build : The original TVM's build function
    """
    # If the caller already installed custom lowering passes, honor them.
    current_ctx = tvm.transform.PassContext.current()
    if current_ctx.config.get("tir.add_lower_pass"):
        return tvm.build(*args, **kwargs)
    # Otherwise build under VTA's default pass configuration.
    with build_config():
        return tvm.build(*args, **kwargs)
# Register key ops
# Mark the VTA coprocessor intrinsics as opaque calls (so TIR passes neither
# reorder nor eliminate them) and bind the runtime symbols they lower to.
tvm.ir.register_op_attr("tir.vta.coproc_sync", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.coproc_dep_pop", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
tvm.ir.register_op_attr("tir.vta.uop_push", "TGlobalSymbol", "VTAUopPush")
tvm.ir.register_op_attr("tir.vta.command_handle", "TGlobalSymbol", "VTATLSCommandHandle")
tvm.ir.register_op_attr("tir.vta.command_handle", "TCallEffectKind", tvm.tir.CallEffectKind.Opaque)
# The memory information for the compiler
@tvm.register_func("tvm.info.mem.%s" % Environment.inp_scope)
def mem_info_inp_buffer():
    """Memory information for the on-chip input buffer scope."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.INP_ELEM_BITS,
        max_simd_bits=env.INP_ELEM_BITS,
        max_num_bits=env.INP_BUFF_SIZE * 8,
        head_address=None,
    )
@tvm.register_func("tvm.info.mem.%s" % Environment.wgt_scope)
def mem_info_wgt_buffer():
    """Memory information for the on-chip weight buffer scope."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.WGT_ELEM_BITS,
        max_simd_bits=env.WGT_ELEM_BITS,
        max_num_bits=env.WGT_BUFF_SIZE * 8,
        head_address=None,
    )
@tvm.register_func("tvm.info.mem.%s" % Environment.acc_scope)
def mem_info_acc_buffer():
    """Memory information for the on-chip accumulator buffer scope."""
    env = get_env()
    return tvm.ir.make_node(
        "MemoryInfo",
        unit_bits=env.ACC_ELEM_BITS,
        max_simd_bits=env.ACC_ELEM_BITS,
        max_num_bits=env.ACC_BUFF_SIZE * 8,
        head_address=None,
    )
# TVM Op related registration
@register_intrin_lowering("tir.vta.coproc_sync", "default")
def coproc_sync(op):
    """Lower tir.vta.coproc_sync into a VTASynchronize runtime call."""
    _ = op  # the intrinsic carries no arguments we need
    max_wait_cycles = tvm.runtime.const(1 << 31, dtype="uint32")
    return tvm.tir.call_extern(
        "int32", "VTASynchronize", get_env().dev.command_handle, max_wait_cycles
    )
@register_intrin_lowering("tir.vta.coproc_dep_push", "default")
def coproc_dep_push(op):
    """Lower tir.vta.coproc_dep_push into a VTADepPush runtime call."""
    src_queue, dst_queue = op.args[0], op.args[1]
    handle = get_env().dev.command_handle
    return tvm.tir.call_extern("int32", "VTADepPush", handle, src_queue, dst_queue)
@register_intrin_lowering("tir.vta.coproc_dep_pop", "default")
def coproc_dep_pop(op):
    """Lower tir.vta.coproc_dep_pop into a VTADepPop runtime call."""
    src_queue, dst_queue = op.args[0], op.args[1]
    handle = get_env().dev.command_handle
    return tvm.tir.call_extern("int32", "VTADepPop", handle, src_queue, dst_queue)
| 6,415 | 31.08 | 100 | py |
tvm | tvm-main/vta/python/vta/environment.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configurable VTA Hareware Environment scope."""
# pylint: disable=invalid-name, exec-used
from __future__ import absolute_import as _abs
import os
import json
import copy
import tvm
from tvm import te
from . import intrin
def get_vta_hw_path():
    """Get the VTA HW path."""
    # Default to the bundled 3rdparty checkout relative to this file;
    # the VTA_HW_PATH environment variable overrides it.
    this_dir = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    default_path = os.path.abspath(os.path.join(this_dir, "../../../3rdparty/vta-hw"))
    return os.path.abspath(os.getenv("VTA_HW_PATH", default_path))
def pkg_config(cfg):
    """Returns PkgConfig pkg config object.

    Parameters
    ----------
    cfg : dict
        VTA configuration dictionary (contents of vta_config.json).

    Returns
    -------
    PkgConfig
        Package configuration object constructed from ``cfg``.
    """
    pkg_config_py = os.path.join(get_vta_hw_path(), "config/pkg_config.py")
    libpkg = {"__file__": pkg_config_py}
    # Read the config script with a context manager so the file handle is
    # closed deterministically (the original open(...).read() leaked it),
    # then execute it in an isolated namespace.
    with open(pkg_config_py, "rb") as f:
        source = f.read()
    exec(compile(source, pkg_config_py, "exec"), libpkg, libpkg)
    PkgConfig = libpkg["PkgConfig"]
    return PkgConfig(cfg)
class DevContext(object):
    """Internal development context
    This contains all the non-user facing compiler
    internal context that is hold by the Environment.
    Parameters
    ----------
    env : Environment
        The environment hosting the DevContext
    Note
    ----
    This class is introduced so we have a clear separation
    of developer related, and user facing attributes.
    """
    # Memory id for DMA
    MEM_ID_UOP = 0
    MEM_ID_WGT = 1
    MEM_ID_INP = 2
    MEM_ID_ACC = 3
    MEM_ID_OUT = 4
    MEM_ID_ACC_8BIT = 5
    # VTA ALU Opcodes
    ALU_OPCODE_MIN = 0
    ALU_OPCODE_MAX = 1
    ALU_OPCODE_ADD = 2
    ALU_OPCODE_SHR = 3
    ALU_OPCODE_MUL = 4
    # Task queue id (pipeline stage)
    QID_LOAD_INP = 1
    QID_LOAD_WGT = 1
    QID_LOAD_OUT = 2
    QID_STORE_OUT = 3
    QID_COMPUTE = 2
    def __init__(self, env):
        # Virtual thread axis used to express VTA pipeline parallelism.
        self.vta_axis = te.thread_axis("vta")
        self.vta_push_uop = tvm.tir.StringImm("VTAPushGEMMOp")
        ctx = tvm.tir.call_intrin("handle", "tir.vta.command_handle")
        self.command_handle = tvm.tir.Call("handle", "tir.tvm_thread_context", [ctx])
        self.DEBUG_NO_SYNC = False
        # Register on the environment BEFORE constructing the gemm intrinsic:
        # intrin.gemm may access env.dev, which must resolve to this instance.
        env._dev_ctx = self
        self.gemm = intrin.gemm(env, env.mock_mode)
    def get_task_qid(self, qid):
        """Get transformed queue index."""
        # With DEBUG_NO_SYNC every stage shares queue 1, serializing execution.
        return 1 if self.DEBUG_NO_SYNC else qid
class Environment(object):
"""Hardware configuration object.
This object contains all the information
needed for compiling to a specific VTA backend.
Parameters
----------
cfg : dict of str to value.
The configuration parameters.
Example
--------
.. code-block:: python
# the following code reconfigures the environment
# temporarily to attributes specified in new_cfg.json
new_cfg = json.load(json.load(open("new_cfg.json")))
with vta.Environment(new_cfg):
# env works on the new environment
env = vta.get_env()
"""
current = None
# constants
MAX_XFER = 1 << 22
# debug flags
DEBUG_DUMP_INSN = 1 << 1
DEBUG_DUMP_UOP = 1 << 2
DEBUG_SKIP_READ_BARRIER = 1 << 3
DEBUG_SKIP_WRITE_BARRIER = 1 << 4
# memory scopes
inp_scope = "local.inp_buffer"
wgt_scope = "local.wgt_buffer"
acc_scope = "local.acc_buffer"
# initialization function
def __init__(self, cfg):
# Produce the derived parameters and update dict
self.pkg = pkg_config(cfg)
self.__dict__.update(self.pkg.cfg_dict)
# data type width
self.INP_WIDTH = 1 << self.LOG_INP_WIDTH
self.WGT_WIDTH = 1 << self.LOG_WGT_WIDTH
self.ACC_WIDTH = 1 << self.LOG_ACC_WIDTH
self.OUT_WIDTH = 1 << self.LOG_OUT_WIDTH
# tensor intrinsic shape
self.BATCH = 1 << self.LOG_BATCH
self.BLOCK_IN = 1 << self.LOG_BLOCK_IN
self.BLOCK_OUT = 1 << self.LOG_BLOCK_OUT
# buffer size
self.UOP_BUFF_SIZE = 1 << self.LOG_UOP_BUFF_SIZE
self.INP_BUFF_SIZE = 1 << self.LOG_INP_BUFF_SIZE
self.WGT_BUFF_SIZE = 1 << self.LOG_WGT_BUFF_SIZE
self.ACC_BUFF_SIZE = 1 << self.LOG_ACC_BUFF_SIZE
self.OUT_BUFF_SIZE = 1 << self.LOG_OUT_BUFF_SIZE
# bytes per buffer
self.INP_ELEM_BITS = self.BATCH * self.BLOCK_IN * self.INP_WIDTH
self.WGT_ELEM_BITS = self.BLOCK_OUT * self.BLOCK_IN * self.WGT_WIDTH
self.ACC_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.ACC_WIDTH
self.OUT_ELEM_BITS = self.BATCH * self.BLOCK_OUT * self.OUT_WIDTH
self.INP_ELEM_BYTES = self.INP_ELEM_BITS // 8
self.WGT_ELEM_BYTES = self.WGT_ELEM_BITS // 8
self.ACC_ELEM_BYTES = self.ACC_ELEM_BITS // 8
self.OUT_ELEM_BYTES = self.OUT_ELEM_BITS // 8
# dtypes
self.acc_dtype = "int%d" % self.ACC_WIDTH
self.inp_dtype = "int%d" % self.INP_WIDTH
self.wgt_dtype = "int%d" % self.WGT_WIDTH
self.out_dtype = "int%d" % self.OUT_WIDTH
# bistream name
self.BITSTREAM = self.pkg.bitstream
# model string
self.MODEL = self.TARGET + "_" + self.BITSTREAM
# lazy cached members
self.mock_mode = False
self._mock_env = None
self._dev_ctx = None
self._last_env = None
def __enter__(self):
self._last_env = Environment.current
Environment.current = self
return self
def __exit__(self, ptype, value, trace):
Environment.current = self._last_env
@property
def cfg_dict(self):
return self.pkg.cfg_dict
@property
def dev(self):
"""Developer context"""
if self._dev_ctx is None:
self._dev_ctx = DevContext(self)
return self._dev_ctx
@property
def mock(self):
"""A mock version of the Environment
The ALU, dma_copy and intrinsics will be
mocked to be nop.
"""
if self.mock_mode:
return self
if self._mock_env is None:
self._mock_env = copy.copy(self)
self._mock_env._dev_ctx = None
self._mock_env.mock_mode = True
return self._mock_env
@property
def dma_copy(self):
"""DMA copy pragma"""
return "dma_copy" if not self.mock_mode else "skip_dma_copy"
@property
def alu(self):
"""ALU pragma"""
return "alu" if not self.mock_mode else "skip_alu"
@property
def gemm(self):
"""GEMM intrinsic"""
return self.dev.gemm
    @property
    def target(self):
        """TVM target for the VTA accelerator, keyed by the MODEL string."""
        return tvm.target.vta(model=self.MODEL)
@property
def target_host(self):
"""The target host"""
if self.TARGET in ["pynq", "de10nano"]:
return "llvm -mtriple=armv7-none-linux-gnueabihf"
if self.TARGET == "ultra96":
return "llvm -mtriple=aarch64-linux-gnu"
if self.TARGET in ["sim", "tsim", "intelfocl"]:
return "llvm"
raise ValueError("Unknown target %s" % self.TARGET)
    @property
    def target_vta_cpu(self):
        """ARM CPU target paired with the accelerator board model."""
        return tvm.target.arm_cpu(model=self.TARGET)
def get_env():
    """Get the current VTA Environment.
    Returns
    -------
    env : Environment
        The current environment.
    """
    # The module-level default is installed at import time (see _init_env);
    # `with Environment(...)` blocks temporarily override it.
    return Environment.current
def _init_env():
    """Initialize the default global env

    Returns
    -------
    env : Environment
        Environment built from the JSON config in the VTA HW tree.

    Raises
    ------
    RuntimeError
        If ``config/vta_config.json`` cannot be found.
    """
    config_path = os.path.join(get_vta_hw_path(), "config/vta_config.json")
    if not os.path.exists(config_path):
        raise RuntimeError("Cannot find config in %s" % str(config_path))
    # Use a context manager so the config file handle is closed promptly;
    # the previous `json.load(open(...))` form leaked the handle.
    with open(config_path) as cfg_file:
        cfg = json.load(cfg_file)
    return Environment(cfg)
Environment.current = _init_env()
| 8,303 | 30.101124 | 90 | py |
tvm | tvm-main/vta/python/vta/intrin.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA related intrinsics"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
def gemm(env, mock=False):
    """Matrix-matrix multiply intrinsic
    Parameters
    ----------
    env : Environment
        The Environment
    mock : bool
        Whether create a mock version.
    """
    # Sanity-check that each buffer's element bit-width decomposes into the
    # expected (batch, block) tile for the configured hardware geometry.
    wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
    assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
    wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
    assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
    inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
    assert inp_lanes == env.BATCH * env.BLOCK_IN
    inp_shape = (env.BATCH, env.BLOCK_IN)
    assert inp_shape[0] * inp_shape[1] == inp_lanes
    out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
    assert out_lanes == env.BATCH * env.BLOCK_OUT
    out_shape = (env.BATCH, env.BLOCK_OUT)
    assert out_shape[0] * out_shape[1] == out_lanes
    # Declare the tile-level compute: out[i, j] = sum_k inp[i, k] * wgt[j, k]
    # accumulated in the (wider) accumulator dtype.
    wgt = te.placeholder(
        (wgt_shape[0], wgt_shape[1]), dtype="int%d" % env.WGT_WIDTH, name=env.wgt_scope
    )
    inp = te.placeholder(
        (inp_shape[0], inp_shape[1]), dtype="int%d" % env.INP_WIDTH, name=env.inp_scope
    )
    k = te.reduce_axis((0, wgt_shape[1]), name="k")
    out_dtype = "int%d" % env.ACC_WIDTH
    out = te.compute(
        (out_shape[0], out_shape[1]),
        lambda i, j: te.sum(inp[i, k].astype(out_dtype) * wgt[j, k].astype(out_dtype), axis=[k]),
        name="out",
    )
    # Buffer declarations pin each operand to its on-chip scratchpad scope
    # with offset/alignment equal to one full tile (lane count).
    wgt_layout = tvm.tir.decl_buffer(
        wgt.shape,
        wgt.dtype,
        env.wgt_scope,
        scope=env.wgt_scope,
        offset_factor=wgt_lanes,
        data_alignment=wgt_lanes,
    )
    inp_layout = tvm.tir.decl_buffer(
        inp.shape,
        inp.dtype,
        env.inp_scope,
        scope=env.inp_scope,
        offset_factor=inp_lanes,
        data_alignment=inp_lanes,
    )
    out_layout = tvm.tir.decl_buffer(
        out.shape,
        out.dtype,
        env.acc_scope,
        scope=env.acc_scope,
        offset_factor=out_lanes,
        data_alignment=out_lanes,
    )
    def intrin_func(ins, outs):
        """Matrix-matrix multiply intrinsic function"""
        dinp, dwgt = ins
        dout = outs[0]
        def instr(index):
            """Generate matrix-matrix multiply VTA instruction"""
            irb = tvm.tir.ir_builder.create()
            dev = env.dev
            irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
            irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
            # index 0/2 emit a real GEMM uop (normal set / update);
            # index 1 emits the reset form (mode flag 1, no operands).
            if index in (0, 2):
                irb.emit(
                    tvm.tir.call_intrin(
                        "int32",
                        "tir.vta.uop_push",
                        0,
                        0,
                        dout.access_ptr("rw", "int32"),
                        dinp.access_ptr("r", "int32"),
                        dwgt.access_ptr("r", "int32"),
                        0,
                        0,
                        0,
                    )
                )
            else:
                irb.emit(
                    tvm.tir.call_intrin(
                        "int32",
                        "tir.vta.uop_push",
                        0,
                        1,
                        dout.access_ptr("rw", "int32"),
                        0,
                        0,
                        0,
                        0,
                        0,
                    )
                )
            return irb.get()
        # return a triple of normal-set, reset, update
        nop = tvm.tir.Evaluate(0)
        if mock:
            return (nop, nop, nop)
        return (instr(0), instr(1), instr(2))
    return te.decl_tensor_intrin(
        out.op, intrin_func, name="GEMM", binds={inp: inp_layout, wgt: wgt_layout, out: out_layout}
    )
| 4,611 | 31.942857 | 99 | py |
tvm | tvm-main/vta/python/vta/libinfo.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Library information."""
from __future__ import absolute_import
import sys
import os
from .environment import get_vta_hw_path
def _get_lib_name(lib_name):
    """Get lib name with extension

    Parameters
    ----------
    lib_name : str
        Name of VTA shared library

    Returns
    -------
    lib_name_ext : str
        Name of VTA shared library with the platform-specific extension
        (.dll on Windows, .dylib on macOS, .so elsewhere).
    """
    if sys.platform.startswith("win32"):
        ext = ".dll"
    elif sys.platform.startswith("darwin"):
        ext = ".dylib"
    else:
        ext = ".so"
    return lib_name + ext
def find_libvta(lib_vta, optional=False):
    """Find VTA Chisel-based library

    Parameters
    ----------
    lib_vta : str
        Name of VTA shared library
    optional : bool
        When False, raise if no candidate library exists on disk.

    Returns
    -------
    lib_found : list of str
        Paths of the candidate libraries that actually exist.
    """
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    tvm_library_path = os.environ.get("TVM_LIBRARY_PATH")
    if tvm_library_path is None:
        # Default to the in-tree "build" directory three levels up.
        tvm_library_path = os.path.join(
            curr_path,
            os.pardir,
            os.pardir,
            os.pardir,
            "build",
        )
    search_dirs = [tvm_library_path, os.path.join(get_vta_hw_path(), "build")]
    lib_name = _get_lib_name(lib_vta)
    candidates = [os.path.join(directory, lib_name) for directory in search_dirs]
    lib_found = [path for path in candidates if os.path.exists(path)]
    if not lib_found and not optional:
        raise RuntimeError(
            "Cannot find the files.\n" + "List of candidates:\n" + str("\n".join(candidates))
        )
    return lib_found
| 2,410 | 28.765432 | 91 | py |
tvm | tvm-main/vta/python/vta/transform.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Additional Transformation Passes. for VTA"""
# pylint: disable=len-as-condition, no-else-return, unused-argument, invalid-name
import tvm
from tvm import te
from tvm.topi import utils
from .environment import get_env
def _match_pragma(stmt, key):
    """Internal helper to match stmt to pragma stmt.
    Parameters
    ----------
    stmt : Stmt
        The AttrStmt
    key : str
        The pragma key
    """
    # A pragma appears either directly as "pragma_<key>" or indirectly as a
    # "pragma_scope" attribute whose value is the key.
    if stmt.attr_key == "pragma_" + key:
        return True
    return stmt.attr_key == "pragma_scope" and stmt.value.value == key
def FoldUopLoop():
    """Detect and fold uop loop.
    VTA support uop programming model
    that recognizes loop structure.
    This pass detect the loop structure
    and extract that into uop loop AST.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _fold_outermost_loop(body):
        # Try to fold the outermost For of `body` into a VTAUopLoopBegin/End
        # pair.  Returns (begin, folded_body, end); (None, body, None) when
        # `body` is not a loop, and raises ValueError when folding fails.
        stmt = body
        if not isinstance(stmt, tvm.tir.For):
            return None, body, None
        loop_var = stmt.loop_var
        # Per-operand (dst, src0, src1) loop-coefficient, shared by all
        # uop_push calls inside the loop; None until first seen.
        gemm_offsets = [None, None, None]
        fail = [False]
        builtin_uop_push = tvm.ir.Op.get("tir.vta.uop_push")
        def _post_order(op):
            assert isinstance(op, tvm.tir.Call)
            base_args = 2
            if op.op.same_as(builtin_uop_push):
                args = []
                args += op.args[:base_args]
                for i in range(3):
                    # Each address argument must be linear in loop_var with
                    # a coefficient consistent across all calls in the loop.
                    m = tvm.arith.detect_linear_equation(op.args[i + base_args], [loop_var])
                    if not m:
                        fail[0] = True
                        return op
                    if gemm_offsets[i] is not None:
                        if not tvm.ir.structural_equal(m[0], gemm_offsets[i]):
                            fail[0] = True
                            return op
                        args.append(m[1])
                    else:
                        gemm_offsets[i] = m[0]
                        args.append(m[1])
                args += op.args[base_args + 3 :]
                return tvm.tir.call_intrin("int32", builtin_uop_push, *args)
            if op.op.name not in ("tir.vta.command_handle", "tir.tvm_thread_context"):
                raise RuntimeError("unexpected op %s" % op)
            return op
        ret = tvm.tir.stmt_functor.ir_transform(stmt.body, None, _post_order, ["tir.Call"])
        if not fail[0] and all(x is not None for x in gemm_offsets):
            def _visit(op):
                # The rewritten body must no longer reference the loop var.
                if op.same_as(loop_var):
                    fail[0] = True
            tvm.tir.stmt_functor.post_order_visit(ret, _visit)
            if not fail[0]:
                begin = tvm.tir.call_extern("int32", "VTAUopLoopBegin", stmt.extent, *gemm_offsets)
                end = tvm.tir.call_extern("int32", "VTAUopLoopEnd")
                return [begin, ret, end]
        raise ValueError("Failed to fold the GEMM instructions..")
    def _do_fold(stmt):
        env = get_env()
        # Only fold inside the uop push scope of the compute queue.
        if (
            stmt.attr_key == "coproc_uop_scope"
            and isinstance(stmt.value, tvm.tir.StringImm)
            and stmt.value.value == env.dev.vta_push_uop.value
        ):
            body = stmt.body
            begins = []
            ends = []
            # Attempt to fold up to two nested loops; stop at first failure.
            try:
                begin, body, end = _fold_outermost_loop(body)
                if begin is not None:
                    begins.append(begin)
                if end is not None:
                    ends.append(end)
                begin, body, end = _fold_outermost_loop(body)
                if begin is not None:
                    begins.append(begin)
                if end is not None:
                    ends.append(end)
            except ValueError:
                pass
            if body == stmt.body:
                return stmt
            # Ends close in reverse order of the begins.
            ends = list(reversed(ends))
            body = tvm.tir.stmt_seq(*(begins + [body] + ends))
            return tvm.tir.AttrStmt(stmt.node, stmt.attr_key, stmt.value, body)
        return None
    def _ftransform(f, mod, ctx):
        return f.with_body(
            tvm.tir.stmt_functor.ir_transform(f.body, _do_fold, None, ["tir.AttrStmt"])
        )
    return tvm.tir.transform.prim_func_pass(_ftransform, opt_level=0, name="tir.vta.FoldUopLoop")
def CPUAccessRewrite():
    """Detect CPU access to VTA buffer and get address correctly.
    VTA's buffer is an opaque handle that do not
    correspond to address in CPU.
    This pass detect CPU access and rewrite to use pointer
    returned VTABufferCPUPtr for CPU access.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _ftransform(f, mod, ctx):
        env = get_env()
        # old buffer var -> new "<name>_ptr" var holding the CPU pointer
        var_remap = {}
        # old Buffer object -> new Buffer over the remapped var
        buf_remap = {}
        def find_var_remap(old_var):
            if old_var in var_remap:
                return var_remap[old_var]
            new_var = tvm.tir.Var(old_var.name + "_ptr", dtype=old_var.type_annotation)
            var_remap[old_var] = new_var
            return new_var
        def find_buf_remap(old_buf):
            if old_buf in buf_remap:
                return buf_remap[old_buf]
            new_var = find_var_remap(old_buf.data)
            new_buf = tvm.tir.decl_buffer(
                shape=old_buf.shape,
                dtype=old_buf.dtype,
                data=new_var,
                strides=old_buf.strides,
                elem_offset=old_buf.elem_offset,
                scope=old_buf.scope,
                data_alignment=old_buf.data_alignment,
                offset_factor=old_buf.offset_factor,
                buffer_type="auto_broadcast" if (old_buf.buffer_type == 2) else "",
                axis_separators=old_buf.axis_separators,
            )
            buf_remap[old_buf] = new_buf
            return new_buf
        def _post_order(op):
            if isinstance(op, tvm.tir.Allocate):
                buffer_var = op.buffer_var
                if buffer_var not in var_remap:
                    return None
                # Bind the CPU pointer right inside the allocation scope and
                # forget the remaps, so accesses outside it are unaffected.
                new_var = var_remap[buffer_var]
                let_stmt = tvm.tir.LetStmt(
                    new_var,
                    tvm.tir.call_extern(
                        "handle", "VTABufferCPUPtr", env.dev.command_handle, buffer_var
                    ),
                    op.body,
                )
                alloc = tvm.tir.Allocate(buffer_var, op.dtype, op.extents, op.condition, let_stmt)
                del var_remap[buffer_var]
                bufs_to_delete = [
                    old_buf for old_buf in buf_remap if old_buf.data.same_as(buffer_var)
                ]
                for buf in bufs_to_delete:
                    del buf_remap[buf]
                return alloc
            if isinstance(op, tvm.tir.BufferLoad):
                return tvm.tir.BufferLoad(find_buf_remap(op.buffer), op.indices)
            if isinstance(op, tvm.tir.BufferStore):
                return tvm.tir.BufferStore(find_buf_remap(op.buffer), op.value, op.indices)
            raise RuntimeError("not reached")
        stmt_in = f.body
        stmt = tvm.tir.stmt_functor.ir_transform(
            stmt_in, None, _post_order, ["tir.Allocate", "tir.BufferLoad", "tir.BufferStore"]
        )
        # Remaining remaps refer to buffers allocated outside the function
        # body; bind their CPU pointers at the outermost level.
        for old_var, new_var in var_remap.items():
            stmt = tvm.tir.LetStmt(
                new_var,
                tvm.tir.call_extern("handle", "VTABufferCPUPtr", env.dev.command_handle, old_var),
                stmt,
            )
        return f.with_body(stmt)
    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.vta.CPUAccessRewrite"
    )
def LiftAllocToScopeBegin():
    """Lift allocate to beginning of the current scope.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _ftransform(f, mod, ctx):
        # Stack of statement lists, one per open scope; allocations found in
        # a scope are collected here and re-wrapped at the scope boundary.
        lift_stmt = [[]]
        def _merge_block(slist, body):
            # Re-wrap `body` with the collected statements, innermost first.
            for op in slist:
                if op.body == body:
                    body = op
                elif isinstance(op, tvm.tir.Allocate):
                    body = tvm.tir.Allocate(op.buffer_var, op.dtype, op.extents, op.condition, body)
                elif isinstance(op, tvm.tir.AttrStmt):
                    body = tvm.tir.AttrStmt(op.node, op.attr_key, op.value, body)
                elif isinstance(op, tvm.tir.For):
                    body = tvm.tir.For(
                        op.loop_var,
                        op.min,
                        op.extent,
                        op.kind,
                        body,
                        op.thread_binding,
                        op.annotations,
                    )
                else:
                    raise RuntimeError("unexpected op")
            del slist[:]
            return body
        def _pre_order(op):
            # For loops and virtual threads open a new lifting scope.
            if isinstance(op, tvm.tir.For):
                lift_stmt.append([])
            elif isinstance(op, tvm.tir.AttrStmt):
                if op.attr_key == "virtual_thread":
                    lift_stmt.append([])
        def _post_order(op):
            if isinstance(op, tvm.tir.Allocate):
                # Strip the allocate here; it is re-attached at scope begin.
                lift_stmt[-1].append(op)
                return op.body
            if isinstance(op, tvm.tir.AttrStmt):
                if op.attr_key == "storage_scope":
                    lift_stmt[-1].append(op)
                    return op.body
                if op.attr_key == "virtual_thread":
                    return _merge_block(lift_stmt.pop() + [op], op.body)
                return op
            if isinstance(op, tvm.tir.For):
                return _merge_block(lift_stmt.pop() + [op], op.body)
            raise RuntimeError("not reached")
        stmt_in = f.body
        stmt = tvm.tir.stmt_functor.ir_transform(
            stmt_in, _pre_order, _post_order, ["tir.Allocate", "tir.AttrStmt", "tir.For"]
        )
        assert len(lift_stmt) == 1
        return f.with_body(_merge_block(lift_stmt[0], stmt))
    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.vta.LiftAllocToScopeBegin"
    )
def InjectSkipCopy():
    """Pass to inject skip copy stmt, used for debug purpose.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _replace_pragma(stmt):
        # Turn statements marked "skip_dma_copy" into no-ops; leave the
        # rest untouched (None means "no rewrite").
        return tvm.tir.Evaluate(0) if _match_pragma(stmt, "skip_dma_copy") else None
    def _transform(func, _mod, _ctx):
        new_body = tvm.tir.stmt_functor.ir_transform(
            func.body, _replace_pragma, None, ["tir.AttrStmt"]
        )
        return func.with_body(new_body)
    return tvm.tir.transform.prim_func_pass(
        _transform, opt_level=0, name="tir.vta.InjectSkipCopy"
    )
def InjectCoProcSync():
    """Pass inject coproc sync
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _transform(func, *_unused):
        found = [False]
        def _rewrite(stmt):
            if _match_pragma(stmt, "coproc_sync"):
                # Append an explicit coproc_sync call after the marked body.
                found[0] = True
                sync_call = tvm.tir.Call("int32", "vta.coproc_sync", [])
                return tvm.tir.SeqStmt([stmt.body, tvm.tir.Evaluate(sync_call)])
            if _match_pragma(stmt, "trim_loop"):
                loop = stmt.body
                assert isinstance(loop, tvm.tir.For)
                # Clamp the loop to 2 iterations (debug trimming).
                return tvm.tir.For(
                    loop.loop_var,
                    loop.min,
                    2,
                    loop.kind,
                    loop.body,
                    loop.thread_binding,
                    loop.annotations,
                )
            return None
        return func.with_body(
            tvm.tir.stmt_functor.ir_transform(func.body, None, _rewrite, ["tir.AttrStmt"])
        )
    return tvm.transform.Sequential(
        [
            tvm.tir.transform.prim_func_pass(_transform, 0, "tir.vta.InjectCoProcSync"),
            tvm.tir.transform.CoProcSync(),
        ],
        opt_level=0,
        name="tir.vta.InjectCoProcSync",
    )
def InjectDMAIntrin():
    """Pass to inject DMA copy intrinsics.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    idxd = tvm.tir.indexdiv
    idxm = tvm.tir.indexmod
    def _check_compact(buf):
        # A buffer is compact when each stride equals the product of all
        # inner shape extents (no padding between rows).
        ndim = len(buf.shape)
        size = tvm.tir.const(1, buf.shape[0].dtype)
        for i in reversed(range(ndim)):
            if not utils.equal_const_int(size - buf.strides[i], 0):
                raise RuntimeError(
                    "Cannot prove compact: shape=%s, strides=%s" % (buf.shape, buf.strides)
                )
            size = size * buf.shape[i]
    def _fold_buffer_dim(buf, scope, elem_block):
        # Collapse trailing contiguous dimensions into a single dimension of
        # `elem_block` lanes, then merge any further contiguous runs.
        ndim = len(buf.shape)
        x_size = 1
        base = 0
        for i in range(1, ndim + 1):
            if not utils.equal_const_int(buf.strides[ndim - i] - x_size, 0):
                raise RuntimeError("scope %s needs to have block=%d" % (scope, elem_block))
            x_size = x_size * buf.shape[ndim - i]
            if utils.equal_const_int(x_size - elem_block, 0):
                base = i + 1
                break
        if base == 0:
            raise RuntimeError(
                "scope %s need to have block=%d, shape=%s" % (scope, elem_block, buf.shape)
            )
        shape = [elem_block]
        strides = [1]
        if base < ndim + 1 and not utils.equal_const_int(buf.strides[ndim - base], elem_block):
            shape.append(1)
            strides.append(elem_block)
        analyzer = tvm.arith.Analyzer()
        while base < ndim + 1:
            x_size = 1
            x_stride = buf.strides[ndim - base]
            next_base = base
            if not utils.equal_const_int(idxm(x_stride, elem_block), 0):
                raise RuntimeError(
                    "scope %s need to have block=%d, shape=%s, strides=%s"
                    % (scope, elem_block, buf.shape, buf.strides)
                )
            for i in range(base, ndim + 1):
                k = ndim - i
                if not utils.equal_const_int(x_size * x_stride - buf.strides[k], 0):
                    break
                x_size = x_size * buf.shape[k]
                next_base = i + 1
            shape.append(analyzer.simplify(x_size))
            strides.append(x_stride)
            assert next_base != base
            base = next_base
        strides = list(reversed(strides))
        shape = list(reversed(shape))
        return shape, strides
    def _get_2d_pattern(buf, elem_width, elem_bytes, dtype, scope, allow_fold):
        # Recognize a 2-D DMA pattern and return (x_size, y_size, x_stride,
        # element offset), all in units of elem_block lanes.
        # NOTE: `dtype` is accepted for interface symmetry but unused here.
        elem_block = elem_bytes * 8 // elem_width
        shape, strides = buf.shape, buf.strides
        if not utils.equal_const_int(idxm(buf.elem_offset, elem_block), 0):
            raise RuntimeError("scope %s need to have block=%d" % (scope, elem_block))
        if allow_fold:
            shape, strides = _fold_buffer_dim(buf, scope, elem_block)
        else:
            shape = list(x for x in shape)
            strides = list(x for x in strides)
        def raise_error():
            """Internal function to raise error"""
            raise RuntimeError(
                (
                    "Scope[%s]: cannot detect 2d pattern with elem_block=%d:"
                    + " shape=%s, strides=%s"
                )
                % (scope, elem_block, buf.shape, buf.strides)
            )
        ndim = len(shape)
        # Check if the inner-tensor is already flat
        flat = utils.equal_const_int(shape[-1], elem_block)
        if flat:
            if not utils.equal_const_int(strides[-1], 1):
                raise_error()
            if ndim == 1:
                x_size = 1
                x_stride = 1
                y_size = 1
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
            if not utils.equal_const_int(strides[-2] - elem_block, 0):
                raise_error()
            if ndim == 2:
                x_size = shape[-2]
                x_stride = shape[-2]
                y_size = 1
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
            if not utils.equal_const_int(idxm(strides[-3], elem_block), 0):
                raise_error()
            if ndim == 3:
                x_size = shape[-2]
                x_stride = idxd(strides[-3], elem_block)
                y_size = shape[-3]
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
        else:
            # Inner tensor is 2-D: the last two dims together form one block.
            if not utils.equal_const_int(strides[-1], 1):
                raise_error()
            if not utils.equal_const_int(strides[-2] - shape[-1], 0):
                raise_error()
            if not utils.equal_const_int(shape[-1] * shape[-2], elem_block):
                raise_error()
            if ndim == 2:
                x_size = 1
                x_stride = 1
                y_size = 1
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
            if not utils.equal_const_int(strides[-3], elem_block):
                raise_error()
            if ndim == 3:
                x_size = shape[-3]
                x_stride = shape[-3]
                y_size = 1
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
            if not utils.equal_const_int(idxm(strides[-4], elem_block), 0):
                raise_error()
            if ndim == 4:
                x_size = shape[-3]
                x_stride = idxd(strides[-4], elem_block)
                y_size = shape[-4]
                return x_size, y_size, x_stride, idxd(buf.elem_offset, elem_block)
        raise_error()
    def _inject_copy(src, dst, pad_before, pad_after, pad_value):
        # FIXME: pad_value is ignored...
        env = get_env()
        _ = pad_value
        if dst.scope() == "global":
            # Store
            if pad_before or pad_after:
                raise RuntimeError("Do not support copy into DRAM with pad")
            if src.scope() == env.acc_scope:
                elem_width = env.OUT_WIDTH
                elem_bytes = env.OUT_ELEM_BYTES
                mem_type = env.dev.MEM_ID_OUT
                data_type = "int%d" % env.OUT_WIDTH
                task_qid = env.dev.QID_STORE_OUT
            else:
                raise RuntimeError("Do not support copy %s->dram" % (src.scope()))
            _check_compact(src)
            x_size, y_size, x_stride, offset = _get_2d_pattern(
                dst, elem_width, elem_bytes, data_type, src.scope(), allow_fold=True
            )
            irb = tvm.tir.ir_builder.create()
            irb.scope_attr(env.dev.vta_axis, "coproc_scope", env.dev.get_task_qid(task_qid))
            irb.emit(
                tvm.tir.call_extern(
                    "int32",
                    "VTAStoreBuffer2D",
                    env.dev.command_handle,
                    src.access_ptr("r", "int32"),
                    mem_type,
                    dst.data,
                    offset,
                    x_size,
                    y_size,
                    x_stride,
                )
            )
            return irb.get()
        elif src.scope() == "global":
            # Load: pick element geometry and queue id from the dst scope.
            if dst.scope() == env.acc_scope:
                elem_width = env.ACC_WIDTH
                elem_bytes = env.ACC_ELEM_BYTES
                mem_type = env.dev.MEM_ID_ACC
                data_type = "int%d" % env.ACC_WIDTH
                task_qid = env.dev.QID_LOAD_OUT
            elif dst.scope() == env.inp_scope:
                elem_width = env.INP_WIDTH
                elem_bytes = env.INP_ELEM_BYTES
                mem_type = env.dev.MEM_ID_INP
                data_type = "int%d" % env.INP_WIDTH
                task_qid = env.dev.QID_LOAD_INP
            elif dst.scope() == env.wgt_scope:
                elem_width = env.WGT_WIDTH
                elem_bytes = env.WGT_ELEM_BYTES
                mem_type = env.dev.MEM_ID_WGT
                data_type = "int%d" % env.WGT_WIDTH
                task_qid = env.dev.QID_LOAD_WGT
            else:
                raise RuntimeError("Do not support copy dram->%s" % (dst.scope()))
            # collect pad statistics
            if pad_before:
                assert pad_after
                ndim = len(pad_before)
                if ndim <= 2 or ndim > 5:
                    raise ValueError("Limitation of 2D pad load forbid ndim=%d" % ndim)
                if ndim == 5:
                    # This case occurs when batch size N > 1
                    y_pad_before = pad_before[1]
                    x_pad_before = pad_before[2]
                    y_pad_after = pad_after[1]
                    x_pad_after = pad_after[2]
                    for dim in range(3, ndim):
                        if not utils.equal_const_int(pad_before[dim], 0):
                            raise ValueError("Do not support pad on the innermost block")
                        if not utils.equal_const_int(pad_after[dim], 0):
                            raise ValueError("Do not support pad on the innermost block")
                else:
                    y_pad_before = pad_before[0]
                    x_pad_before = pad_before[1]
                    y_pad_after = pad_after[0]
                    x_pad_after = pad_after[1]
                    for dim in range(2, ndim):
                        if not utils.equal_const_int(pad_before[dim], 0):
                            raise ValueError("Do not support pad on the innermost block")
                        if not utils.equal_const_int(pad_after[dim], 0):
                            raise ValueError("Do not support pad on the innermost block")
                allow_fold = False
            else:
                x_pad_before = 0
                y_pad_before = 0
                x_pad_after = 0
                y_pad_after = 0
                allow_fold = True
            _check_compact(dst)
            x_size, y_size, x_stride, offset = _get_2d_pattern(
                src, elem_width, elem_bytes, data_type, dst.scope(), allow_fold=allow_fold
            )
            if data_type != src.dtype:
                # 8-bit source promoted into the 32-bit accumulator buffer.
                assert data_type == "int%d" % env.ACC_WIDTH and src.dtype == "int%d" % env.INP_WIDTH
                mem_type = env.dev.MEM_ID_ACC_8BIT
            irb = tvm.tir.ir_builder.create()
            irb.scope_attr(env.dev.vta_axis, "coproc_scope", env.dev.get_task_qid(task_qid))
            # NOTE(review): access_ptr("r") on the destination looks odd for a
            # load target — confirm against the VTALoadBuffer2D ABI.
            irb.emit(
                tvm.tir.call_extern(
                    "int32",
                    "VTALoadBuffer2D",
                    env.dev.command_handle,
                    src.data,
                    offset,
                    x_size,
                    y_size,
                    x_stride,
                    x_pad_before,
                    y_pad_before,
                    x_pad_after,
                    y_pad_after,
                    dst.access_ptr("r", "int32"),
                    mem_type,
                )
            )
            return irb.get()
        else:
            raise RuntimeError("Do not support copy %s->%s" % (src.scope(), dst.scope()))
    return tvm.tir.transform.InjectCopyIntrin("dma_copy", _inject_copy)
def _get_gemm_intrin_buffer():
    """Build the (wgt, inp, out) buffer declarations of the GEMM intrinsic.

    Mirrors the buffer setup used by the gemm tensor intrinsic so rewritten
    statements can bind against the same layouts.
    """
    env = get_env()
    # Validate that bit-widths decompose into the configured tile shapes.
    wgt_lanes = env.WGT_ELEM_BITS // env.WGT_WIDTH
    assert wgt_lanes == env.BLOCK_OUT * env.BLOCK_IN
    wgt_shape = (env.BLOCK_OUT, env.BLOCK_IN)
    assert wgt_shape[0] * wgt_shape[1] == wgt_lanes
    inp_lanes = env.INP_ELEM_BITS // env.INP_WIDTH
    assert inp_lanes == env.BATCH * env.BLOCK_IN
    inp_shape = (env.BATCH, env.BLOCK_IN)
    assert inp_shape[0] * inp_shape[1] == inp_lanes
    out_lanes = env.ACC_ELEM_BITS // env.ACC_WIDTH
    assert out_lanes == env.BATCH * env.BLOCK_OUT
    out_shape = (env.BATCH, env.BLOCK_OUT)
    assert out_shape[0] * out_shape[1] == out_lanes
    wgt = te.placeholder(
        (wgt_shape[0], wgt_shape[1]), dtype="int%d" % env.WGT_WIDTH, name=env.wgt_scope
    )
    inp = te.placeholder(
        (inp_shape[0], inp_shape[1]), dtype="int%d" % env.INP_WIDTH, name=env.inp_scope
    )
    k = te.reduce_axis((0, wgt_shape[1]), name="k")
    out_dtype = "int%d" % env.ACC_WIDTH
    out = te.compute(
        (out_shape[0], out_shape[1]),
        lambda i, j: te.sum(inp[i, k].astype(out_dtype) * wgt[j, k].astype(out_dtype), axis=[k]),
        name="out",
    )
    # Each buffer lives in its scratchpad scope, aligned to one full tile.
    wgt_layout = tvm.tir.decl_buffer(
        wgt.shape,
        wgt.dtype,
        env.wgt_scope,
        scope=env.wgt_scope,
        offset_factor=wgt_lanes,
        data_alignment=wgt_lanes,
    )
    inp_layout = tvm.tir.decl_buffer(
        inp.shape,
        inp.dtype,
        env.inp_scope,
        scope=env.inp_scope,
        offset_factor=inp_lanes,
        data_alignment=inp_lanes,
    )
    out_layout = tvm.tir.decl_buffer(
        out.shape,
        out.dtype,
        env.acc_scope,
        scope=env.acc_scope,
        offset_factor=out_lanes,
        data_alignment=out_lanes,
    )
    return wgt_layout, inp_layout, out_layout
def InjectConv2DTransposeSkip():
    """Pass to skip 0-weights in conv2d transpose with stride > 1.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _ftransform(func, mod, ctx):
        env = get_env()
        dwgt, dinp, dout = _get_gemm_intrin_buffer()
        # Populated by _find_basics while visiting the pragma's subtree.
        calls = []
        selects = []
        def _find_basics(op):
            if isinstance(op, tvm.tir.BufferLoad):
                calls.append(op)
            elif isinstance(op, tvm.tir.Select):
                selects.append(op)
        def _do_fold(op):
            if _match_pragma(op, "conv2d_transpose_gemm"):
                is_init = "_init" in str(op)
                tvm.tir.stmt_functor.post_order_visit(op, _find_basics)
                if is_init:
                    # create inner most block
                    irb = tvm.tir.ir_builder.create()
                    dev = env.dev
                    irb.scope_attr(dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE))
                    irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
                    # Reset-mode uop (second flag = 1): zero the accumulator.
                    irb.emit(
                        tvm.tir.call_intrin(
                            "int32",
                            "tir.vta.uop_push",
                            0,
                            1,
                            dout.access_ptr("rw", "int32"),
                            0,
                            0,
                            0,
                            0,
                            0,
                        )
                    )
                    inner = irb.get()
                    # TODO(@tmoreau89): This is only a temporary fix, please take a look.
                    body = op.body.body
                    while isinstance(body, tvm.tir.IfThenElse):
                        body = body.then_case
                    args = body.indices
                    res_buffer = body.buffer
                    tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_OUT)
                    inner = tvm.tir.AttrStmt(
                        [dout, res_buffer],
                        "buffer_bind_scope",
                        tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
                        inner,
                    )
                    return inner
                else:
                    # The last three loads visited are (conv, data, kernel).
                    conv_call, data_call, kernel_call = calls[-3:]
                    pad_data_tensor = data_call.buffer
                    kernel_tensor = kernel_call.buffer
                    res_tensor = conv_call.buffer
                    if selects:
                        condition = selects[0].condition
                    else:
                        condition = tvm.tir.const(1, "int")
                    # create inner most block
                    irb = tvm.tir.ir_builder.create()
                    # Guard the GEMM so zero-weight positions are skipped.
                    with irb.if_scope(condition):
                        dev = env.dev
                        irb.scope_attr(
                            dev.vta_axis, "coproc_scope", dev.get_task_qid(dev.QID_COMPUTE)
                        )
                        irb.scope_attr(dev.vta_axis, "coproc_uop_scope", dev.vta_push_uop)
                        irb.emit(
                            tvm.tir.call_intrin(
                                "int32",
                                "tir.vta.uop_push",
                                0,
                                0,
                                dout.access_ptr("rw", "int32"),
                                dinp.access_ptr("r", "int32"),
                                dwgt.access_ptr("r", "int32"),
                                0,
                                0,
                                0,
                            )
                        )
                    inner = irb.get()
                    # Bind the output, kernel, and input accesses to the
                    # intrinsic buffers via buffer_bind_scope tuples.
                    args = conv_call.indices
                    tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_OUT)
                    inner = tvm.tir.AttrStmt(
                        [dout, res_tensor],
                        "buffer_bind_scope",
                        tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
                        inner,
                    )
                    args = kernel_call.indices
                    tpl = (
                        args[0],
                        1,
                        args[1],
                        1,
                        args[2],
                        1,
                        args[3],
                        1,
                        0,
                        env.BLOCK_OUT,
                        0,
                        env.BLOCK_IN,
                    )
                    inner = tvm.tir.AttrStmt(
                        [dwgt, kernel_tensor],
                        "buffer_bind_scope",
                        tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
                        inner,
                    )
                    args = data_call.indices
                    tpl = (args[0], 1, args[1], 1, args[2], 1, args[3], 1, 0, 1, 0, env.BLOCK_IN)
                    inner = tvm.tir.AttrStmt(
                        [dinp, pad_data_tensor],
                        "buffer_bind_scope",
                        tvm.tir.call_intrin("handle", "tir.tvm_tuple", *tpl),
                        inner,
                    )
                    return inner
            return None
        return func.with_body(
            tvm.tir.stmt_functor.ir_transform(func.body, _do_fold, None, ["tir.AttrStmt"])
        )
    # NOTE(review): the registered pass name below misspells "Transpose"
    # ("Trasnpose"); left as-is since tooling may match on the exact string.
    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.vta.InjectConv2DTrasnposeSkip"
    )
def AnnotateALUCoProcScope():
    """Pass to insert ALU instruction.
    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """
    def _transform(func, _mod, _ctx):
        env = get_env()
        def _annotate(stmt):
            # Statements explicitly marked for skipping become no-ops.
            if _match_pragma(stmt, "skip_alu"):
                return tvm.tir.Evaluate(0)
            # Wrap ALU pragmas in the compute-queue coprocessor scope with
            # the ALU uop push marker.
            if _match_pragma(stmt, "alu"):
                builder = tvm.tir.ir_builder.create()
                compute_qid = env.dev.get_task_qid(env.dev.QID_COMPUTE)
                builder.scope_attr(env.dev.vta_axis, "coproc_scope", compute_qid)
                builder.scope_attr(
                    env.dev.vta_axis, "coproc_uop_scope", tvm.tir.StringImm("VTAPushALUOp")
                )
                builder.emit(stmt)
                return builder.get()
            return stmt
        return func.with_body(
            tvm.tir.stmt_functor.ir_transform(func.body, None, _annotate, ["tir.AttrStmt"])
        )
    return tvm.tir.transform.prim_func_pass(
        _transform, opt_level=0, name="tir.vta.AnnotateALUCoProcScope"
    )
def InjectALUIntrin():
    """Pass to inject ALU micro-ops.

    Pattern-matches loop nests annotated with the ``alu`` pragma and
    replaces them with calls into the VTA micro-op runtime
    (``VTAUopLoopBegin`` / ``tir.vta.uop_push`` / ``VTAUopLoopEnd``).

    Returns
    -------
    fpass : tvm.transform.Pass
        The pass
    """

    def _ftransform(func, mod, ctx):
        env = get_env()
        idxm = tvm.tir.indexmod
        analyzer = tvm.arith.Analyzer()

        def _do_fold(stmt):
            def _flatten_loop(src_coeff, dst_coeff, extents):
                # Fuse adjacent loop levels whenever the outer stride equals
                # inner_stride * inner_extent for both src and dst, which
                # shrinks the depth of the emitted uop loop nest.
                src_coeff = list(src_coeff)
                dst_coeff = list(dst_coeff)
                extents = list(extents)
                rev_src_coeff = [src_coeff.pop()]
                rev_dst_coeff = [dst_coeff.pop()]
                rev_extents = []
                assert src_coeff
                vsrc = src_coeff.pop()
                vdst = dst_coeff.pop()
                vext = extents.pop()
                while src_coeff:
                    next_src = src_coeff.pop()
                    next_dst = dst_coeff.pop()
                    next_ext = extents.pop()
                    if analyzer.can_prove_equal(next_src, vsrc * vext) and analyzer.can_prove_equal(
                        next_dst, vdst * vext
                    ):
                        vext = analyzer.simplify(vext * next_ext)
                    else:
                        rev_src_coeff.append(vsrc)
                        rev_dst_coeff.append(vdst)
                        rev_extents.append(vext)
                        vsrc = next_src
                        vdst = next_dst
                        vext = next_ext
                rev_src_coeff.append(vsrc)
                rev_dst_coeff.append(vdst)
                rev_extents.append(vext)
                rev_src_coeff.reverse()
                rev_dst_coeff.reverse()
                rev_extents.reverse()
                return rev_src_coeff, rev_dst_coeff, rev_extents

            if _match_pragma(stmt, "alu"):
                # Get to the innermost loop body
                loop_body = stmt.body
                nest_size = 0
                while isinstance(loop_body, tvm.tir.For):
                    loop_body = loop_body.body
                    nest_size += 1
                # Get the src/dst arguments
                dst_var = loop_body.buffer.data
                dst_idx = loop_body.indices[0]
                # Derive loop variables and extents
                tmp_body = stmt.body
                indices = []
                extents = []
                for _ in range(nest_size):
                    indices.append(tmp_body.loop_var)
                    extents.append(tmp_body.extent)
                    tmp_body = tmp_body.body
                # Derive opcode from the innermost store's value expression
                if isinstance(loop_body.value, tvm.tir.Add):
                    alu_opcode = env.dev.ALU_OPCODE_ADD
                    lhs = loop_body.value.a
                    rhs = loop_body.value.b
                elif isinstance(loop_body.value, tvm.tir.Sub):
                    alu_opcode = env.dev.ALU_OPCODE_SUB
                    lhs = loop_body.value.a
                    rhs = loop_body.value.b
                elif isinstance(loop_body.value, tvm.tir.Mul):
                    alu_opcode = env.dev.ALU_OPCODE_MUL
                    lhs = loop_body.value.a
                    rhs = loop_body.value.b
                elif isinstance(loop_body.value, tvm.tir.Min):
                    alu_opcode = env.dev.ALU_OPCODE_MIN
                    lhs = loop_body.value.a
                    rhs = loop_body.value.b
                elif isinstance(loop_body.value, tvm.tir.Max):
                    alu_opcode = env.dev.ALU_OPCODE_MAX
                    lhs = loop_body.value.a
                    rhs = loop_body.value.b
                elif isinstance(loop_body.value, tvm.tir.Call):
                    if loop_body.value.op.name == "tir.shift_left":
                        # Hardware only has SHR; a left shift is a right
                        # shift by the negated amount.
                        alu_opcode = env.dev.ALU_OPCODE_SHR
                        lhs = loop_body.value.args[0]
                        rhs = analyzer.simplify(-loop_body.value.args[1])
                    elif loop_body.value.op.name == "tir.shift_right":
                        alu_opcode = env.dev.ALU_OPCODE_SHR
                        lhs = loop_body.value.args[0]
                        rhs = loop_body.value.args[1]
                    else:
                        # Bug fix: a Call node exposes its name via .op.name
                        # (see the branches above); .name does not exist and
                        # raised AttributeError instead of this diagnostic.
                        raise RuntimeError(
                            "Function call not recognized %s" % (loop_body.value.op.name)
                        )
                elif isinstance(loop_body.value, tvm.tir.BufferLoad):
                    # Identity copy: lowered as a shift-right by zero.
                    alu_opcode = env.dev.ALU_OPCODE_SHR
                    lhs = loop_body.value
                    rhs = tvm.tir.const(0, "int32")
                else:
                    raise RuntimeError(
                        "Expression not recognized %s, %s, %s"
                        % (type(loop_body.value), str(loop_body.value), str(stmt))
                    )
                # Derive array index coefficients
                dst_coeff = tvm.arith.detect_linear_equation(dst_idx, indices)
                # Check if lhs/rhs is immediate
                use_imm = False
                imm_val = None
                if isinstance(rhs, tvm.tir.IntImm):
                    assert lhs.buffer.data.same_as(dst_var)
                    src_coeff = tvm.arith.detect_linear_equation(lhs.indices[0], indices)
                    use_imm = True
                    imm_val = rhs
                if isinstance(lhs, tvm.tir.IntImm):
                    assert rhs.buffer.data.same_as(dst_var)
                    src_coeff = tvm.arith.detect_linear_equation(rhs.indices[0], indices)
                    use_imm = True
                    imm_val = lhs
                if imm_val is None:
                    # Tensor-tensor case: one operand must alias the
                    # destination (VTA ALU ops compute in place).
                    imm_val = 0
                    assert lhs.buffer.data.same_as(dst_var) and rhs.buffer.data.same_as(dst_var)
                    src_lhs_coeff = tvm.arith.detect_linear_equation(lhs.indices[0], indices)
                    src_rhs_coeff = tvm.arith.detect_linear_equation(rhs.indices[0], indices)
                    # Determine which side has the same coefficients
                    lhs_equal = True
                    rhs_equal = True
                    for i, coef in enumerate(dst_coeff):
                        if not tvm.ir.structural_equal(coef, src_lhs_coeff[i]):
                            lhs_equal = False
                        if not tvm.ir.structural_equal(coef, src_rhs_coeff[i]):
                            rhs_equal = False
                    # Make sure at least one of the source is identical to the
                    # destination (in-place computation)
                    assert lhs_equal or rhs_equal
                    # Assign the source coefficients
                    if lhs_equal:
                        src_coeff = src_rhs_coeff
                    else:
                        src_coeff = src_lhs_coeff
                # Ensure that we have the proper tensor dimensions in the
                # innermost loop (pattern match)
                src_coeff = list(src_coeff)
                dst_coeff = list(dst_coeff)
                extents = list(extents)
                assert len(src_coeff) > 1
                assert len(dst_coeff) > 1
                assert len(extents) != 0
                assert tvm.ir.structural_equal(
                    analyzer.simplify(idxm(src_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0
                )
                assert tvm.ir.structural_equal(
                    analyzer.simplify(idxm(dst_coeff[-1], env.BATCH * env.BLOCK_OUT)), 0
                )
                assert tvm.ir.structural_equal(src_coeff[-2], 1)
                assert tvm.ir.structural_equal(dst_coeff[-2], 1)
                if env.BATCH > 1:
                    assert len(src_coeff) > 2
                    assert len(dst_coeff) > 2
                    assert len(extents) > 1
                    assert tvm.ir.structural_equal(src_coeff[-3], env.BLOCK_OUT)
                    assert tvm.ir.structural_equal(dst_coeff[-3], env.BLOCK_OUT)
                # Apply tensorization of the loop coefficients
                src_offset = src_coeff[-1]
                dst_offset = dst_coeff[-1]
                if env.BATCH == 1:
                    src_coeff = src_coeff[:-2]
                    dst_coeff = dst_coeff[:-2]
                    extents = extents[:-1]
                else:
                    src_coeff = src_coeff[:-3]
                    dst_coeff = dst_coeff[:-3]
                    extents = extents[:-2]
                src_coeff.append(src_offset)
                dst_coeff.append(dst_offset)
                # Strides/offsets are expressed in units of whole ALU vectors.
                src_coeff = [analyzer.simplify(c // (env.BATCH * env.BLOCK_OUT)) for c in src_coeff]
                dst_coeff = [analyzer.simplify(c // (env.BATCH * env.BLOCK_OUT)) for c in dst_coeff]
                # Flatten the outer loops
                if extents:
                    src_coeff, dst_coeff, extents = _flatten_loop(src_coeff, dst_coeff, extents)
                # Insert ALU micro-ops
                irb = tvm.tir.ir_builder.create()
                for idx, extent in enumerate(extents):
                    irb.emit(
                        tvm.tir.call_extern(
                            "int32",
                            "VTAUopLoopBegin",
                            extent,
                            dst_coeff[idx],
                            src_coeff[idx],
                            0,
                        )
                    )
                use_imm = int(use_imm)
                irb.emit(
                    tvm.tir.call_intrin(
                        "int32",
                        "tir.vta.uop_push",
                        1,
                        0,
                        dst_coeff[len(dst_coeff) - 1],
                        src_coeff[len(src_coeff) - 1],
                        0,
                        alu_opcode,
                        use_imm,
                        imm_val,
                    )
                )
                # Close one VTAUopLoopEnd per opened loop level.
                for _ in extents:
                    irb.emit(tvm.tir.call_extern("int32", "VTAUopLoopEnd"))
                return irb.get()
            return stmt

        return func.with_body(
            tvm.tir.stmt_functor.ir_transform(func.body, None, _do_fold, ["tir.AttrStmt"])
        )

    return tvm.tir.transform.prim_func_pass(
        _ftransform, opt_level=0, name="tir.vta.InjectALUIntrin"
    )
| 42,544 | 36.885129 | 100 | py |
tvm | tvm-main/vta/python/vta/bitstream.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA specific bitstream management library."""
from __future__ import absolute_import as _abs
import os
import sys
from tvm.contrib.download import download
from .environment import get_env
if sys.version_info >= (3,):
import urllib.error as urllib2
else:
import urllib2
# bitstream repo
BITSTREAM_URL = "https://github.com/uwsampl/vta-distro/raw/master/bitstreams/"
def get_bitstream_path():
    """Return the filesystem path of the cached bitstream for the current config.

    Returns
    -------
    bit_path: str
        Corresponding to the filepath of the bitstream
    """
    env = get_env()
    # Cache root: $VTA_CACHE_PATH, falling back to ~/.vta_cache/
    default_root = os.path.join(os.getenv("HOME"), ".vta_cache/")
    cache_dir = os.path.join(
        os.getenv("VTA_CACHE_PATH", default_root),
        env.TARGET,
        env.HW_VER.replace(".", "_"),
    )
    # Lazily create the per-target/per-version cache directory.
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    return os.path.join(cache_dir, env.BITSTREAM) + ".bit"
def download_bitstream():
    """Downloads a cached bitstream corresponding to the current config.

    Returns
    -------
    success : bool
        True once the bitstream has been downloaded into the local cache.

    Raises
    ------
    RuntimeError
        If the bitstream is not hosted for this configuration (HTTP 404)
        or the download URL cannot be reached.
    """
    env = get_env()
    success = False
    bit = get_bitstream_path()
    url = os.path.join(BITSTREAM_URL, env.TARGET)
    url = os.path.join(url, env.HW_VER)
    url = os.path.join(url, env.BITSTREAM + ".bit")
    try:
        download(url, bit)
        # Bug fix: the flag was never set, so the function always returned
        # False even after a successful download.
        success = True
    except urllib2.HTTPError as err:
        if err.code == 404:
            raise RuntimeError(
                # Raise error - the solution when this happens it to build your
                # own bitstream and add it to your $VTA_CACHE_PATH
                "{} is not available. It appears that this configuration \
bistream has not been cached. Please compile your own bitstream (see hardware \
compilation guide to get Xilinx toolchains setup) and add it to your \
$VTA_CACHE_PATH. Alternatively edit your config.json back to its default \
settings. You can see the list of available bitstreams under {}".format(
                    url, BITSTREAM_URL
                )
            )
        raise RuntimeError(
            # This could happen when trying to access the URL behind a proxy
            "Something went wrong when trying to access {}. Check your \
internet connection or proxy settings.".format(
                url
            )
        )
    return success
| 3,183 | 33.236559 | 91 | py |
tvm | tvm-main/vta/python/vta/program_bitstream.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA specific bitstream program library."""
import os
import argparse
def main():
    """Main function.

    Parses the command line, validates the target, verifies the bitstream
    file exists (module-relative or cwd-relative) and programs the device.

    Raises
    ------
    RuntimeError
        If the target is unknown or the bitstream file cannot be found.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("target", type=str, default="", help="target")
    parser.add_argument("bitstream", type=str, default="", help="bitstream path")
    args = parser.parse_args()

    if args.target not in ("pynq", "ultra96", "de10nano", "sim", "tsim"):
        raise RuntimeError("Unknown target {}".format(args.target))

    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    path_list = [
        # Bug fix: the leading "/" made the second component absolute, so
        # os.path.join discarded curr_path and the module-relative candidate
        # was actually "/<bitstream>".
        os.path.join(curr_path, "{}".format(args.bitstream)),
        os.path.join("./", "{}".format(args.bitstream)),
    ]
    ok_path_list = [p for p in path_list if os.path.exists(p)]
    if not ok_path_list:
        raise RuntimeError("Cannot find bitstream file in %s" % str(path_list))
    bitstream_program(args.target, args.bitstream)
def pynq_bitstream_program(bitstream_path):
    """Program a Pynq/Ultra96 FPGA with the given bitstream file."""
    # pylint: disable=import-outside-toplevel
    from pynq import Bitstream

    bit = Bitstream(bitstream_path)
    bit.download()
def de10nano_bitstream_program(bitstream_path):
    """Program a DE10-Nano FPGA via the registered TVM global function."""
    # pylint: disable=import-outside-toplevel
    from tvm import get_global_func

    get_global_func("vta.de10nano.program")(bitstream_path)
def intelfocl_bitstream_program(bitstream_path, mem_size=4 * 1024 * 1024 * 1024):
    """Program an Intel OpenCL FPGA, reserving ``mem_size`` bytes of device memory."""
    # pylint: disable=import-outside-toplevel
    from tvm import get_global_func

    get_global_func("vta.oclfpga.program")(bitstream_path, mem_size)
def bitstream_program(target, bitstream, *args):
    """program bitstream to devices"""
    if target in ("sim", "tsim"):
        # Simulation back-ends have no hardware to program.
        return
    if target in ("pynq", "ultra96"):
        pynq_bitstream_program(bitstream)
    elif target == "de10nano":
        de10nano_bitstream_program(bitstream)
    elif target == "intelfocl":
        intelfocl_bitstream_program(bitstream, *args)
    else:
        raise RuntimeError("Unknown target {}".format(target))
# Script entry point: program a device with a bitstream from the command line.
if __name__ == "__main__":
    main()
| 2,930 | 33.081395 | 81 | py |
tvm | tvm-main/vta/python/vta/autotvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines AutoTVM components used with VTA."""
from tvm.autotvm.measure import default_module_loader
from . import rpc_client
def module_loader(bitstream=None):
    """Construct a ModuleLoader implementation specialized for VTA.

    Parameters
    ----------
    bitsream : Optional[str]
        Path to the bitstream to write prior to uploading code.

    Returns
    -------
    ModuleLoader :
        The ModuleLoader instance.
    """

    def _reprogram_fpga(remote, _build_result):
        # Callback run by default_module_loader before each code upload:
        # flash the (optional) bitstream over the RPC session, then reset
        # the VTA runtime so it matches the current configuration.
        # _build_result (the build-phase artifact) is unused here.
        rpc_client.program_fpga(remote, bitstream)
        rpc_client.reconfig_runtime(remote)

    return default_module_loader(_reprogram_fpga)
| 1,766 | 32.339623 | 71 | py |
tvm | tvm-main/vta/python/vta/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Package is a TVM backend extension to support VTA hardware.
Besides the compiler toolchain, it also includes utility functions to
configure the hardware environment and access remote device through RPC.
"""
import sys
import tvm._ffi.base
from .autotvm import module_loader
from .bitstream import get_bitstream_path, download_bitstream
from .environment import get_env, Environment
from .rpc_client import reconfig_runtime, program_fpga
__version__ = "0.1.0"
# do not from tvm import topi when running vta.exec.rpc_server
# in lib tvm runtime only mode
if not tvm._ffi.base._RUNTIME_ONLY:
from . import top
from .build_module import build_config, lower, build
| 1,466 | 36.615385 | 72 | py |
tvm | tvm-main/vta/python/vta/top/op.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, ungrouped-imports
"""Namespace for supporting Relay operators on VTA."""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from tvm.relay.op import op as reg
from tvm.relay.op import strategy as _strategy
from tvm.relay.op.op import OpPattern, OpStrategy
from .utils import is_packed_layout
from .vta_conv2d import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense import dense_packed, schedule_dense_packed
from ..environment import get_env
ENV = get_env()
# override to force partition at copy
reg.register_pattern("copy", OpPattern.INJECTIVE, level=15)
# add clip vta strategy
def compute_clip_vta(attrs, inputs, output_type):
    """Clip operator: clamp the input tensor to [a_min, a_max]."""
    data = inputs[0]
    lo = tvm.tir.const(attrs.a_min, data.dtype)
    hi = tvm.tir.const(attrs.a_max, data.dtype)
    with tvm.te.tag_scope(topi.tag.ELEMWISE):
        # Two separate elementwise stages so each maps onto one VTA ALU op.
        clipped = te.compute(data.shape, lambda *i: tvm.te.min(data(*i), hi), name="clipA")
        clipped = te.compute(
            clipped.shape, lambda *i: tvm.te.max(clipped(*i), lo), name="clipB"
        )
    return [clipped]
def clip_strategy_vta(attrs, inputs, out_type, target):
    """Op strategy for ``clip`` on VTA: one packed implementation."""
    schedule = _strategy.wrap_topi_schedule(topi.generic.schedule_injective)
    strat = OpStrategy()
    strat.add_implementation(compute_clip_vta, schedule, name="clip.vta")
    return strat
reg.get("clip").get_attr("FTVMStrategy").register(clip_strategy_vta, "vta")
@autotvm.register_topi_compute("add.vta")
def add_packed(cfg, lhs, rhs):
    """Packed elementwise addition for VTA."""
    out = topi.add(lhs, rhs)
    return out
@autotvm.register_topi_compute("multiply.vta")
def multiply_packed(cfg, lhs, rhs):
    """Packed elementwise multiplication for VTA."""
    out = topi.multiply(lhs, rhs)
    return out
def schedule_alu_packed(cfg, outs):
    """alu packed schedule.

    Schedules a chain of elementwise (ALU-only) ops.  On targets that can
    run standalone ALU ops (sim/tsim/intelfocl) and for 6-D packed integer
    outputs, the elementwise stages are tiled, placed in the accumulator
    scope, and tagged with the VTA ALU / DMA pragmas; otherwise a plain
    auto-inlined injective schedule is returned.

    Parameters
    ----------
    cfg : autotvm config space
        Used to define the tiling splits.
    outs : te.Tensor or list of te.Tensor
        The single output of the op chain.

    Returns
    -------
    s : te.Schedule
        The constructed schedule.
    """
    assert len(outs) == 1
    def is_cast_op(op):
        # Cast stages are excluded when collecting ALU work below.
        return op.name == "T_cast"
    outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
    output = outs[0]
    s = te.create_schedule([x.op for x in outs])
    te.schedule.AutoInlineInjective(s)
    # other target does not support alu-only ops
    if not (ENV.TARGET in ["sim", "tsim", "intelfocl"]):
        return s
    # only put the int-related ops to vta
    if "int" in output.dtype and len(output.shape) == 6:
        # ewise_inputs: (consumer op, placeholder-or-cast input) pairs to be
        # cached into the accumulator scope via DMA.
        # ewise_ops: elementwise stages mapped onto the VTA ALU.
        # const_ops: scalar (axis-free) stages, inlined.
        ewise_inputs = []
        ewise_ops = []
        const_ops = []
        def _traverse(op):
            if topi.tag.is_broadcast(op.tag):
                if not op.same_as(output.op):
                    if not op.axis:
                        const_ops.append(op)
                    elif not is_cast_op(op):
                        ewise_ops.append(op)
                for tensor in op.input_tensors:
                    if isinstance(tensor.op, tvm.te.PlaceholderOp):
                        ewise_inputs.append((op, tensor))
                    elif is_cast_op(tensor.op) and not op.same_as(output.op):
                        ewise_inputs.append((op, tensor))
                    else:
                        _traverse(tensor.op)
            else:
                for tensor in op.input_tensors:
                    if (not isinstance(tensor.op, tvm.te.PlaceholderOp)) and (
                        not is_cast_op(tensor.op)
                    ):
                        _traverse(tensor.op)
        op = output.op
        _traverse(op)
        # Bail out to the plain schedule if any input is float32 — the VTA
        # ALU only handles integer data.
        for _, t in ewise_inputs:
            if t.dtype == "float32":
                return s
        # Tile the packed output: (batch_o, chan_o, h, w, batch_i, chan_i).
        x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
        cfg.define_split("tile_co", x_co, num_outputs=2)
        cfg.define_split("tile_h", x_i, num_outputs=2)
        cfg.define_split("tile_w", x_j, num_outputs=2)
        x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
        x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
        x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
        s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
        store_pt = x_j0
        for e_o in ewise_ops:
            s[e_o].set_scope(ENV.acc_scope)
            s[e_o].pragma(s[e_o].op.axis[0], ENV.alu)
            s[e_o].compute_at(s[output], store_pt)
        # cache read input
        cache_read_ewise = []
        for consumer, tensor in ewise_inputs:
            cache_read_ewise.append(s.cache_read(tensor, ENV.acc_scope, [consumer]))
        for tensor in cache_read_ewise:
            if s[tensor].op.axis:
                s[tensor].pragma(s[tensor].op.axis[0], ENV.dma_copy)
            s[tensor].compute_at(s[output], store_pt)
        for op in const_ops:
            s[op].compute_inline()
        # DMA the result tile back out.
        s[output].pragma(x_co1, ENV.dma_copy)
    return s
@autotvm.register_topi_schedule("add.vta")
def schedule_add_packed(cfg, outs):
    """Schedule packed add by delegating to the shared ALU schedule."""
    return schedule_alu_packed(cfg, outs)
@autotvm.register_topi_schedule("multiply.vta")
def schedule_multiply_packed(cfg, outs):
    """Schedule packed multiply by delegating to the shared ALU schedule."""
    return schedule_alu_packed(cfg, outs)
def add_strategy_vta(attrs, inputs, out_type, target):
    """Op strategy for packed elementwise add on VTA."""
    strat = OpStrategy()
    strat.add_implementation(
        _strategy.wrap_topi_compute(add_packed),
        _strategy.wrap_topi_schedule(schedule_add_packed),
        name="add.vta",
    )
    return strat
def multiply_strategy_vta(attrs, inputs, out_type, target):
    """Op strategy for packed elementwise multiply on VTA."""
    strat = OpStrategy()
    strat.add_implementation(
        _strategy.wrap_topi_compute(multiply_packed),
        _strategy.wrap_topi_schedule(schedule_multiply_packed),
        name="multiply.vta",
    )
    return strat
# other target does not support alu-only ops
if ENV.TARGET in ["sim", "intelfocl"]:
reg.get("add").get_attr("FTVMStrategy").register(add_strategy_vta, "vta")
reg.get("multiply").get_attr("FTVMStrategy").register(multiply_strategy_vta, "vta")
@_strategy.conv2d_strategy.register("vta")
def conv2d_strategy_vta(attrs, inputs, out_type, target):
    """conv2d vta strategy"""
    dilation = topi.utils.get_const_tuple(attrs.dilation)
    assert dilation == (1, 1), "support for dilation limited to (1, 1)"

    if not is_packed_layout(attrs.data_layout):
        # Unpacked layouts fall back to the ARM CPU implementation.
        arm_tgt = tvm.target.arm_cpu(target.model)
        return _strategy.arm_cpu.conv2d_strategy_arm_cpu(attrs, inputs, out_type, arm_tgt)

    strategy = OpStrategy()
    if attrs.groups == 1:
        assert ENV.LOG_INP_WIDTH == 3, "only support 8bit inp for now"
        assert ENV.LOG_WGT_WIDTH == 3, "only support 8bit wgt for now"
        assert inputs[1].dtype == "int8"
        strategy.add_implementation(
            _strategy.wrap_compute_conv2d(conv2d_packed, need_data_layout=True),
            _strategy.wrap_topi_schedule(schedule_conv2d_packed),
            name="conv2d_packed.vta",
        )
    else:  # grouped convolution
        strategy.add_implementation(
            _strategy.wrap_compute_conv2d(group_conv2d_packed, has_groups=True),
            _strategy.wrap_topi_schedule(schedule_group_conv2d_packed),
            name="group_conv2d_packed.vta",
        )
    return strategy
@_strategy.conv2d_transpose_strategy.register("vta")
def conv2d_transpose_strategy_vta(attrs, inputs, out_type, target):
    """conv2d_transpose vta strategy"""
    dilation = topi.utils.get_const_tuple(attrs.dilation)
    assert dilation == (1, 1), "support for dilation limited to (1, 1)"

    if not is_packed_layout(attrs.data_layout):
        # Unpacked layouts fall back to the ARM CPU implementation.
        arm_tgt = tvm.target.arm_cpu(target.model)
        return _strategy.arm_cpu.conv2d_transpose_strategy_arm_cpu(
            attrs, inputs, out_type, arm_tgt
        )

    strategy = OpStrategy()
    strategy.add_implementation(
        _strategy.wrap_compute_conv2d_transpose(conv2d_transpose_packed),
        _strategy.wrap_topi_schedule(schedule_conv2d_transpose_packed),
        name="conv2d_transpose_packed.vta",
    )
    return strategy
@_strategy.dense_strategy.register("vta")
def dense_strategy_vta(attrs, inputs, out_type, target):
    """dense vta strategy"""
    if len(inputs[0].shape) != 4:
        # Not in the packed layout: run on the ARM CPU instead.
        arm_tgt = tvm.target.arm_cpu(target.model)
        return _strategy.x86.dense_strategy_cpu(attrs, inputs, out_type, arm_tgt)

    # A 4-D input implies the packed layout.
    strategy = OpStrategy()
    strategy.add_implementation(
        _strategy.wrap_compute_dense(dense_packed),
        _strategy.wrap_topi_schedule(schedule_dense_packed),
        name="dense_packed.vta",
    )
    return strategy
| 9,526 | 34.416357 | 96 | py |
tvm | tvm-main/vta/python/vta/top/vta_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Conv2D operator declaration and schedule registration for VTA."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from .utils import is_packed_layout
from ..environment import get_env
@autotvm.register_topi_compute("conv2d_packed.vta")
def conv2d_packed(cfg, data, kernel, strides, padding, dilation, layout, out_dtype):
    """Packed conv2d function.

    Parameters
    ----------
    cfg : autotvm config space
        Used here only to record the FLOP count.
    data : te.Tensor
        6-D packed input.
    kernel : te.Tensor
        6-D packed weight.
    strides : tuple of int
        (stride_h, stride_w).
    padding : sequence of int
        (pad_h, pad_w), applied symmetrically on both sides.
    dilation : tuple of int
        Must be (1, 1); dilation is not supported.
    layout : str
        Data layout; must be a packed VTA layout.
    out_dtype : str
        Accumulation/output dtype.

    Returns
    -------
    res : te.Tensor
        6-D packed convolution output.
    """
    if not is_packed_layout(layout):
        raise topi.InvalidShapeError()
    assert dilation == (1, 1)

    # Bug fix: pad when EITHER spatial pad is nonzero. The original tested
    # only padding[0], silently dropping a width-only pad such as (0, 1).
    if padding[0] or padding[1]:
        pad_data = topi.nn.pad(data, [0, 0, padding[0], padding[1], 0, 0], name="pad_data")
    else:
        pad_data = data
    assert len(data.shape) == 6
    assert len(kernel.shape) == 6
    oheight = topi.utils.get_const_int((pad_data.shape[2] - kernel.shape[2]) // strides[0] + 1)
    owidth = topi.utils.get_const_int((pad_data.shape[3] - kernel.shape[3]) // strides[1] + 1)
    oshape = (data.shape[0], kernel.shape[0], oheight, owidth, data.shape[4], kernel.shape[4])

    ishape = topi.utils.get_const_tuple(data.shape)
    kshape = topi.utils.get_const_tuple(kernel.shape)
    # Reduction axes: kernel height/width and the packed input channels.
    d_i = te.reduce_axis((0, kshape[2]), name="d_i")
    d_j = te.reduce_axis((0, kshape[3]), name="d_j")
    k_o = te.reduce_axis((0, ishape[1]), name="k_o")
    k_i = te.reduce_axis((0, ishape[-1]), name="k_i")
    hstride, wstride = strides
    res = te.compute(
        oshape,
        lambda b_o, c_o, i, j, b_i, c_i: te.sum(
            pad_data[b_o, k_o, i * hstride + d_i, j * wstride + d_j, b_i, k_i].astype(out_dtype)
            * kernel[c_o, k_o, d_i, d_j, c_i, k_i].astype(out_dtype),
            axis=[k_o, d_i, d_j, k_i],
        ),
        name="res",
        tag="conv2d_dense",
    )
    # FLOPs: 2 (mul+add) * output elements * reduction size (kh*kw*Cin).
    cfg.add_flop(
        2
        * np.prod(topi.utils.get_const_tuple(oshape))
        * kshape[2]
        * kshape[3]
        * ishape[1]
        * ishape[-1]
    )
    return res
@autotvm.register_topi_schedule("conv2d_packed.vta")
def schedule_conv2d_packed(cfg, outs):
    """Schedule packed conv2d.

    Walks the op graph rooted at the single output, separating the conv2d
    stage from trailing elementwise/constant stages, then tiles the output,
    stages data/weights into the VTA input/weight scratchpads, tensorizes
    the inner GEMM, and tags DMA/ALU pragmas.
    """
    assert len(outs) == 1
    output = outs[0]
    # const_ops: scalar (axis-free) stages to inline.
    # ewise_inputs: (consumer, placeholder) pairs cached into the acc scope.
    # ewise_ops: trailing elementwise stages mapped onto the VTA ALU.
    const_ops = []
    ewise_inputs = []
    ewise_ops = []
    conv2d_res = []
    assert "int" in output.op.input_tensors[0].dtype
    def _traverse(op):
        if topi.tag.is_broadcast(op.tag):
            if not op.same_as(output.op):
                if not op.axis:
                    const_ops.append(op)
                else:
                    ewise_ops.append(op)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.PlaceholderOp):
                    ewise_inputs.append((op, tensor))
                else:
                    _traverse(tensor.op)
        else:
            # Anything non-broadcast must be the conv2d compute itself.
            assert op.tag == "conv2d_dense"
            conv2d_res.append(op)
    _traverse(output.op)
    assert len(conv2d_res) == 1
    conv2d_stage = conv2d_res[0].output(0)
    s = te.create_schedule(output.op)
    ##### space definition begin #####
    b, c_o, x_i, x_j, _, _ = s[conv2d_stage].op.axis
    c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
    cfg.define_split("tile_b", b, num_outputs=2)
    cfg.define_split("tile_h", x_i, num_outputs=2)
    cfg.define_split("tile_w", x_j, num_outputs=2)
    cfg.define_split("tile_ci", c_i, num_outputs=2)
    cfg.define_split("tile_co", c_o, num_outputs=2)
    cfg.define_knob("oc_nthread", [1, 2])
    cfg.define_knob("h_nthread", [1, 2])
    ###### space definition end ######
    data, kernel = conv2d_stage.op.input_tensors
    # Peel off an explicit padding stage, if present, so the padded buffer
    # can live directly in the input scratchpad.
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        temp = data.op.input_tensors[0]
        pad_data = data
        data = temp
    else:
        pad_data = None
    env = get_env()
    # setup pad
    if pad_data is not None:
        cdata = pad_data
        s[pad_data].set_scope(env.inp_scope)
    else:
        cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
    ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
    s[conv2d_stage].set_scope(env.acc_scope)
    # cache read input
    cache_read_ewise = []
    for consumer, tensor in ewise_inputs:
        cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
    # set ewise scope
    for op in ewise_ops:
        s[op].set_scope(env.acc_scope)
        s[op].pragma(s[op].op.axis[0], env.alu)
    for op in const_ops:
        s[op].compute_inline()
    # tile
    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
    x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
    x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
    x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
    s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
    store_pt = x_j0
    # set all compute scopes
    s[conv2d_stage].compute_at(s[output], store_pt)
    for op in ewise_ops:
        s[op].compute_at(s[output], store_pt)
    for tensor in cache_read_ewise:
        s[tensor].compute_at(s[output], store_pt)
        s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
    # virtual threading along output channel axes
    if cfg["oc_nthread"].val > 1:
        _, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))
    # virtual threading along spatial rows
    if cfg["h_nthread"].val > 1:
        _, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))
    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
    k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
    s[conv2d_stage].reorder(x_bo, k_o, x_j, d_j, d_i, x_co, x_i, x_bi, x_ci, k_i)
    # Split the channel reduction; stage data/weight loads at the outer part.
    k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
    s[cdata].compute_at(s[conv2d_stage], k_o)
    s[ckernel].compute_at(s[conv2d_stage], k_o)
    # Use VTA instructions
    s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
    s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
    s[conv2d_stage].tensorize(x_bi, env.gemm)
    s[output].pragma(x_co1, env.dma_copy)
    return s
| 6,860 | 33.827411 | 96 | py |
tvm | tvm-main/vta/python/vta/top/vta_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Conv2D_transpose operator declaration and schedule registration for VTA."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.topi.nn.utils import get_pad_tuple
from ..environment import get_env
@autotvm.register_topi_compute("conv2d_transpose_packed.vta")
def conv2d_transpose_packed(cfg, data, kernel, strides, padding, out_dtype, output_padding=(0, 0)):
    """Packed conv2d_transpose compute.

    Implements transposed convolution as: dilate the input by the stride,
    apply the "backward" padding (kernel_size - 1 - forward_pad), then run
    a regular packed convolution over the result.
    """
    ishape = get_const_tuple(data.shape)
    kshape = get_const_tuple(kernel.shape)
    # Packed shapes: data (B_o, Ci_o, H, W, B_i, Ci_i),
    #                kernel (Co_o, Ci_o, KH, KW, Co_i, Ci_i).
    b, c_i, i_h, i_w, t_b, t_ci = ishape
    c_o, _, k_h, k_w, t_co, t_ci = kshape
    stride_h, stride_w = strides
    opad_h, opad_w = output_padding
    # FIXME(tmoreau89): currently IR pass breaks when output padding != (0,0)
    assert opad_h == 0 and opad_w == 0, "VTA does not support output padding for now"
    # derive padding parameters
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (k_h, k_w))
    # Backward padding: (kernel - 1 - forward pad) on each side.
    bpad_top = k_h - 1 - fpad_top
    bpad_bottom = k_h - 1 - fpad_bottom + opad_h
    bpad_left = k_w - 1 - fpad_left
    bpad_right = k_w - 1 - fpad_right + opad_w
    # padding stage
    dilated_input = topi.nn.dilate(data, [1, 1, stride_h, stride_w, 1, 1])
    data_pad = topi.nn.pad(
        dilated_input, [0, 0, bpad_top, bpad_left, 0, 0], [0, 0, bpad_bottom, bpad_right, 0, 0]
    )
    # convolution transpose stage
    out_h = (i_h - 1) * stride_h - fpad_top - fpad_bottom + k_h + opad_h
    out_w = (i_w - 1) * stride_w - fpad_left - fpad_right + k_w + opad_w
    oshape = (b, c_o, out_h, out_w, t_b, t_co)
    # Reduction axes: outer input channels, kernel spatial, inner channels.
    d_c = te.reduce_axis((0, c_i), name="d_c")
    d_h = te.reduce_axis((0, k_h), name="d_h")
    d_w = te.reduce_axis((0, k_w), name="d_w")
    d_ci = te.reduce_axis((0, t_ci), name="d_ci")
    out = te.compute(
        oshape,
        lambda i_n, i_c, i_h, i_w, j_n, j_c: te.sum(
            data_pad(i_n, d_c, i_h + d_h, i_w + d_w, j_n, d_ci).astype(out_dtype)
            * kernel[i_c, d_c, d_h, d_w, j_c, d_ci].astype(out_dtype),
            axis=[d_c, d_h, d_w, d_ci],
        ),
        tag="packed_conv2d_transpose",
        name="res",
    )
    # FLOPs: 2 (mul+add) * output elements * reduction size (kh*kw*Cin).
    cfg.add_flop(
        2
        * np.prod(topi.utils.get_const_tuple(oshape))
        * kshape[2]
        * kshape[3]
        * ishape[1]
        * ishape[-1]
    )
    return out
@autotvm.register_topi_schedule("conv2d_transpose_packed.vta")
def schedule_conv2d_transpose_packed(cfg, outs):
    """Schedule packed conv2d_transpose for VTA.

    Tiles the output for SRAM reuse, stages inputs/weights through the
    VTA scratchpads via DMA pragmas, optionally applies virtual threading,
    and marks the inner GEMM loop with the "conv2d_transpose_gemm" pragma
    for the VTA IR passes.
    """
    assert len(outs) == 1
    output = outs[0]
    ewise_inputs = []
    ewise_ops = []
    conv2d_res = []
    # Output must already be quantized int8 produced from an int32 accumulator.
    assert output.dtype == "int8"
    assert output.op.input_tensors[0].dtype == "int32"

    def _traverse(op):
        # Walk producers: collect elementwise/broadcast ops (and the
        # placeholder tensors they read) until the transpose-conv stage.
        if topi.tag.is_broadcast(op.tag):
            if not op.same_as(output.op):
                ewise_ops.append(op)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.PlaceholderOp):
                    ewise_inputs.append((op, tensor))
                else:
                    _traverse(tensor.op)
        else:
            assert op.tag == "packed_conv2d_transpose"
            conv2d_res.append(op)

    _traverse(output.op)
    assert len(conv2d_res) == 1
    conv2d_stage = conv2d_res[0].output(0)
    s = te.create_schedule(output.op)

    ##### space definition begin #####
    b, c_o, x_i, x_j, _, c_i = s[conv2d_stage].op.axis
    # rebind c_i to the outer-channel reduction axis for the tile_ci split
    c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
    cfg.define_split("tile_b", b, num_outputs=2)
    cfg.define_split("tile_h", x_i, num_outputs=2)
    cfg.define_split("tile_w", x_j, num_outputs=2)
    cfg.define_split("tile_ci", c_i, num_outputs=2)
    cfg.define_split("tile_co", c_o, num_outputs=2)
    cfg.define_knob("oc_nthread", [1, 2])
    cfg.define_knob("h_nthread", [1, 2])
    ###### space definition end ######

    data, kernel = conv2d_stage.op.input_tensors
    # Peel off an explicit pad stage, if the compute produced one.
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        temp = data.op.input_tensors[0]
        pad_data = data
        data = temp
    else:
        pad_data = None
    env = get_env()

    # setup pad: the padded (or cached) input lives in the input scratchpad
    if pad_data is not None:
        cdata = pad_data
        s[pad_data].set_scope(env.inp_scope)
    else:
        cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
    ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
    s[conv2d_stage].set_scope(env.acc_scope)

    # cache read input: stage elementwise inputs through the accumulator scope
    cache_read_ewise = []
    for consumer, tensor in ewise_inputs:
        cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))

    # set ewise scope: elementwise ops run on the VTA ALU
    for op in ewise_ops:
        s[op].set_scope(env.acc_scope)
        s[op].pragma(s[op].op.axis[0], env.alu)

    # tile output for SRAM reuse
    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
    x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
    x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
    x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
    s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
    store_pt = x_j0

    # set all compute scopes under the store point
    s[conv2d_stage].compute_at(s[output], store_pt)
    for op in ewise_ops:
        s[op].compute_at(s[output], store_pt)
    for tensor in cache_read_ewise:
        s[tensor].compute_at(s[output], store_pt)
        s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)

    # virtual threading along output channel axes
    if cfg["oc_nthread"].val > 1:
        _, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))

    # virtual threading along spatial rows
    if cfg["h_nthread"].val > 1:
        _, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))

    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
    k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
    x_i, x_ii = s[conv2d_stage].split(x_i, 4)
    x_j, x_jj = s[conv2d_stage].split(x_j, 2)
    s[conv2d_stage].reorder(x_bo, k_o, x_j, x_co, x_i, x_jj, d_j, d_i, x_ii, x_bi, x_ci, k_i)
    # unroll the small spatial/kernel loops around the GEMM
    for axis in [d_j, d_i, x_ii, x_jj]:
        s[conv2d_stage].unroll(axis)

    k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
    s[cdata].compute_at(s[conv2d_stage], k_o)
    s[ckernel].compute_at(s[conv2d_stage], k_o)

    # Use VTA instructions: DMA loads for operands, the transpose-GEMM pragma
    # (handled by the VTA IR passes), and a DMA store for the result.
    s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
    s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
    s[conv2d_stage].pragma(x_bi, "conv2d_transpose_gemm")
    s[output].pragma(x_co1, env.dma_copy)
    return s
| 7,429 | 35.067961 | 99 | py |
tvm | tvm-main/vta/python/vta/top/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA TOPI Utils."""
def is_packed_layout(layout):
    """Return True when *layout* names a VTA-packed layout.

    A packed layout string (e.g. ``NCHW1n16c``) carries lowercase ``n``
    and ``c`` packing-factor markers; plain ``NCHW`` is explicitly
    treated as unpacked.
    """
    if layout == "NCHW":
        return False
    return "n" in layout and "c" in layout
| 1,005 | 36.259259 | 62 | py |
tvm | tvm-main/vta/python/vta/top/graphpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, bad-chained-comparison
"""A Relay implementation of graph packing."""
import tvm
from tvm import relay
from tvm.relay import op, transform
from tvm.relay import ExprMutator
def run_opt_pass(expr, opt_pass):
    """Execute a Relay pass on *expr* and return the transformed expression."""
    assert isinstance(opt_pass, tvm.transform.Pass)
    transformed = opt_pass(tvm.IRModule.from_expr(expr))
    main_fn = transformed["main"]
    if isinstance(expr, relay.Function):
        return main_fn
    return main_fn.body
def _to_shape(shape):
    """Convert a shape-like sequence into a tuple of Python ints."""
    return tuple(map(int, shape))
def _pack_batch_channel(data, dshape, bfactor, cfactor):
    """Pack the data channel dimension.

    Reshapes NCHW data of shape *dshape* into
    (N/bfactor, bfactor, C/cfactor, cfactor, H, W) and transposes to the
    6-D packed layout (N/bfactor, C/cfactor, H, W, bfactor, cfactor).
    """
    # batch and channel must divide evenly by their packing factors
    assert int(dshape[0]) % bfactor == 0
    assert int(dshape[1]) % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            int(dshape[0]) // bfactor,
            bfactor,
            int(dshape[1]) // cfactor,
            cfactor,
            int(dshape[2]),
            int(dshape[3]),
        ),
    )
    data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
    return data
def _unpack_batch_channel(data, old_shape, unpack_transpose=False):
    """Unpack the data channel dimension.

    Inverse of ``_pack_batch_channel``: optionally undo the packing
    transpose, then reshape back to *old_shape*.
    """
    if unpack_transpose:
        # (0, 4, 1, 5, 2, 3) is the inverse permutation of the
        # (0, 2, 4, 5, 1, 3) packing transpose.
        data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
    data = op.reshape(data, newshape=old_shape)
    return data
def _channel_const_match(channel_length, cfactor_out):
    """Round *channel_length* up to a multiple of *cfactor_out*.

    Returns a ``(pad, rounded_length)`` pair, where ``pad`` is the amount
    added (zero when the length already divides evenly).
    """
    remainder = int(channel_length) % cfactor_out
    if remainder == 0:
        return 0, channel_length
    pad = cfactor_out - remainder
    return pad, channel_length + pad
def _const_shape_match(data, dshape, cfactor_out):
    """Zero-pad a 3-D constant along axis 0 up to a multiple of *cfactor_out*.

    Returns the (possibly padded) expression together with its new shape;
    both are returned unchanged when no padding is required.
    """
    assert len(dshape) == 3
    remainder = int(dshape[0]) % cfactor_out
    if remainder == 0:
        return data, dshape
    pad_width = cfactor_out - remainder
    padded = op.nn.pad(data, [[0, pad_width], [0, 0], [0, 0]])
    new_shape = (dshape[0] + pad_width, dshape[1], dshape[2])
    return padded, new_shape
def _weight_shape_match(data, dshape, channels, cfactor_out, transpose=False):
    """Zero-pad a 4-D weight along axis 0 up to a multiple of *cfactor_out*.

    The declared ``channels`` count is rounded up the same way.  The
    ``transpose`` parameter is kept for interface compatibility and is
    unused.
    """
    assert len(dshape) == 4
    o_rem = int(dshape[0]) % cfactor_out
    c_rem = int(channels) % cfactor_out
    if o_rem:
        pad = cfactor_out - o_rem
        data = op.nn.pad(data, [[0, pad], [0, 0], [0, 0], [0, 0]])
        dshape = (dshape[0] + pad,) + tuple(dshape[1:])
    if c_rem:
        channels = channels + (cfactor_out - c_rem)
    return data, dshape, channels
def _weight_shape_match_transpose(data, dshape, channels, cfactor_out):
    """Zero-pad an IOHW weight along axis 1 up to a multiple of *cfactor_out*.

    Parameters
    ----------
    data : relay.Expr
        The weight expression to pad.
    dshape : tuple of int
        The 4-D weight shape; axis 1 (output channels) is padded.
    channels : int
        The declared output-channel count; rounded up alongside the shape.
    cfactor_out : int
        The channel packing factor the dimension must divide evenly by.

    Returns
    -------
    (data, dshape, channels)
        The (possibly padded) weight, its new shape, and the rounded-up
        channel count.
    """
    assert len(dshape) == 4
    pad_width = int(dshape[1]) % cfactor_out
    channels_pad = int(channels) % cfactor_out
    if pad_width != 0:
        pad_width = cfactor_out - pad_width
        data = op.nn.pad(data, [[0, 0], [0, pad_width], [0, 0], [0, 0]])
        # BUG FIX: the old code called tuple(dshape[0], [...]) -- tuple()
        # takes a single iterable, so the padded branch raised TypeError.
        # Build the padded 4-tuple correctly instead.
        dshape = (dshape[0], dshape[1] + pad_width, dshape[2], dshape[3])
    if channels_pad != 0:
        channels = channels + (cfactor_out - channels_pad)
    return data, dshape, channels
def _pack_weight(data, dshape, cfactor):
    """Pack the weight into packed format.

    Reshapes a 4-D weight (OIHW per the conv2d kernel_layout used in
    ExprPack) into (O/cfactor, cfactor, I/cfactor, cfactor, H, W) and
    transposes to (O/cfactor, I/cfactor, H, W, cfactor_o, cfactor_i).
    """
    assert len(dshape) == 4
    # both channel dimensions must divide evenly by the packing factor
    assert int(dshape[0]) % cfactor == 0
    assert int(dshape[1]) % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            int(dshape[0]) // cfactor,
            cfactor,
            int(dshape[1]) // cfactor,
            cfactor,
            int(dshape[2]),
            int(dshape[3]),
        ),
    )
    data = op.transpose(data, axes=(0, 2, 4, 5, 1, 3))
    return data
def _pack_weight_conv2d_transpose(data, dshape, cfactor):
    """Pack the weight into packed format.

    For 4-D transposed-conv weights: reshape to
    (d0/cfactor, cfactor, d1/cfactor, cfactor, H, W) and transpose to
    (d1/cfactor, d0/cfactor, H, W, cfactor_1, cfactor_0), i.e. the two
    leading channel dimensions are swapped during packing.
    """
    dshape = _to_shape(dshape)
    assert len(dshape) == 4
    assert dshape[0] % cfactor == 0
    assert dshape[1] % cfactor == 0
    data = op.reshape(
        data,
        newshape=(
            dshape[0] // cfactor,
            cfactor,
            dshape[1] // cfactor,
            cfactor,
            dshape[2],
            dshape[3],
        ),
    )
    data = op.transpose(data, axes=(2, 0, 4, 5, 3, 1))
    return data
def _pack_const(data, dshape, dtype, bfactor, cfactor):
    """Pack a constant parameter.

    A (C, H, W) constant is reshaped to (C/cfactor, cfactor, H, W, 1),
    transposed to (C/cfactor, H, W, 1, cfactor), and then broadcast along
    the batch dimension to (C/cfactor, H, W, bfactor, cfactor).
    """
    dshape = _to_shape(dshape)
    assert len(dshape) == 3
    assert dshape[0] % cfactor == 0
    data = op.reshape(data, newshape=(dshape[0] // cfactor, cfactor, dshape[1], dshape[2], 1))
    data = op.transpose(data, axes=(0, 2, 3, 4, 1))
    # broadcast batch dimension to bfactor
    data = op.broadcast_to(
        data, shape=(dshape[0] // cfactor, dshape[1], dshape[2], bfactor, cfactor)
    )
    return data
def _get_tensor_shape(node):
    """Return the node's shape as a tuple of ints, or [] for non-tensor types."""
    ctype = node.checked_type
    if not isinstance(ctype, relay.ty.TensorType):
        return []
    return _to_shape(ctype.shape)
def _get_tensor_type(node):
    """Return the node's dtype, defaulting to "float32" for non-tensor types."""
    ctype = node.checked_type
    return ctype.dtype if isinstance(ctype, relay.ty.TensorType) else "float32"
def _operator_idx_inc(expr, count_meta, operator_current_idx):
    """Advance the running operator index.

    Constant (meta) nodes are only counted when *count_meta* is set;
    every other expression always bumps the index by one.
    """
    if not isinstance(expr, relay.expr.Constant) or count_meta:
        return operator_current_idx + 1
    return operator_current_idx
class ExprDeviceAnnot(ExprMutator):
    """Visitor to perform graph annotation on an AST.

    Calls with traversal index in [start, end) are annotated to run on
    the VTA ``ext_dev`` device (float ops excepted); the call at index
    ``end`` is annotated to run on the CPU.

    Parameters
    ----------
    start: int
        the start location to mark run on vta (inclusive)
    end: int
        the end location to mark run on vta (exclusive)

    Returns
    ---------
    None
    """

    def __init__(self, start=-1, end=-1):
        self.ext_dev = tvm.device("ext_dev")
        self.cpu_dev = tvm.device("cpu")
        self.cast = op.op.get("cast")
        # traversal index of the call currently being rebuilt
        self.counter = -1
        self.start = start
        self.end = end
        super().__init__()

    def visit_call(self, call):
        """Visit the children."""
        # First visit the children.
        args = [self.visit(arg) for arg in call.args]
        self.counter += 1
        if self.counter == self.start:
            ret = relay.Call(call.op, args, call.attrs)
            ret = relay.annotation.on_device(ret, self.ext_dev)
            return ret
        if self.counter == self.end:
            ret = relay.Call(call.op, args, call.attrs)
            ret = relay.annotation.on_device(ret, self.cpu_dev)
            return ret
        if self.counter > self.start and self.counter < self.end:
            ret = relay.Call(call.op, args, call.attrs)
            # skip the float op, i.e., float->int cast
            if self.is_float_op(call):
                return ret
            return relay.annotation.on_device(ret, self.ext_dev)
        # outside [start, end]: rebuild the call without any annotation
        return relay.Call(self.visit(call.op), args, call.attrs)

    def is_float_op(self, call):
        """check if this op belongs to a float op
        in general, float op's odtype is float;
        a special case is float->int cast, which follow this op sequence:
        multiply(float) -> round(float) -> clip(float) -> cast(int);
        """
        args = call.args
        odtype = _get_tensor_type(call)
        if odtype == "float32":
            return True
        if call.op == self.cast:
            idtype = _get_tensor_type(args[0])
            if idtype == "float32":
                return True
        return False
class ExprLocator(ExprMutator):
    """Visitor that records, for every (op, output-dtype) pair, the
    traversal indices of all calls using it (in ``op2nodes``)."""

    def __init__(self):
        self.counter = -1
        self.op2nodes = {}
        super().__init__()

    def visit_call(self, call):
        """Visit the children first, then record this call's traversal index."""
        new_args = [self.visit(arg) for arg in call.args]
        key = (call.op, _get_tensor_type(call))
        self.counter += 1
        self.op2nodes.setdefault(key, []).append(self.counter)
        return relay.Call(self.visit(call.op), new_args, call.attrs)
class ExprPack(ExprMutator):
    """Visitor to perform graph packing on an AST.

    While ``start_pack`` is active (between bitpack_start/bitpack_end
    annotations), operators are rewritten to VTA's packed 6-D layouts
    using the *bfactor* (batch) and *cfactor* (channel) packing factors;
    *weight_bits* controls optional sub-byte weight bit-packing.
    """

    def __init__(self, bfactor, cfactor, weight_bits):
        self.bfactor = bfactor
        self.cfactor = cfactor
        self.weight_bits = weight_bits
        self.start_pack = False
        # Cache Operator the algorithm matches against.
        self.bitpack_start = op.op.get("annotation.bitpack_start")
        self.bitpack_end = op.op.get("annotation.bitpack_end")
        self.conv2d = op.op.get("nn.conv2d")
        self.conv2d_transpose = op.op.get("nn.conv2d_transpose")
        self.add = op.op.get("add")
        self.multiply = op.op.get("multiply")
        self.bias_add = op.op.get("nn.bias_add")
        self.pad = op.op.get("nn.pad")
        self.upsampling = op.op.get("nn.upsampling")
        self.reshape = op.op.get("reshape")
        self.number_of_conv2d = 0
        # cleared by the reshape rule, which performs its own unpack transpose
        self.unpack_transpose = True
        super().__init__()

    def visit_call(self, call):
        """Visit the children."""
        # First visit the children.
        oshape = _get_tensor_shape(call)
        odtype = _get_tensor_type(call)
        # types come from the *original* (pre-packing) children
        input_types = [arg.checked_type for arg in call.args]
        args = [self.visit(arg) for arg in call.args]
        # Start and stop cases.
        if call.op == self.bitpack_start:
            assert not self.start_pack
            self.start_pack = True
            return _pack_batch_channel(args[0], oshape, self.bfactor, self.cfactor)
        if call.op == self.bitpack_end:
            if self.start_pack:
                self.start_pack = False
                data = args[0]
                # shape of the unrewritten arg = shape to restore to
                data_shape = _get_tensor_shape(call.args[0])
                return _unpack_batch_channel(data, data_shape, self.unpack_transpose)
        if self.start_pack:
            # Operator cases
            if call.op == self.conv2d and odtype == "int32":
                # conv2d: repack the weight, switch to packed layouts, and
                # optionally bit-pack sub-byte weights.
                self.number_of_conv2d += 1
                assert 8 % self.weight_bits == 0
                w_lanes = 8 // self.weight_bits
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                kernel_layout = "OIHW%do%di" % (self.cfactor, self.cfactor)
                data, weight = args
                data_shape = _to_shape(input_types[0].shape)
                kernel_shape = _to_shape(input_types[1].shape)
                channels = call.attrs.channels
                weight, kernel_shape, channels = _weight_shape_match(
                    weight, kernel_shape, channels, self.cfactor
                )
                kernel = _pack_weight(weight, kernel_shape, self.cfactor)
                # insert bit packing when necessary
                if w_lanes != 1:
                    assert 8 % w_lanes == 0
                    kernel = op.bitpack(kernel, lanes=w_lanes)
                conv2d = op.nn.conv2d(
                    data,
                    kernel,
                    strides=call.attrs.strides,
                    padding=call.attrs.padding,
                    dilation=call.attrs.dilation,
                    groups=call.attrs.groups,
                    channels=channels,
                    kernel_size=call.attrs.kernel_size,
                    data_layout=data_layout,
                    kernel_layout=kernel_layout,
                    out_dtype=call.attrs.out_dtype,
                )
                return conv2d
            if call.op == self.conv2d_transpose and odtype == "int32":
                # conv2d_transpose: same treatment with the IOHW packing.
                self.number_of_conv2d += 1
                assert 8 % self.weight_bits == 0
                w_lanes = 8 // self.weight_bits
                # NOTE: this inner check is redundant (we are already inside
                # the "if self.start_pack:" block); kept as-is.
                if self.start_pack:
                    data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                    kernel_layout = "IOHW%di%do" % (self.cfactor, self.cfactor)
                    data, weight = args
                    data_shape = _to_shape(input_types[0].shape)
                    kernel_shape = _to_shape(input_types[1].shape)
                    channels = call.attrs.channels
                    weight, kernel_shape, channels = _weight_shape_match_transpose(
                        weight, kernel_shape, channels, self.cfactor
                    )
                    kernel = _pack_weight_conv2d_transpose(weight, kernel_shape, self.cfactor)
                    conv2d = op.nn.conv2d_transpose(
                        data,
                        kernel,
                        strides=call.attrs.strides,
                        padding=call.attrs.padding,
                        dilation=call.attrs.dilation,
                        groups=call.attrs.groups,
                        channels=call.attrs.channels,
                        kernel_size=call.attrs.kernel_size,
                        data_layout=data_layout,
                        kernel_layout=kernel_layout,
                        output_padding=call.attrs.output_padding,
                        out_dtype=call.attrs.out_dtype,
                    )
                return conv2d
            if call.op == self.add and tuple(input_types[0].shape) == tuple(input_types[1].shape):
                # same-shape add: fall through to the generic rebuild below
                pass
            elif call.op == self.add and len(input_types[1].shape) == 3:
                # add with a 3-D constant: pad + pack the constant to 5-D
                data, const = args
                const, input_shape = _const_shape_match(const, input_types[1].shape, self.cfactor)
                const = _pack_const(
                    const, _to_shape(input_shape), input_types[1].dtype, self.bfactor, self.cfactor
                )
                return relay.Call(self.add, [data, const])
            elif call.op == self.multiply and tuple(input_types[0].shape) == tuple(
                input_types[1].shape
            ):
                # same-shape multiply: fall through to the generic rebuild
                pass
            elif call.op == self.multiply and len(input_types[1].shape) == 3:
                # multiply with a 3-D constant: pack the constant
                data, const = args
                const = _pack_const(
                    const,
                    _to_shape(input_types[1].shape),
                    input_types[1].dtype,
                    self.bfactor,
                    self.cfactor,
                )
                return relay.Call(self.multiply, [data, const])
            elif self.start_pack and call.op == self.bias_add:
                # bias_add is lowered to a packed add
                data, bias = args
                bias = _pack_const(
                    bias,
                    _to_shape(input_types[1].shape),
                    input_types[1].dtype,
                    self.bfactor,
                    self.cfactor,
                )
                return relay.Call(self.add, [data, bias])
            elif (
                self.start_pack and call.op == op.op.get("cast") and input_types[0].dtype == "int32"
            ):
                # int32 cast: rebuild with original attrs on the packed input
                cast = relay.Call(op.op.get("cast"), [args[0]], call.attrs)
                return cast
            elif call.op == self.pad:
                pad_width = call.attrs.pad_width
                if len(pad_width) == 6:
                    # already 6-D pad widths: generic rebuild below
                    pass
                elif len(pad_width) == 4:
                    # extend 4-D pad widths with the two inner packed dims
                    (data, pad_value) = args
                    new_pad_width = []
                    new_pad_width.extend(pad_width)
                    for _ in range(2):
                        new_pad_width.append([0, 0])
                    return op.nn.pad(data, pad_value=pad_value, pad_width=new_pad_width)
            elif call.op == self.upsampling:
                # upsampling: forward attrs with the packed data layout
                (data,) = args
                scale_h = call.attrs.scale_h
                scale_w = call.attrs.scale_w
                data_layout = "NCHW%dn%dc" % (self.bfactor, self.cfactor)
                method = call.attrs.method
                align_corners = call.attrs.align_corners
                return op.nn.upsampling(data, scale_h, scale_w, data_layout, method, align_corners)
            elif call.op == self.reshape and len(input_types[0].shape) == 4:
                # reshape of 4-D data: unpack here instead of at bitpack_end
                (data,) = args
                self.unpack_transpose = False
                data = op.transpose(data, axes=(0, 4, 1, 5, 2, 3))
                new_shape = [int(x) for x in input_types[0].shape]
                # Check if the reshape match with such shape after pad
                pad, new_shape[1] = _channel_const_match(new_shape[1], self.cfactor)
                data = op.reshape(data, new_shape)
                # remove pad data (negative pad width crops the channel dim)
                if pad != 0:
                    new_pad_width = [[0, 0], [0, -pad], [0, 0], [0, 0]]
                    data = op.nn.pad(data, pad_width=new_pad_width)
                return data
        # default: rebuild the call with the visited children unchanged
        return relay.Call(self.visit(call.op), args, call.attrs)
class BT(Exception):
    """Control-flow sentinel raised when the stop operator is reached
    while slicing out the subgraph."""
def get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta):
    """Bracket the [start_name, stop_name] region with bitpack annotations.

    We assume stop_name only appears once for simplicity.
    This constraint will be lifted in the future.
    bitpack_start and bitpack_end are both inclusive.

    The expression is converted to A-normal form so each call is bound by
    a Let; the recursion wraps the start call in ``bitpack_start`` and,
    via the ``BT`` exception, the stop call in ``bitpack_end``.
    """
    bitpack_start = op.op.get("annotation.bitpack_start")
    bitpack_end = op.op.get("annotation.bitpack_end")
    anf = run_opt_pass(expr, transform.ToANormalForm())
    operator_current_idx = 0

    def _recursion(anf, start_found, stop_found, operator_current_idx):
        """Helper to obtain the subgraph."""
        if isinstance(anf, relay.Function):
            return relay.Function(
                anf.params,
                _recursion(anf.body, start_found, stop_found, operator_current_idx),
                anf.ret_type,
                anf.type_params,
                anf.attrs,
            )
        if isinstance(anf, relay.expr.Let):
            value = anf.value
            if isinstance(value, relay.expr.Call):
                if isinstance(value.op, tvm.ir.Op):
                    if value.op.name == start_name and not start_found:
                        if operator_current_idx == start_name_idx or start_name_idx is None:
                            value = relay.expr.Call(bitpack_start, [value])
                            start_found = True
                    elif value.op.name == stop_name:
                        if operator_current_idx == stop_name_idx or stop_name_idx is None:
                            # BT propagates to the *parent* Let's except
                            # clause, which wraps its value in bitpack_end.
                            raise BT()
            operator_current_idx = _operator_idx_inc(value, count_meta, operator_current_idx)
            try:
                return relay.expr.Let(
                    anf.var,
                    value,
                    _recursion(anf.body, start_found, stop_found, operator_current_idx),
                )
            except BT:
                assert start_found
                assert not stop_found
                stop_found = True
                value = relay.expr.Call(bitpack_end, [value])
                # todo: check anf.body has no more stop_name beside that one
                return relay.expr.Let(anf.var, value, anf.body)
        else:
            # reached the tail of the Let chain; both markers must be placed
            assert start_found
            assert stop_found
            return anf

    annotated = _recursion(anf, False, False, operator_current_idx)
    return run_opt_pass(annotated, transform.ToGraphNormalForm())
def graph_pack(
    expr,
    bfactor,
    cfactor,
    weight_bits,
    start_name="nn.max_pool2d",
    stop_name="nn.global_avg_pool2d",
    start_name_idx=None,
    stop_name_idx=None,
    count_meta=False,
    device_annot=False,
    annot_start_name="nn.conv2d",
    annot_end_name="annotation.stop_fusion",
):
    """Pack the graph into batch&channel packed format.

    Parameters
    ----------
    expr : relay.Expr
        The input program.
    bfactor : int
        The packing factor in batch
    cfactor : int
        The packing factor in channel
    weight_bits: int
        The bit-width of the weights.
    start_name: str, optional
        Start packing from certain known node when start_name_idx is None.
    stop_name: str, optional
        Stop packing from certain known node when stop_name_idx is None.
    start_name_idx: int, optional
        When start_name_idx not None, start packing only when node name equal start_name
        and node idx equals start_name_idx.
    stop_name_idx: int, optional
        When stop_name_idx not None, stop packing only when node name equal stop_name
        and node index equals stop_name_idx.
    count_meta:boolean, optional
        When count_meta is False, the operator increase logic would not count the meta that have
        the type 'relay.expr.Constant', start_name_idx and stop_name_idx follow the index from
        'expr.astext(show_meta_data=False)'. When count_meta is True, the operator increase
        logic would count the meta.
    device_annot: boolean, optional
        if we want to annoate the device_type
    annot_start_name: str, optional
        device annotation start node, from which we mark the nodes as `ext_dev`
    annot_end_name: str, optional
        device annotation end node, after which we mark the nodes as 'cpu'

    Returns
    -------
    expr : Expr
        The transformed expression.
    """
    assert isinstance(expr, relay.Function)
    # NOTE(review): "start_name_idx is None != stop_name_idx is None" is a
    # Python *chained* comparison -- it evaluates as (start_name_idx is None)
    # and (None != stop_name_idx) and (stop_name_idx is None) -- not the XOR
    # it reads as, so this assert is nearly always satisfied.  pylint's
    # bad-chained-comparison is disabled at the top of this file; left as-is
    # here to preserve the existing (permissive) behavior.
    assert (
        (start_name != stop_name)
        or (start_name_idx is None != stop_name_idx is None)
        or (not (start_name_idx is None and stop_name_idx is None))
        or (start_name_idx < stop_name_idx)
    )
    expr = get_subgraph(expr, start_name, stop_name, start_name_idx, stop_name_idx, count_meta)
    expr = run_opt_pass(expr, transform.InferType())
    packer = ExprPack(bfactor, cfactor, weight_bits)
    expr = packer.visit(expr)
    # a dangling bitpack_start (no matching end) would leave start_pack set
    assert not packer.start_pack
    expr = run_opt_pass(expr, transform.InferType())

    if device_annot:
        expr_locator = ExprLocator()
        expr_locator.visit(expr)
        annot_start = op.op.get(annot_start_name)
        # first int32 occurrence of the start op marks the VTA region start
        start = expr_locator.op2nodes[(annot_start, "int32")][0]
        annot_end = op.op.get(annot_end_name)
        # we mark the next op to the last stop_fusion on cpu device
        end = expr_locator.op2nodes[(annot_end, "int8")][-1] + 1
        device_annot = ExprDeviceAnnot(start=start, end=end)
        expr = device_annot.visit(expr)
        return run_opt_pass(expr, transform.InferType())

    return expr
| 23,085 | 35.702703 | 100 | py |
tvm | tvm-main/vta/python/vta/top/vta_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""Dense operator declaration and schedule registration for VTA."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from ..environment import get_env
def is_packed_layout(layout):
    """Check whether *layout* is a VTA packed layout string.

    ``NCHW`` is explicitly unpacked; any layout carrying both lowercase
    ``n`` and ``c`` packing markers (e.g. ``NCHW1n16c``) counts as packed.
    """
    return layout != "NCHW" and ("n" in layout) and ("c" in layout)
@autotvm.register_topi_compute("dense_packed.vta")
def dense_packed(cfg, data, weight, bias=None, out_dtype=None):
    """Dense function declaration.

    Both operands must already be in VTA's 4-D packed form
    (outer, channel_outer, inner, channel_inner).  The ``bias`` argument
    is accepted for TOPI interface compatibility but is not used here.
    """
    # Make sure that the dense operator is packed
    if len(data.shape) != 4 or len(weight.shape) != 4:
        raise topi.InvalidShapeError()
    # Derive shapes
    ishape = topi.utils.get_const_tuple(data.shape)
    wshape = topi.utils.get_const_tuple(weight.shape)
    oshape = (data.shape[0], weight.shape[0], data.shape[2], weight.shape[2])
    # Reduction axes (input channel): outer and inner factors must match
    assert ishape[1] == wshape[1]
    assert ishape[3] == wshape[3]
    k_o = te.reduce_axis((0, ishape[1]), name="k_o")
    k_i = te.reduce_axis((0, ishape[3]), name="k_i")
    res = te.compute(
        oshape,
        lambda b_o, c_o, b_i, c_i: te.sum(
            data[b_o, k_o, b_i, k_i].astype(out_dtype)
            * weight[c_o, k_o, c_i, k_i].astype(out_dtype),
            axis=[k_o, k_i],
        ),
        name="res",
        tag="dense_pack",
    )
    # 2 * output-elements * K_outer * K_inner multiply-adds
    cfg.add_flop(2 * np.prod(topi.utils.get_const_tuple(oshape)) * ishape[1] * ishape[3])
    return res
@autotvm.register_topi_schedule("dense_packed.vta")
def schedule_dense_packed(cfg, outs):
    """Packed dense schedule.

    Tiles the output for SRAM reuse, stages operands through the VTA
    scratchpads with DMA pragmas, runs elementwise ops on the ALU, and
    tensorizes the inner matrix product onto the GEMM intrinsic.
    """
    assert len(outs) == 1
    output = outs[0]
    const_ops = []
    ewise_inputs = []
    ewise_ops = []
    dense_res = []
    # the dense stage must accumulate in an integer dtype
    assert "int" in output.op.input_tensors[0].dtype

    def _traverse(op):
        # Walk producers: collect constant ops, elementwise ops and their
        # placeholder inputs until the dense stage is found.
        if topi.tag.is_broadcast(op.tag):
            if not op.same_as(output.op):
                if not op.axis:
                    const_ops.append(op)
                else:
                    ewise_ops.append(op)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.PlaceholderOp):
                    ewise_inputs.append((op, tensor))
                else:
                    _traverse(tensor.op)
        else:
            assert op.tag == "dense_pack"
            dense_res.append(op)

    _traverse(output.op)
    assert len(dense_res) == 1
    dense_stage = dense_res[0].output(0)
    s = te.create_schedule(output.op)

    ##### space definition begin #####
    b, c_o, _, _ = s[dense_stage].op.axis
    c_i, _ = s[dense_stage].op.reduce_axis
    cfg.define_split("tile_b", b, num_outputs=2)
    cfg.define_split("tile_ci", c_i, num_outputs=2)
    cfg.define_split("tile_co", c_o, num_outputs=2)
    cfg.define_knob("oc_nthread", [1, 2])
    ###### space definition end ######

    data, weight = dense_stage.op.input_tensors

    env = get_env()

    # stage operands in the input/weight scratchpads, accumulate in acc
    cdata = s.cache_read(data, env.inp_scope, [dense_stage])
    cweight = s.cache_read(weight, env.wgt_scope, [dense_stage])
    s[dense_stage].set_scope(env.acc_scope)

    # cache read input
    cache_read_ewise = []
    for consumer, tensor in ewise_inputs:
        cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))

    # set ewise scope: elementwise ops run on the VTA ALU
    for op in ewise_ops:
        s[op].set_scope(env.acc_scope)
        s[op].pragma(s[op].op.axis[0], env.alu)

    # scalar constants are folded into their consumers
    for op in const_ops:
        s[op].compute_inline()

    # apply tiling for SRAM reuse
    x_b, x_c, _, _ = s[output].op.axis
    x_bo, x_bi = cfg["tile_b"].apply(s, output, x_b)
    x_co, x_ci = cfg["tile_co"].apply(s, output, x_c)
    s[output].reorder(x_bo, x_co, x_bi, x_ci)
    store_pt = x_co

    # set all compute scopes under the store point
    s[dense_stage].compute_at(s[output], store_pt)
    for op in ewise_ops:
        s[op].compute_at(s[output], store_pt)
    for tensor in cache_read_ewise:
        s[tensor].compute_at(s[output], store_pt)
        s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)

    # virtual threading along output channel axes
    if cfg["oc_nthread"].val > 1:
        _, v_t = s[output].split(x_co, factor=cfg["oc_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))

    x_bo, x_co, x_bi, _ = s[dense_stage].op.axis
    k_o, _ = s[dense_stage].op.reduce_axis
    s[dense_stage].reorder(x_bo, k_o, x_co)

    k_o, _ = cfg["tile_ci"].apply(s, dense_stage, k_o)
    s[cdata].compute_at(s[dense_stage], k_o)
    s[cweight].compute_at(s[dense_stage], k_o)

    # Use VTA instructions: DMA loads, GEMM tensorization, DMA store
    s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
    s[cweight].pragma(s[cweight].op.axis[0], env.dma_copy)
    s[dense_stage].tensorize(x_bi, env.gemm)
    s[output].pragma(x_ci, env.dma_copy)
    return s
| 5,611 | 31.627907 | 89 | py |
tvm | tvm-main/vta/python/vta/top/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""TVM TOPI connector, eventually most of these should go to TVM repo"""
from . import bitpack
from .graphpack import graph_pack
from . import op
from .vta_conv2d import conv2d_packed, schedule_conv2d_packed
from .vta_conv2d_transpose import conv2d_transpose_packed, schedule_conv2d_transpose_packed
from .vta_group_conv2d import group_conv2d_packed, schedule_group_conv2d_packed
from .vta_dense import dense_packed, schedule_dense_packed
from . import utils
| 1,246 | 43.535714 | 91 | py |
tvm | tvm-main/vta/python/vta/top/vta_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Group conv2D operator declaration and schedule registration for VTA."""
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
from ..environment import get_env
@autotvm.register_topi_compute("group_conv2d_packed.vta")
def group_conv2d_packed(cfg, data, kernel, strides, padding, dilation, group, out_dtype):
    """Packed group conv2d nchw function.

    Operates on VTA's 6-D packed int8 layouts; dilation is unsupported
    and the accumulator dtype must be int32 (asserted below).
    """
    assert dilation == (1, 1)

    if padding[0]:
        pad_data = topi.nn.pad(data, [0, 0, padding[0], padding[1], 0, 0], name="pad_data")
    else:
        pad_data = data
    assert len(data.shape) == 6
    assert len(kernel.shape) == 6
    assert data.dtype == "int8", data.dtype
    assert kernel.dtype == "int8", kernel.dtype
    assert out_dtype == "int32", out_dtype

    oheight = topi.utils.get_const_int((pad_data.shape[2] - kernel.shape[2]) // strides[0] + 1)
    owidth = topi.utils.get_const_int((pad_data.shape[3] - kernel.shape[3]) // strides[1] + 1)
    oshape = (data.shape[0], kernel.shape[0], oheight, owidth, data.shape[4], kernel.shape[4])

    ishape = topi.utils.get_const_tuple(data.shape)
    kshape = topi.utils.get_const_tuple(kernel.shape)
    # kernel carries per-group input channels; output channels split evenly
    assert group * kshape[1] == ishape[1]
    assert kshape[0] % group == 0
    d_i = te.reduce_axis((0, kshape[2]), name="d_i")
    d_j = te.reduce_axis((0, kshape[3]), name="d_j")
    k_o = te.reduce_axis((0, kshape[1]), name="k_o")
    k_i = te.reduce_axis((0, kshape[-1]), name="k_i")
    hstride, wstride = strides
    out = te.compute(
        oshape,
        lambda b_o, c_o, i, j, b_i, c_i: te.sum(
            pad_data[
                b_o,
                # select this output channel's group, then offset by k_o
                c_o // (kshape[0] // group) * kshape[1] + k_o,
                i * hstride + d_i,
                j * wstride + d_j,
                b_i,
                k_i,
            ].astype(out_dtype)
            * kernel[c_o, k_o, d_i, d_j, c_i, k_i].astype(out_dtype),
            axis=[k_o, d_i, d_j, k_i],
        ),
        name="res",
        tag="packed_group_conv2d",
    )
    # NOTE(review): this uses total ishape[1] rather than per-group kshape[1],
    # so the recorded FLOPs look `group`x larger than the reduction actually
    # performs -- only affects reported GFLOPS during tuning; verify intent.
    cfg.add_flop(
        2
        * np.prod(topi.utils.get_const_tuple(oshape))
        * kshape[2]
        * kshape[3]
        * ishape[1]
        * kshape[-1]
    )
    return out
@autotvm.register_topi_schedule("group_conv2d_packed.vta")
def schedule_group_conv2d_packed(cfg, outs):
    """Schedule the packed conv2d.

    Builds the VTA schedule for the fused graph around a single
    "packed_group_conv2d" stage: stages data/kernel into on-chip scopes,
    tiles the output, optionally applies virtual threading, and tensorizes
    the inner GEMM.

    Parameters
    ----------
    cfg : autotvm config
        Defines the tiling and virtual-thread knobs.
    outs : list of te.Tensor
        Single-element list holding the int8 output of the fused op chain.

    Returns
    -------
    te.Schedule
    """
    assert len(outs) == 1
    output = outs[0]
    const_ops = []
    ewise_inputs = []
    ewise_ops = []
    conv2d_res = []
    assert output.dtype == "int8"
    assert output.op.input_tensors[0].dtype == "int32"
    def _traverse(op):
        # Walk the fused graph, sorting ops into constants, elementwise ops,
        # elementwise placeholder inputs, and the single conv2d stage.
        if topi.tag.is_broadcast(op.tag):
            if not op.same_as(output.op):
                if not op.axis:
                    const_ops.append(op)
                else:
                    ewise_ops.append(op)
            for tensor in op.input_tensors:
                if isinstance(tensor.op, tvm.te.PlaceholderOp):
                    ewise_inputs.append((op, tensor))
                else:
                    _traverse(tensor.op)
        else:
            assert op.tag == "packed_group_conv2d"
            conv2d_res.append(op)
    _traverse(output.op)
    assert len(conv2d_res) == 1
    conv2d_stage = conv2d_res[0].output(0)
    s = te.create_schedule(output.op)
    ##### space definition begin #####
    b, c_o, x_i, x_j, _, _ = s[conv2d_stage].op.axis
    c_i, _, _, _ = s[conv2d_stage].op.reduce_axis
    cfg.define_split("tile_b", b, num_outputs=2)
    cfg.define_split("tile_h", x_i, num_outputs=2)
    cfg.define_split("tile_w", x_j, num_outputs=2)
    cfg.define_split("tile_ci", c_i, num_outputs=2)
    cfg.define_split("tile_co", c_o, num_outputs=2)
    cfg.define_knob("oc_nthread", [1, 2])
    cfg.define_knob("h_nthread", [1, 2])
    ###### space definition end ######
    data, kernel = conv2d_stage.op.input_tensors
    # If the conv input is a pad stage, schedule the pad directly into the
    # input scope instead of adding a separate cache_read.
    if isinstance(data.op, tvm.te.ComputeOp) and "pad" in data.op.tag:
        temp = data.op.input_tensors[0]
        pad_data = data
        data = temp
    else:
        pad_data = None
    env = get_env()
    # setup pad
    if pad_data is not None:
        cdata = pad_data
        s[pad_data].set_scope(env.inp_scope)
    else:
        cdata = s.cache_read(data, env.inp_scope, [conv2d_stage])
    ckernel = s.cache_read(kernel, env.wgt_scope, [conv2d_stage])
    s[conv2d_stage].set_scope(env.acc_scope)
    # cache read input
    cache_read_ewise = []
    for consumer, tensor in ewise_inputs:
        cache_read_ewise.append(s.cache_read(tensor, env.acc_scope, [consumer]))
    # set ewise scope
    for op in ewise_ops:
        s[op].set_scope(env.acc_scope)
        s[op].pragma(s[op].op.axis[0], env.alu)
    for op in const_ops:
        s[op].compute_inline()
    # tile
    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[output].op.axis
    x_co0, x_co1 = cfg["tile_co"].apply(s, output, x_co)
    x_i0, x_i1 = cfg["tile_h"].apply(s, output, x_i)
    x_j0, x_j1 = cfg["tile_w"].apply(s, output, x_j)
    s[output].reorder(x_bo, x_i0, x_co0, x_j0, x_co1, x_i1, x_j1, x_bi, x_ci)
    store_pt = x_j0
    # set all compute scopes
    s[conv2d_stage].compute_at(s[output], store_pt)
    for op in ewise_ops:
        s[op].compute_at(s[output], store_pt)
    for tensor in cache_read_ewise:
        s[tensor].compute_at(s[output], store_pt)
        s[tensor].pragma(s[tensor].op.axis[0], env.dma_copy)
    # virtual threading along output channel axes
    if cfg["oc_nthread"].val > 1:
        _, v_t = s[output].split(x_co0, factor=cfg["oc_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))
    # virtual threading along spatial rows
    if cfg["h_nthread"].val > 1:
        _, v_t = s[output].split(x_i0, factor=cfg["h_nthread"].val)
        s[output].reorder(v_t, x_bo)
        s[output].bind(v_t, te.thread_axis("cthread"))
    x_bo, x_co, x_i, x_j, x_bi, x_ci = s[conv2d_stage].op.axis
    k_o, d_i, d_j, k_i = s[conv2d_stage].op.reduce_axis
    s[conv2d_stage].reorder(x_bo, k_o, x_j, d_j, d_i, x_co, x_i, x_bi, x_ci, k_i)
    k_o, _ = cfg["tile_ci"].apply(s, conv2d_stage, k_o)
    # Stage the tiled data/kernel loads at the split reduction axis.
    s[cdata].compute_at(s[conv2d_stage], k_o)
    s[ckernel].compute_at(s[conv2d_stage], k_o)
    # Use VTA instructions
    s[cdata].pragma(s[cdata].op.axis[0], env.dma_copy)
    s[ckernel].pragma(s[ckernel].op.axis[0], env.dma_copy)
    s[conv2d_stage].tensorize(x_bi, env.gemm)
    s[output].pragma(x_co1, env.dma_copy)
    return s
| 7,208 | 33.658654 | 95 | py |
tvm | tvm-main/vta/python/vta/top/bitpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=ungrouped-imports, unsupported-binary-operation
"""Bit packing operators"""
from __future__ import absolute_import as _abs
import tvm
from tvm import te
from tvm.topi import utils
from tvm.relay.op.op import register_compute, register_injective_schedule
from tvm.relay.op.op import register_pattern, OpPattern
def bitpack(data, bits, pack_type="int8", name="bitpack"):
    """Pack the lowest dimension of ``data`` into the format needed by VTA.

    Each group of ``lanes = width(pack_type) // bits`` consecutive elements
    along the last axis is packed into one ``pack_type`` word, with element
    ``k`` of the group occupying bits ``[k * bits, (k + 1) * bits)``.

    Parameters
    ----------
    data : te.Tensor
        Input tensor; its last dimension is the one that gets packed. Its
        last-dimension extent must be a multiple of ``lanes``.
    bits : int
        Number of bits each element occupies in the packed word. Must evenly
        divide the bit width of ``pack_type``.
    pack_type : str, optional
        Storage dtype of the packed words: "int8", "int16" or "int32".
    name : str, optional
        Name of the resulting compute op.

    Returns
    -------
    packed : te.Tensor
        The packed tensor; same shape as ``data`` except the last dimension
        is divided by ``lanes``.

    Raises
    ------
    RuntimeError
        If ``pack_type`` is not one of the supported dtypes.
    """
    # NOTE: the original docstring documented ``pack_axis``/``bit_axis``
    # parameters that never existed; the actual signature is documented above.
    shape_vec = list(data.shape)
    if pack_type == "int8":
        data_width = 8
    elif pack_type == "int16":
        data_width = 16
    elif pack_type == "int32":
        data_width = 32
    else:
        raise RuntimeError("Unknown pack type %s" % pack_type)
    assert data_width % bits == 0
    lanes = data_width // bits
    # Data must be in multiples of the data_width
    assert utils.get_const_int(shape_vec[-1]) % lanes == 0, "Not a multiple of word size"
    shape_vec[-1] = shape_vec[-1] // lanes
    oshape = tuple(shape_vec)

    def _bitpack(*indices):
        # OR together `lanes` masked elements, each shifted into its bit slot.
        ret = None
        mask = tvm.tir.const((1 << bits) - 1, pack_type)
        for k in range(lanes):
            idx = list(indices)
            idx[-1] = idx[-1] * lanes + k
            elem = data(*idx).astype(pack_type)
            if k == 0:
                ret = elem & mask
            else:
                val = (elem & mask) << tvm.tir.const(k * bits, pack_type)
                ret = ret | val
        return ret

    return te.compute(oshape, _bitpack, name=name, tag="bitpack")
@register_compute("bitpack", level=15)
def compute_bitpack(attrs, inputs):
lanes = attrs.lanes
dtype = inputs[0].dtype
assert dtype == "int8"
width = 8
assert width % lanes == 0
bits = 8 // lanes
return bitpack(inputs[0], bits, dtype)
register_injective_schedule("bitpack")
register_pattern("bitpack", OpPattern.INJECTIVE)
| 2,916 | 30.706522 | 89 | py |
tvm | tvm-main/vta/python/vta/testing/simulator.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=superfluous-parens
"""Utilities to start simulator."""
import ctypes
import json
import warnings
import tvm
from ..environment import get_env
from ..libinfo import find_libvta
def _load_sw():
    """Load hardware library for simulator.

    Picks the driver library name based on env.TARGET, loads it with
    RTLD_GLOBAL, and for the "tsim" target additionally loads and registers
    the chisel hardware module. Returns a list of loaded libraries, or an
    empty list when the driver is unavailable (only fatal for "sim"/"tsim").
    """
    env = get_env()
    lib_driver_name = (
        "libvta_tsim"
        if env.TARGET == "tsim"
        else "libvta"
        if env.TARGET == "intelfocl"
        else "libvta_fsim"
    )
    require_sim = env.TARGET in ("sim", "tsim")
    libs = []
    # Load driver library
    lib_driver = find_libvta(lib_driver_name, optional=(not require_sim))
    if not lib_driver:
        return []
    try:
        libs = [ctypes.CDLL(lib_driver[0], ctypes.RTLD_GLOBAL)]
    except OSError as err:
        # A missing/broken driver is only fatal when a simulator is required.
        if require_sim:
            raise err
        warnings.warn("Error when loading VTA driver {}: {}".format(lib_driver[0], err))
        return []
    if env.TARGET == "tsim":
        lib_hw = find_libvta("libvta_hw", optional=True)
        assert lib_hw # make sure to make in ${VTA_HW_PATH}/hardware/chisel
        # Hand the hardware module to the tsim initializer.
        f = tvm.get_global_func("vta.tsim.init")
        m = tvm.runtime.load_module(lib_hw[0], "vta-tsim")
        f(m)
        return lib_hw
    return libs
def enabled():
    """Return True when the simulator's profiler hooks are registered."""
    return tvm.get_global_func("vta.simulator.profiler_clear", True) is not None
def clear_stats():
    """Clear profiler statistics."""
    if get_env().TARGET == "sim":
        func_name = "vta.simulator.profiler_clear"
    else:
        func_name = "vta.tsim.profiler_clear"
    # The function may be absent (library not loaded); only call it if found.
    clear_fn = tvm.get_global_func(func_name, True)
    if clear_fn:
        clear_fn()
def stats():
    """Get profiler statistics

    Returns
    -------
    stats : dict
        Current profiler statistics
    """
    if get_env().TARGET == "sim":
        status_fn = tvm.get_global_func("vta.simulator.profiler_status")
    else:
        status_fn = tvm.get_global_func("vta.tsim.profiler_status")
    # The profiler reports its counters as a JSON string.
    return json.loads(status_fn())
# Debug flag value understood by debug_mode(): makes the simulator skip
# execution.
DEBUG_SKIP_EXEC = 1
def debug_mode(flag):
    """Set the simulator debug mode.

    Fixes the misspelled "Paramaters" section header of the original
    docstring, which broke NumPy-style doc parsing.

    Parameters
    ----------
    flag : int
        The debug flag, 0 means clear all flags. See ``DEBUG_SKIP_EXEC``
        for a known flag value.
    """
    tvm.get_global_func("vta.simulator.profiler_debug_mode")(flag)
# Load the simulator software libraries once, at module import time.
LIBS = _load_sw()
| 3,073 | 25.964912 | 88 | py |
tvm | tvm-main/vta/python/vta/testing/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test Utilities"""
from __future__ import absolute_import as _abs
import os
from tvm import rpc, autotvm
from ..environment import get_env
from . import simulator
def run(run_func):
    """Run test function on all available env.

    Dispatches on ``env.TARGET``: simulator targets run locally (or against a
    local RPC server selected via VTA_LOCAL_SIM_RPC), while FPGA targets
    obtain a remote either from an RPC tracker (TVM_TRACKER_HOST/PORT) or
    directly from VTA_RPC_HOST/VTA_RPC_PORT.

    Parameters
    ----------
    run_func : function(env, remote)
    """
    env = get_env()
    if env.TARGET in ["sim", "tsim", "intelfocl"]:
        # Talk to local RPC if necessary to debug RPC server.
        # Compile vta on your host with make at the root.
        # Make sure TARGET is set to "sim" in the config.json file.
        # Then launch the RPC server on the host machine
        # with ./apps/vta_rpc/start_rpc_server.sh
        # Set your VTA_LOCAL_SIM_RPC environment variable to
        # the port it's listening to, e.g. 9090
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            remote = rpc.connect("127.0.0.1", local_rpc)
            run_func(env, remote)
        else:
            # Make sure simulation library exists
            # If this fails, build vta on host (make)
            # with TARGET="sim" in the json.config file.
            if env.TARGET == "sim":
                assert simulator.enabled()
            run_func(env, rpc.LocalSession())
    elif env.TARGET in ["pynq", "ultra96", "de10nano"]:
        # The environment variables below should be set if we are using
        # a tracker to obtain a remote for a test device
        tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
        tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
        # Otherwise, we can set the variables below to directly
        # obtain a remote from a test device
        pynq_host = os.environ.get("VTA_RPC_HOST", None)
        pynq_port = os.environ.get("VTA_RPC_PORT", None)
        # Run device from fleet node if env variables are defined
        if tracker_host and tracker_port:
            remote = autotvm.measure.request_remote(
                env.TARGET, tracker_host, int(tracker_port), timeout=10000
            )
            run_func(env, remote)
        else:
            # Next, run on PYNQ if env variables are defined
            if pynq_host and pynq_port:
                remote = rpc.connect(pynq_host, int(pynq_port))
                run_func(env, remote)
            else:
                raise RuntimeError(
                    "Please set the VTA_RPC_HOST and VTA_RPC_PORT environment variables"
                )
    else:
        raise RuntimeError("Unknown target %s" % env.TARGET)
| 3,321 | 39.512195 | 88 | py |
tvm | tvm-main/vta/python/vta/testing/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing utilities, this namespace is not imported by default."""
from .utils import run
| 878 | 40.857143 | 67 | py |
tvm | tvm-main/vta/python/vta/exec/rpc_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA customized TVM RPC Server
Provides additional runtime function and library loading.
"""
from __future__ import absolute_import
import logging
import argparse
import os
import ctypes
import json
import tvm
from tvm import rpc
from tvm.contrib import cc
from vta import program_bitstream
from ..environment import get_env, pkg_config
from ..libinfo import find_libvta
def server_start():
    """VTA RPC server extension.

    Registers VTA-specific global functions on the RPC server: module
    loading that pulls in the VTA runtime DLL, the ext_dev device API,
    FPGA bitstream programming, runtime reconfiguration/rebuild, and a
    shutdown hook.
    """
    # pylint: disable=unused-variable
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    proj_root = os.path.abspath(os.path.join(curr_path, "../../../../"))
    dll_path = find_libvta("libvta")[0]
    cfg_path = os.path.abspath(os.path.join(proj_root, "3rdparty/vta-hw/config/vta_config.json"))
    # Single-element list used as a mutable closure cell holding the loaded DLL.
    runtime_dll = []
    _load_module = tvm.get_global_func("tvm.rpc.server.load_module")
    def load_vta_dll():
        """Try to load vta dll; idempotent -- loads at most once."""
        if not runtime_dll:
            runtime_dll.append(ctypes.CDLL(dll_path, ctypes.RTLD_GLOBAL))
            logging.info("Loading VTA library: %s", dll_path)
        return runtime_dll[0]
    @tvm.register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        # Ensure the VTA runtime is loaded before any uploaded module.
        load_vta_dll()
        return _load_module(file_name)
    @tvm.register_func("device_api.ext_dev")
    def ext_dev_callback():
        load_vta_dll()
        return tvm.get_global_func("device_api.ext_dev")()
    @tvm.register_func("tvm.contrib.vta.init", override=True)
    def program_fpga(file_name):
        # pylint: disable=import-outside-toplevel
        env = get_env()
        if env.TARGET == "pynq":
            from pynq import xlnk
            # Reset xilinx driver
            xlnk.Xlnk().xlnk_reset()
        elif env.TARGET == "de10nano":
            # Load the de10nano program function.
            load_vta_dll()
        path = tvm.get_global_func("tvm.rpc.server.workpath")(file_name)
        program_bitstream.bitstream_program(env.TARGET, path)
        logging.info("Program FPGA with %s ", file_name)
    @tvm.register_func("tvm.rpc.server.shutdown", override=True)
    def server_shutdown():
        if runtime_dll:
            runtime_dll[0].VTARuntimeShutdown()
            runtime_dll.pop()
    @tvm.register_func("tvm.contrib.vta.reconfig_runtime", override=True)
    def reconfig_runtime(cfg_json):
        """Rebuild and reload runtime with new configuration.

        Parameters
        ----------
        cfg_json : str
            JSON string used for configurations.
        """
        env = get_env()
        # Reconfiguration is only allowed before the runtime DLL is loaded.
        if runtime_dll:
            if env.TARGET == "de10nano":
                print("Please reconfigure the runtime AFTER programming a bitstream.")
            raise RuntimeError("Can only reconfig in the beginning of session...")
        cfg = json.loads(cfg_json)
        cfg["TARGET"] = env.TARGET
        pkg = pkg_config(cfg)
        # check if the configuration is already the same
        if os.path.isfile(cfg_path):
            old_cfg = json.loads(open(cfg_path, "r").read())
            if pkg.same_config(old_cfg):
                logging.info("Skip reconfig_runtime due to same config.")
                return
        cflags = ["-O2", "-std=c++17"]
        cflags += pkg.cflags
        ldflags = pkg.ldflags
        lib_name = dll_path
        source = pkg.lib_source
        logging.info(
            "Rebuild runtime:\n output=%s,\n cflags=%s,\n source=%s,\n ldflags=%s",
            dll_path,
            "\n\t".join(cflags),
            "\n\t".join(source),
            "\n\t".join(ldflags),
        )
        cc.create_shared(lib_name, source, cflags + ldflags)
        # Persist the new configuration so later sessions can skip the rebuild.
        with open(cfg_path, "w") as outputfile:
            outputfile.write(pkg.cfg_json)
def main():
    """Parse command-line arguments and start the VTA RPC server.

    Fixes the misspelled "Main funciton" docstring. Optionally reports to an
    RPC tracker when ``--tracker host:port`` is given (``--key`` is then
    required); blocks until the server process exits.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--host", type=str, default="0.0.0.0", help="The host IP address the server binds to"
    )
    parser.add_argument("--port", type=int, default=9091, help="The port of the RPC")
    parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
    parser.add_argument(
        "--key", type=str, default="", help="RPC key used to identify the connection type."
    )
    parser.add_argument("--tracker", type=str, default="", help="Report to RPC tracker")
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO)
    if args.tracker:
        url, port = args.tracker.split(":")
        port = int(port)
        tracker_addr = (url, port)
        if not args.key:
            raise RuntimeError("Need key to present type of resource when tracker is available")
    else:
        tracker_addr = None

    # register the initialization callback
    def server_init_callback():
        # pylint: disable=redefined-outer-name, reimported, import-outside-toplevel, import-self
        import tvm
        import vta.exec.rpc_server

        tvm.register_func("tvm.rpc.server.start", vta.exec.rpc_server.server_start, override=True)

    server = rpc.Server(
        args.host,
        args.port,
        args.port_end,
        key=args.key,
        tracker_addr=tracker_addr,
        server_init_callback=server_init_callback,
    )
    # Block until the server process terminates.
    server.proc.join()


if __name__ == "__main__":
    main()
| 6,066 | 34.479532 | 100 | py |
tvm | tvm-main/vta/python/vta/exec/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""VTA Command line utils."""
| 816 | 42 | 62 | py |
tvm | tvm-main/vta/scripts/tune_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuning a single conv2d operator"""
from collections import namedtuple
import logging
import os
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
# VTA hardware environment: supplies the BATCH/BLOCK packing factors and
# dtypes used to shape the workloads below.
env = vta.get_env()
# Descriptor for a single NCHW conv2d workload.
Workload = namedtuple(
    "Conv2DWorkload",
    [
        "batch",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "hkernel",
        "wkernel",
        "hpad",
        "wpad",
        "hstride",
        "wstride",
    ],
)
resnet_wkls = [
    # Workloads of resnet18 on imagenet
    # ('resnet-18.C1', Workload(env.BATCH, 224, 224, 3, 64, 7, 7, 3, 3, 2, 2)),
    ("resnet-18.C2", Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C3", Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C4", Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C5", Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C6", Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C7", Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C8", Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
    ("resnet-18.C9", Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
    ("resnet-18.C10", Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
    ("resnet-18.C11", Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Clip ``x`` into [a_min, a_max] as two separate elementwise stages.

    Unlike topi's current clip, the min and max are emitted as two compute
    stages ("clipA" then "clipB").
    """
    lo = tvm.tir.const(a_min, x.dtype)
    hi = tvm.tir.const(a_max, x.dtype)
    upper_clipped = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), hi), name="clipA")
    return te.compute(
        upper_clipped.shape, lambda *idx: tvm.te.max(upper_clipped(*idx), lo), name="clipB"
    )
def conv2d(N, CI, H, W, CO, KH, KW, strides, padding, dilation):
    """Build the packed conv2d + shift/bias/clip/cast pipeline and schedule.

    Returns (schedule, [data, kernel, bias, res]) in the form expected by
    ``autotvm.task.create``.
    """
    # Tensors are packed along batch and channel by the VTA BATCH/BLOCK sizes.
    data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
    kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
    bias_shape = (N // env.BATCH, CO // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT)
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    with tvm.target.vta():
        res = topi.nn.conv2d(
            input=data,
            filter=kernel,
            padding=padding,
            strides=strides,
            dilation=dilation,
            layout="NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN),
            out_dtype=env.acc_dtype,
        )
        # Requantize: shift down, add bias, clip to the output range, cast.
        res = topi.right_shift(res, env.WGT_WIDTH)
        res = topi.add(res, bias)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
    # Use the VTA schedule when compiling for VTA, otherwise a default one.
    if tvm.target.Target.current().device_name == "vta":
        s = topi.generic.schedule_conv2d_nchw([res])
    else:
        s = te.create_schedule([res.op])
    return s, [data, kernel, bias, res]
if __name__ == "__main__":
# Logging config (for printing tuning log to the screen)
logging.basicConfig()
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
# Tuning log files
log_file = "%s.conv2d.log" % (env.TARGET)
# create tmp log file
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
# Get tracker info from env
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(resnet_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(resnet_wkls))
# Read in workload parameters
N = wl.batch
CI = wl.in_filter
H = wl.height
W = wl.width
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
dilation = (1, 1)
# Create task
task = autotvm.task.create(
conv2d,
args=(N, CI, H, W, CO, KH, KW, strides, padding, dilation),
target=tvm.target.vta(),
target_host=env.target_host,
template_key="direct",
)
print(task.config_space)
# Tune
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
# check_correctness=True, # TODO: re-enable when check_correctness works again.
),
)
# Run Tuner
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
# Pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
| 6,237 | 33.464088 | 97 | py |
tvm | tvm-main/vta/scripts/tune_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuning a single group conv2d operator"""
from collections import namedtuple
import logging
import os
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
# VTA hardware environment: supplies the BATCH packing factor used below.
env = vta.get_env()
# Descriptor for a single grouped NCHW conv2d workload.
Workload = namedtuple(
    "GroupConv2DWorkload",
    [
        "batch",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "groups",
        "hkernel",
        "wkernel",
        "hpad",
        "wpad",
        "hstride",
        "wstride",
    ],
)
# Mobilenet (grouped variant) workloads
mobilenet_wkls = [
    ("mobilenet.D1", Workload(env.BATCH, 112, 112, 32, 32, 2, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D2", Workload(env.BATCH, 112, 112, 64, 64, 4, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D3", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D4", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D5", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D6", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D7", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D8", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D9", Workload(env.BATCH, 7, 7, 1024, 1024, 64, 3, 3, 1, 1, 1, 1)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Clip ``x`` into [a_min, a_max] as two separate elementwise stages.

    Unlike topi's current clip, the min and max are emitted as two compute
    stages ("clipA" then "clipB").
    """
    lo = tvm.tir.const(a_min, x.dtype)
    hi = tvm.tir.const(a_max, x.dtype)
    upper_clipped = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), hi), name="clipA")
    return te.compute(
        upper_clipped.shape, lambda *idx: tvm.te.max(upper_clipped(*idx), lo), name="clipB"
    )
def group_conv2d(N, CI, H, W, CO, KH, KW, strides, padding, dilation, group):
    """Build the packed grouped conv2d + shift/bias/clip/cast pipeline.

    BUG FIX: the body previously referenced an undefined name ``groups``;
    it only worked because the ``__main__`` loop happened to create a module
    global with that name (and would raise NameError otherwise). The
    ``group`` parameter is now used consistently. The signature is unchanged.

    Returns (schedule, [data, kernel, bias, res]) in the form expected by
    ``autotvm.task.create``.
    """
    # Per-group input-channel count; tensors are packed by BATCH/BLOCK sizes.
    CI_G = CI // group
    data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
    kernel_shape = (CO // env.BLOCK_OUT, CI_G // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
    bias_shape = (N // env.BATCH, CO // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT)
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    with tvm.target.vta():
        res = topi.nn.group_conv2d_nchw(
            data, kernel, strides, padding, dilation, group, env.acc_dtype
        )
        # Requantize: shift down, add bias, clip to the output range, cast.
        res = topi.right_shift(res, env.WGT_WIDTH)
        res = topi.add(res, bias)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
    # Use the VTA schedule when compiling for VTA, otherwise a default one.
    if tvm.target.Target.current().device_name == "vta":
        s = topi.generic.schedule_group_conv2d_nchw([res])
    else:
        s = te.create_schedule([res.op])
    return s, [data, kernel, bias, res]
if __name__ == "__main__":
# Logging config (for printing tuning log to the screen)
logging.basicConfig()
# Tuning log files
log_file = "%s.group_conv2d.log" % (env.TARGET)
# create tmp log file
tmp_log_file = log_file + ".tmp"
if os.path.exists(log_file):
os.remove(log_file)
# Get tracker info from env
tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
if not tracker_host or not tracker_port:
print("Set your AutoTVM tracker node host and port variables to run the autotuner")
exit()
for idx, (wl_name, wl) in enumerate(mobilenet_wkls):
prefix = "[Task %2d/%2d] " % (idx, len(mobilenet_wkls))
# Read in workload parameters
N = wl.batch
CI = wl.in_filter
H = wl.height
W = wl.width
CO = wl.out_filter
KH = wl.hkernel
KW = wl.wkernel
strides = (wl.hstride, wl.wstride)
padding = (wl.hpad, wl.wpad)
dilation = (1, 1)
groups = wl.groups
# Create task
task = autotvm.task.create(
group_conv2d,
args=(N, CI, H, W, CO, KH, KW, strides, padding, dilation, groups),
target=tvm.target.vta(),
target_host=env.target_host,
template_key="direct",
)
print(task.config_space)
# Tune
measure_option = autotvm.measure_option(
builder=autotvm.LocalBuilder(),
runner=autotvm.RPCRunner(
env.TARGET,
host=tracker_host,
port=int(tracker_port),
number=5,
timeout=60,
# check_correctness=True, # TODO: re-enable when check_correctness works again.
),
)
# Run Tuner
tuner = autotvm.tuner.RandomTuner(task)
tuner.tune(
n_trial=len(task.config_space),
early_stopping=None,
measure_option=measure_option,
callbacks=[
autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
autotvm.callback.log_to_file(tmp_log_file),
],
)
# Pick best records to a cache file
autotvm.record.pick_best(tmp_log_file, log_file)
os.remove(tmp_log_file)
| 6,031 | 33.272727 | 99 | py |
tvm | tvm-main/vta/scripts/tune_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuning a single conv2d transpose operator"""
from collections import namedtuple
import logging
import os
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
# Get batch info from env
env = vta.get_env()
# Descriptor for a single NCHW transposed-conv2d workload, including the
# output padding applied by conv2d_transpose.
Workload = namedtuple(
    "Conv2DTransposeWorkload",
    [
        "batch",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "hkernel",
        "wkernel",
        "hpad",
        "wpad",
        "hstride",
        "wstride",
        "o_hpad",
        "o_wpad",
    ],
)
# DCGAN workloads
dcgan_wkls = [
    # dcgan
    ("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
    ("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
    ("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Clip ``x`` into [a_min, a_max] as two separate elementwise stages.

    Unlike topi's current clip, the min and max are emitted as two compute
    stages ("clipA" then "clipB").
    """
    lo = tvm.tir.const(a_min, x.dtype)
    hi = tvm.tir.const(a_max, x.dtype)
    upper_clipped = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), hi), name="clipA")
    return te.compute(
        upper_clipped.shape, lambda *idx: tvm.te.max(upper_clipped(*idx), lo), name="clipB"
    )
def conv2d_transpose(N, CI, H, W, CO, KH, KW, strides, padding, opadding):
    """AutoTVM template: conv2d_transpose (NCHW) quantized for VTA.

    Compute chain: conv2d_transpose -> right-shift -> clip -> cast, then a
    VTA schedule when the current target is the VTA device.

    Parameters
    ----------
    N, CI, H, W, CO, KH, KW : int
        Batch, input channels, spatial height/width, output channels,
        kernel height/width.
    strides, padding, opadding : tuple
        Strides, padding and output padding forwarded to topi.

    Returns
    -------
    (schedule, [data, kernel, res])
    """
    # Shapes are packed into the (outer, outer, ..., BATCH, BLOCK) layout
    # that VTA's tensor core expects.
    data_shape = (N // env.BATCH, CI // env.BLOCK_IN, H, W, env.BATCH, env.BLOCK_IN)
    kernel_shape = (CO // env.BLOCK_OUT, CI // env.BLOCK_IN, KH, KW, env.BLOCK_OUT, env.BLOCK_IN)
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    with tvm.target.vta():
        res = topi.nn.conv2d_transpose_nchw(
            Input=data,
            Filter=kernel,
            strides=strides,
            padding=padding,
            out_dtype=env.acc_dtype,
            output_padding=opadding,
        )
        # Re-quantize the accumulator: shift down, clamp to the output
        # dtype's positive range, then narrow-cast.
        res = topi.right_shift(res, env.WGT_WIDTH)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        if tvm.target.Target.current().device_name == "vta":
            s = topi.generic.schedule_conv2d_transpose_nchw([res])
        else:
            s = te.create_schedule([res.op])
    return s, [data, kernel, res]
# Entry point: tune every DCGAN conv2d_transpose workload on a remote VTA
# device reachable through the AutoTVM RPC tracker, then keep only the best
# record per task in the final log file.
if __name__ == "__main__":
    # Logging config (for printing tuning log to the screen)
    logging.basicConfig()
    # logging.getLogger('autotvm').setLevel(logging.DEBUG)
    # Tuning log files
    log_file = "%s.conv2d_transpose.log" % (env.TARGET)
    # create tmp log file; raw tuning records accumulate here and are
    # distilled into log_file at the end.
    tmp_log_file = log_file + ".tmp"
    if os.path.exists(log_file):
        os.remove(log_file)
    # Get tracker info from env
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    if not tracker_host or not tracker_port:
        print("Set your AutoTVM tracker node host and port variables to run the autotuner")
        exit()
    for idx, (wl_name, wl) in enumerate(dcgan_wkls):
        prefix = "[Task %2d/%2d] " % (idx, len(dcgan_wkls))
        # Read in workload parameters
        N = wl.batch
        H = wl.height
        W = wl.width
        CI = wl.in_filter
        CO = wl.out_filter
        KH = wl.hkernel
        KW = wl.wkernel
        strides = (wl.hstride, wl.wstride)
        padding = (wl.hpad, wl.wpad)
        opadding = (wl.o_hpad, wl.o_wpad)
        # Create task
        task = autotvm.task.create(
            conv2d_transpose,
            args=(N, CI, H, W, CO, KH, KW, strides, padding, opadding),
            target=tvm.target.Target(tvm.target.vta(), host=env.target_host),
            template_key="direct",
        )
        print(task.config_space)
        # Tune
        measure_option = autotvm.measure_option(
            builder=autotvm.LocalBuilder(),
            runner=autotvm.RPCRunner(
                env.TARGET,
                host=tracker_host,
                port=int(tracker_port),
                number=5,
                timeout=60,
                # check_correctness=True, # TODO: re-enable when check_correctness works again.
            ),
        )
        # Run Tuner (random search over the whole config space)
        tuner = autotvm.tuner.RandomTuner(task)
        tuner.tune(
            n_trial=len(task.config_space),
            early_stopping=None,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # Pick best records to a cache file
    autotvm.record.pick_best(tmp_log_file, log_file)
    os.remove(tmp_log_file)
| 5,495 | 30.953488 | 97 | py |
tvm | tvm-main/vta/scripts/tune_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuning a single dense operator"""
from collections import namedtuple
import logging
import os
import tvm
from tvm import te
from tvm import autotvm
from tvm import topi
import vta
import vta.testing
# VTA hardware environment (defines BATCH/BLOCK_IN/BLOCK_OUT packing factors).
env = vta.get_env()
# Dense (matmul) workload descriptor: batch size, input and output features.
Workload = namedtuple("DenseWorkload", ["batch", "in_filter", "out_filter"])
# LSTM-style dense workloads to tune.
dense_wkls = [
    ("lstm.dense.1", Workload(1, 256, 128)),
    ("lstm.dense.4", Workload(4, 256, 128)),
]
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Two-stage clip of *x* into [a_min, a_max].

    topi's clip fuses both bounds into a single stage; here the min bound
    ("clipA") and max bound ("clipB") are kept as separate compute stages.
    """
    lower = tvm.tir.const(a_min, x.dtype)
    upper = tvm.tir.const(a_max, x.dtype)
    stage_a = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), upper), name="clipA")
    stage_b = te.compute(stage_a.shape, lambda *idx: tvm.te.max(stage_a(*idx), lower), name="clipB")
    return stage_b
def dense(N, CI, CO):
    """AutoTVM template: packed dense (matmul) quantized for VTA.

    Builds int32 dense -> right-shift by 8 -> clip to [0, 127] -> cast to
    int8, and returns the schedule plus its I/O tensors.
    """
    # Block the logical (N, CI, CO) dims by the VTA packing factors.
    batch_blocks = N // env.BATCH
    in_blocks = CI // env.BLOCK_IN
    out_blocks = CO // env.BLOCK_OUT
    data = te.placeholder(
        (batch_blocks, in_blocks, env.BATCH, env.BLOCK_IN), name="data", dtype=env.inp_dtype
    )
    kernel = te.placeholder(
        (out_blocks, in_blocks, env.BLOCK_OUT, env.BLOCK_IN), name="kernel", dtype=env.wgt_dtype
    )
    with tvm.target.vta():
        out = topi.nn.dense(data, kernel, None, "int32")
        out = topi.cast(my_clip(topi.right_shift(out, 8), 0, 127), "int8")
        if tvm.target.Target.current().device_name == "vta":
            sched = topi.generic.schedule_dense([out])
        else:
            sched = te.create_schedule([out.op])
    return sched, [data, kernel, out]
# Entry point: tune every dense workload on a remote VTA device reachable
# through the AutoTVM RPC tracker, then keep the best record per task.
#
# Fixes vs. previous revision:
# - "tracket_host"/"tracket_port" typos renamed to tracker_host/tracker_port
#   (local names only), matching the sibling tuning scripts.
# - host folded into the Target object (the separate target_host= argument to
#   autotvm.task.create is deprecated), matching tune_conv2d_transpose.py.
if __name__ == "__main__":
    # Logging config (for printing tuning log to the screen)
    logging.basicConfig()
    # logging.getLogger('autotvm').setLevel(logging.DEBUG)
    # Tuning log files
    log_file = "%s.dense.log" % (env.TARGET)
    # create tmp log file; raw records accumulate here and are distilled
    # into log_file at the end.
    tmp_log_file = log_file + ".tmp"
    if os.path.exists(log_file):
        os.remove(log_file)
    # Get tracker info from env
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    if not tracker_host or not tracker_port:
        print("Set your AutoTVM tracker node host and port variables to run the autotuner")
        exit()
    for idx, (wl_name, wl) in enumerate(dense_wkls):
        prefix = "[Task %2d/%2d] " % (idx, len(dense_wkls))
        # Workload parameters
        N = wl.batch
        CI = wl.in_filter
        CO = wl.out_filter
        task = autotvm.task.create(
            dense,
            args=(N, CI, CO),
            target=tvm.target.Target(tvm.target.vta(), host=env.target_host),
            template_key="direct",
        )
        print(task.config_space)
        # Tune
        measure_option = autotvm.measure_option(
            builder=autotvm.LocalBuilder(),
            runner=autotvm.RPCRunner(
                env.TARGET,
                host=tracker_host,
                port=int(tracker_port),
                number=5,
                timeout=60,
                # check_correctness=True, # TODO: re-enable when check_correctness works again.
            ),
        )
        # Run Tuner (random search over the whole config space)
        tuner = autotvm.tuner.RandomTuner(task)
        tuner.tune(
            n_trial=len(task.config_space),
            early_stopping=None,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(len(task.config_space), prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # Pick best records to a cache file
    autotvm.record.pick_best(tmp_log_file, log_file)
    os.remove(tmp_log_file)
| 4,430 | 31.108696 | 95 | py |
tvm | tvm-main/vta/scripts/tune_resnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Perform ResNet autoTVM tuning on VTA using Relay."""
import argparse, os, time
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
import vta
from vta.testing import simulator
from vta.top import graph_pack
from tvm.autotvm.task import extract_from_program
def parse_arguments():
    """Build and parse the command-line options for ResNet tuning on VTA.

    Returns
    -------
    argparse.Namespace
        Parsed options: model name, graph-packing boundary node names,
        target device, tuner strategy, measurement count, log file name,
        and the debug-profiling flag.
    """
    parser = argparse.ArgumentParser(description="Train a model for image classification.")
    add = parser.add_argument
    add(
        "--model",
        type=str,
        default="resnet18_v1",
        choices=["resnet18_v1"],
        help="Input model name.",
    )
    add(
        "--start-name",
        type=str,
        default="nn.max_pool2d",
        help="The name of the node where packing starts",
    )
    add(
        "--stop-name",
        type=str,
        default="nn.global_avg_pool2d",
        help="The name of the node where packing stops",
    )
    add(
        "--debug-profile",
        action="store_true",
        help="Show layer-wise time cost profiling results",
    )
    add("--device", default="vta", choices=["vta", "arm_cpu"], help="Select device target")
    add(
        "--measurements",
        type=int,
        default=1,
        help="Number of measurements during AutoTVM search",
    )
    add("--tuner", type=str, default="random", help="AutoTVM search strategy")
    add("--log-filename", type=str, default="resnet-18.log", help="AutoTVM log file name")
    return parser.parse_args()
def register_vta_tuning_tasks():
    """Register AutoTVM template overrides for conv2d and dense on VTA.

    Each template wraps the topi compute with the VTA quantization tail
    (right-shift by 8, clip to [0, 127], cast to int8) and picks the VTA
    schedule when the current target is the VTA device.
    """
    from tvm.autotvm.task.topi_integration import TaskExtractEnv, deserialize_args

    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x

    # init autotvm env to register VTA operator
    TaskExtractEnv()

    @autotvm.task.register("topi_nn_conv2d", override=True)
    def _topi_nn_conv2d(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        # args arrive serialized from the task system; restore tensors first.
        args = deserialize_args(args)
        A, W = args[:2]
        with tvm.target.vta():
            res = topi.nn.conv2d(*args, **kwargs)
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = topi.generic.schedule_conv2d_nchw([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]

    @autotvm.task.register("topi_nn_dense", override=True)
    def _topi_nn_dense(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        args = deserialize_args(args)
        A, W = args[:2]
        with tvm.target.vta():
            res = topi.nn.dense(*args, **kwargs)
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = topi.generic.schedule_dense([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]
def compile_network(opt, env, target):
    """Load the Gluon model, quantize it in Relay, and pack it for VTA.

    Parameters
    ----------
    opt : argparse.Namespace
        Parsed CLI options (model name, packing start/stop node names).
    env : vta.Environment
        VTA hardware environment providing the packing factors.
    target : tvm.target.Target
        Compilation target; graph packing only runs for the "vta" device.

    Returns
    -------
    (relay_prog, params)
        The (possibly graph-packed) Relay function and its parameters.
    """
    # Populate the shape and data type dictionary
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(opt.model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Update shape and type dictionary
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
    # Perform quantization in Relay
    # Note: We set opt_level to 3 in order to fold batch norm
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            relay_prog = relay.quantize.quantize(mod["main"], params=params)
    # Perform graph packing and constant folding for VTA target
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            relay_prog,
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=opt.start_name,
            stop_name=opt.stop_name,
        )
    return relay_prog, params
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
    try_winograd=True,
):
    """Tune every extracted task and keep the best records in *log_filename*.

    Raw tuning records accumulate in a temporary ``<log_filename>.tmp`` file,
    which also seeds transfer learning for later tasks; the temp file is
    deleted after the best records have been distilled.
    """
    # Constructor kwargs for every XGBTuner flavor; the name encodes the
    # loss type and (optionally) the feature type.
    xgb_variants = {
        "xgb": dict(loss_type="reg"),
        "xgb_knob": dict(loss_type="reg", feature_type="knob"),
        "xgb_itervar": dict(loss_type="reg", feature_type="itervar"),
        "xgb_curve": dict(loss_type="reg", feature_type="curve"),
        "xgb_rank": dict(loss_type="rank"),
        "xgb_rank_knob": dict(loss_type="rank", feature_type="knob"),
        "xgb_rank_itervar": dict(loss_type="rank", feature_type="itervar"),
        "xgb_rank_curve": dict(loss_type="rank", feature_type="curve"),
        "xgb_rank_binary": dict(loss_type="rank-binary"),
        "xgb_rank_binary_knob": dict(loss_type="rank-binary", feature_type="knob"),
        "xgb_rank_binary_itervar": dict(loss_type="rank-binary", feature_type="itervar"),
        "xgb_rank_binary_curve": dict(loss_type="rank-binary", feature_type="curve"),
    }

    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # Instantiate the requested search strategy for this task.
        if tuner in xgb_variants:
            tuner_obj = XGBTuner(tsk, **xgb_variants[tuner])
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=50)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)

        # Warm-start from records gathered for earlier tasks.
        if use_transfer_learning and os.path.isfile(tmp_log_file):
            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # Never request more trials than the config space holds.
        n_trial_ = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial_,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial_, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # Distill the best record per workload, then drop the raw log.
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
# Entry point: program the FPGA (unless simulating), quantize and pack the
# network, extract and tune conv2d tasks, then rebuild with the best records
# and time inference on the remote device.
if __name__ == "__main__":
    opt = parse_arguments()
    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")
    # Read in VTA environment
    env = vta.get_env()
    # Get remote from fleet node
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    if not tracker_host or not tracker_port:
        print("Set your AutoTVM tracker node host and port variables to run the autotuner")
        exit()
    # Get remote
    if env.TARGET != "sim":
        # Measure build start time
        reconfig_start = time.time()
        # Get remote from fleet node
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )
        # Reconfigure the JIT runtime and FPGA.
        # You can program the FPGA with your own custom bitstream
        # by passing the path to the bitstream file instead of None.
        vta.reconfig_runtime(remote)
        vta.program_fpga(remote, bitstream=None)
        # Report on reconfiguration time
        reconfig_time = time.time() - reconfig_start
        print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
    # In simulation mode, host the RPC server locally.
    else:
        remote = rpc.LocalSession()
    # VTA target and execution context
    target = env.target if opt.device == "vta" else env.target_vta_cpu
    ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0)
    # Compile Relay program
    print("Initial compile...")
    relay_prog, params = compile_network(opt, env, target)
    # Register VTA tuning tasks
    register_vta_tuning_tasks()
    # Perform task extraction on Relay program
    print("Extracting tasks...")
    tasks = extract_from_program(
        func=relay_prog,
        params=params,
        ops=(relay.op.get("nn.conv2d"),),
        target=tvm.target.Target(target, host=env.target_host),
    )
    # Perform Autotuning
    print("Tuning...")
    tuning_opt = {
        "log_filename": opt.log_filename,
        "tuner": opt.tuner,
        # effectively unbounded; tune_tasks caps at the config-space size
        "n_trial": 1e9,
        "early_stopping": None,
        "measure_option": autotvm.measure_option(
            builder=autotvm.LocalBuilder(build_func=vta.vta_autotvm_build_func),
            runner=autotvm.RPCRunner(
                env.TARGET,
                tracker_host,
                tracker_port,
                number=4,
                min_repeat_ms=150,
                repeat=opt.measurements,
                timeout=60,
                # check_correctness=True, # TODO: re-enable when check_correctness works again.
            ),
        ),
    }
    tune_tasks(tasks, **tuning_opt)
    # Compile kernels with history best records
    with autotvm.tophub.context(target, extra_files=[opt.log_filename]):
        # Compile network
        print("Compiling network with best tuning parameters...")
        if target.device_name != "vta":
            with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
                graph, lib, params = relay.build(
                    relay_prog,
                    target=tvm.target.Target(target, host=env.target_host),
                    params=params,
                )
        else:
            with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
                graph, lib, params = relay.build(
                    relay_prog,
                    target=tvm.target.Target(target, host=env.target_host),
                    params=params,
                )
        # Export library and load it back through the RPC session
        temp = utils.tempdir()
        lib.save(temp.relpath("graphlib.o"))
        remote.upload(temp.relpath("graphlib.o"))
        lib = remote.load_module("graphlib.o")
        # If detailed runtime info is needed build with debug runtime
        if opt.debug_profile:
            m = debug_executor.create(graph, lib, ctx)
        else:
            m = graph_executor.create(graph, lib, ctx)
        # Set the network parameters and synthetic input
        image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
        m.set_input(**params)
        m.set_input("data", image)
        # Perform inference
        timer = m.module.time_evaluator("run", ctx, number=4, repeat=opt.measurements)
        tcost = timer()
        prof_res = np.array(tcost.results) * 1000  # convert to millisecond
        print(
            "Mean inference time (std dev): %.2f ms (%.2f ms)"
            % (np.mean(prof_res), np.std(prof_res))
        )
        # Display profile information (debug runtime prints per-layer costs)
        if opt.debug_profile:
            m.run()
| 13,325 | 34.631016 | 98 | py |
tvm | tvm-main/vta/tests/python/de10nano/test_program_rpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys, os
import tvm
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime
# RPC endpoint of the DE10-Nano board; overridable via environment variables.
host = os.environ.get("VTA_RPC_HOST", "de10nano")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
def program_rpc_bitstream(path=None):
    """Program the FPGA attached to the remote RPC server.

    Parameters
    ----------
    path : str, optional
        Path to the bitstream file; ``None`` selects the default bitstream.
    """
    assert tvm.runtime.enabled("rpc")
    session = rpc.connect(host, port)
    program_fpga(session, path)
def reconfig_rpc_runtime():
    """Reconfigure the VTA runtime on the remote RPC server."""
    assert tvm.runtime.enabled("rpc")
    session = rpc.connect(host, port)
    reconfig_runtime(session)
# Script entry: optional argv[1] names a custom bitstream; with no argument
# the default bitstream is used. Program the FPGA, then reset the runtime.
bitstream = sys.argv[1] if len(sys.argv) == 2 else None
program_rpc_bitstream(bitstream)
reconfig_rpc_runtime()
| 1,577 | 31.875 | 86 | py |
tvm | tvm-main/vta/tests/python/unittest/test_vta_insn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit test VTA's instructions """
import tvm
from tvm import te
import numpy as np
from tvm import topi
from tvm.contrib import utils
import vta
import vta.testing
from vta.testing import simulator
# Fix the RNG seed so the randomized test inputs are reproducible.
np.random.seed(0xDEADB)
def test_save_load_out():
    """Test save/store output command"""

    def _run(env, remote):
        n = 6
        x = te.placeholder((n, n, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
        # DRAM -> accumulator SRAM copy stage
        x_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x(*i), "x_buf")
        # insert no-op that won't be optimized away
        y_buf = te.compute((n, n, env.BATCH, env.BLOCK_OUT), lambda *i: x_buf(*i) >> 0, "y_buf")
        # Narrow-cast and store back to DRAM
        y = te.compute(
            (n, n, env.BATCH, env.BLOCK_OUT), lambda *i: y_buf(*i).astype(env.inp_dtype), "y"
        )
        # schedule: mark the SRAM scopes and tag stages as DMA/ALU ops
        s = te.create_schedule(y.op)
        s[x_buf].set_scope(env.acc_scope)
        s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
        s[y_buf].set_scope(env.acc_scope)
        s[y_buf].pragma(y_buf.op.axis[0], env.alu)
        s[y].pragma(y.op.axis[0], env.dma_copy)
        # verification
        with vta.build_config():
            m = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
        if not remote:
            return
        temp = utils.tempdir()
        m.save(temp.relpath("load_act.o"))
        remote.upload(temp.relpath("load_act.o"))
        f = remote.load_module("load_act.o")
        # verify: output should equal the input narrowed to inp_dtype
        dev = remote.ext_dev(0)
        x_np = np.random.randint(1, 10, size=(n, n, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
        y_np = x_np.astype(y.dtype)
        x_nd = tvm.nd.array(x_np, dev)
        y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
        if env.TARGET in ["sim", "tsim"]:
            simulator.clear_stats()
        f(x_nd, y_nd)
        np.testing.assert_equal(y_np, y_nd.numpy())
        if env.TARGET in ["sim", "tsim"]:
            sim_stats = simulator.stats()
            print("Save load execution statistics:")
            for k, v in sim_stats.items():
                print("\t{:<16}: {:>16}".format(k, v))

    vta.testing.run(_run)
def test_padded_load():
    """Test padded load."""

    def _run(env, remote):
        def check_padded_load(pad_before, pad_after, test_name=None):
            # declare a small 2D-of-tiles input and pad it on load
            n = 3
            m = 5
            x = te.placeholder((n, m, env.BATCH, env.BLOCK_OUT), name="x", dtype=env.acc_dtype)
            x_buf = topi.nn.pad(x, pad_before, pad_after, name="y")
            # insert no-op that won't be optimized away
            y_buf = te.compute(
                (
                    n + pad_before[0] + pad_after[0],
                    m + pad_before[1] + pad_after[1],
                    env.BATCH,
                    env.BLOCK_OUT,
                ),
                lambda *i: x_buf(*i) >> 0,
                "y_buf",
            )
            y = te.compute(
                (
                    n + pad_before[0] + pad_after[0],
                    m + pad_before[1] + pad_after[1],
                    env.BATCH,
                    env.BLOCK_OUT,
                ),
                lambda *i: y_buf(*i).astype(env.inp_dtype),
                "y",
            )
            # schedule: padded copy becomes a DMA load, no-op becomes ALU
            s = te.create_schedule(y.op)
            s[x_buf].set_scope(env.acc_scope)
            s[x_buf].pragma(x_buf.op.axis[0], env.dma_copy)
            s[y_buf].set_scope(env.acc_scope)
            s[y_buf].pragma(y_buf.op.axis[0], env.alu)
            s[y].pragma(y.op.axis[0], env.dma_copy)
            # build
            with vta.build_config():
                mod = vta.build(s, [x, y], tvm.target.Target("ext_dev", host=env.target_host))
            if not remote:
                return
            temp = utils.tempdir()
            mod.save(temp.relpath("padded_load.o"))
            remote.upload(temp.relpath("padded_load.o"))
            f = remote.load_module("padded_load.o")
            # verify: reference output is the input surrounded by zeros
            dev = remote.ext_dev(0)
            x_np = np.random.randint(0, 10, size=(n, m, env.BATCH, env.BLOCK_OUT)).astype(x.dtype)
            y_np = np.zeros(
                (
                    n + pad_before[0] + pad_after[0],
                    m + pad_before[1] + pad_after[1],
                    env.BATCH,
                    env.BLOCK_OUT,
                )
            ).astype(y.dtype)
            y_np[pad_before[0] : pad_before[0] + n, pad_before[1] : pad_before[1] + m, :] = x_np
            x_nd = tvm.nd.array(x_np, dev)
            y_nd = tvm.nd.empty(y_np.shape, device=dev, dtype=y_np.dtype)
            if env.TARGET in ["sim", "tsim"]:
                simulator.clear_stats()
            f(x_nd, y_nd)
            np.testing.assert_equal(y_np, y_nd.numpy())
            if env.TARGET in ["sim", "tsim"]:
                sim_stats = simulator.stats()
                print("Padded {} load execution statistics:".format(test_name))
                for k, v in sim_stats.items():
                    print("\t{:<16}: {:>16}".format(k, v))

        # cover padding on each side individually, then all four at once
        check_padded_load([2, 0, 0, 0], [0, 0, 0, 0], test_name="Y0")
        check_padded_load([0, 2, 0, 0], [0, 0, 0, 0], test_name="Y1")
        check_padded_load([0, 0, 0, 0], [2, 0, 0, 0], test_name="X0")
        check_padded_load([0, 0, 0, 0], [0, 2, 0, 0], test_name="X1")
        check_padded_load([1, 1, 0, 0], [1, 1, 0, 0], test_name="all")

    vta.testing.run(_run)
def test_gemm():
    """Test GEMM."""

    def _run(env, remote):
        # declare a packed matmul: (o, n) tiles of data x (m, n) tiles of weight
        o = 4
        n = 1
        m = 4
        x = te.placeholder((o, n, env.BATCH, env.BLOCK_IN), name="x", dtype=env.inp_dtype)
        w = te.placeholder((m, n, env.BLOCK_OUT, env.BLOCK_IN), name="w", dtype=env.wgt_dtype)
        x_buf = te.compute((o, n, env.BATCH, env.BLOCK_IN), lambda *i: x(*i), "x_buf")
        w_buf = te.compute((m, n, env.BLOCK_OUT, env.BLOCK_IN), lambda *i: w(*i), "w_buf")
        # reduction over input-channel tiles (ko) and within a tile (ki)
        ko = te.reduce_axis((0, n), name="ko")
        ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
        y_gem = te.compute(
            (o, m, env.BATCH, env.BLOCK_OUT),
            lambda bo, co, bi, ci: te.sum(
                x_buf[bo, ko, bi, ki].astype(env.acc_dtype)
                * w_buf[co, ko, ci, ki].astype(env.acc_dtype),
                axis=[ko, ki],
            ),
            name="y_gem",
        )
        # quantization tail: shift, clamp to [0, 2^(INP_WIDTH-1)-1], narrow-cast
        y_shf = te.compute(
            (o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_gem(*i) >> 8, name="y_shf"
        )
        y_max = te.compute(
            (o, m, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(y_shf(*i), 0), "y_max"
        )  # relu
        y_min = te.compute(
            (o, m, env.BATCH, env.BLOCK_OUT),
            lambda *i: tvm.te.min(y_max(*i), (1 << (env.INP_WIDTH - 1)) - 1),
            "y_min",
        )  # relu
        y = te.compute(
            (o, m, env.BATCH, env.BLOCK_OUT), lambda *i: y_min(*i).astype(env.inp_dtype), name="y"
        )
        if not remote:
            return

        def verify(s, name=None):
            # Build the given schedule, run it remotely, and check against a
            # NumPy reference of the same shift/clip pipeline.
            # Build with the CSE pass disabled as otherwise it would complicate the test
            with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
                mod = vta.build(s, [x, w, y], tvm.target.Target("ext_dev", host=env.target_host))
            temp = utils.tempdir()
            mod.save(temp.relpath("gemm.o"))
            remote.upload(temp.relpath("gemm.o"))
            f = remote.load_module("gemm.o")
            # verify
            dev = remote.ext_dev(0)
            x_np = np.random.randint(-128, 128, size=(o, n, env.BATCH, env.BLOCK_IN)).astype(
                x.dtype
            )
            w_np = np.random.randint(-128, 128, size=(m, n, env.BLOCK_OUT, env.BLOCK_IN)).astype(
                w.dtype
            )
            y_np = np.zeros((o, m, env.BATCH, env.BLOCK_OUT)).astype(y.dtype)
            x_nd = tvm.nd.array(x_np, dev)
            w_nd = tvm.nd.array(w_np, dev)
            y_nd = tvm.nd.array(y_np, dev)
            # reference GEMM accumulated in the wide accumulator dtype
            y_np = y_np.astype(env.acc_dtype)
            for b in range(o):
                for i in range(m):
                    for j in range(n):
                        y_np[b, i, :] += np.dot(
                            x_np[b, j, :].astype(env.acc_dtype), w_np[i, j].T.astype(env.acc_dtype)
                        )
            y_np = np.right_shift(y_np, 8)
            y_np = np.clip(y_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(y.dtype)
            if env.TARGET in ["sim", "tsim"]:
                simulator.clear_stats()
            f(x_nd, w_nd, y_nd)
            np.testing.assert_equal(y_np, y_nd.numpy())
            if env.TARGET in ["sim", "tsim"]:
                sim_stats = simulator.stats()
                print("GEMM schedule:{} execution statistics:".format(name))
                for k, v in sim_stats.items():
                    print("\t{:<16}: {:>16}".format(k, v))

        def test_schedule1():
            # default schedule with no smt
            s = te.create_schedule(y.op)
            # set the scope of the SRAM buffers
            s[x_buf].set_scope(env.inp_scope)
            s[w_buf].set_scope(env.wgt_scope)
            s[y_gem].set_scope(env.acc_scope)
            s[y_shf].set_scope(env.acc_scope)
            s[y_max].set_scope(env.acc_scope)
            s[y_min].set_scope(env.acc_scope)
            # set pragmas for DMA transfer and ALU ops
            s[x_buf].compute_at(s[y_gem], ko)
            s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
            s[w_buf].compute_at(s[y_gem], ko)
            s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
            s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
            s[y_max].pragma(s[y_max].op.axis[0], env.alu)
            s[y_min].pragma(s[y_min].op.axis[0], env.alu)
            s[y].pragma(s[y].op.axis[0], env.dma_copy)
            # tensorization: ko outermost, ki innermost, map the inner tile
            # onto the VTA GEMM intrinsic
            s[y_gem].reorder(
                ko,
                s[y_gem].op.axis[0],
                s[y_gem].op.axis[1],
                s[y_gem].op.axis[2],
                s[y_gem].op.axis[3],
                ki,
            )
            s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
            verify(s, name="default")

        def test_smt():
            # test smt schedule: split the outer batch axis across two
            # virtual threads ("cthread")
            s = te.create_schedule(y.op)
            s[x_buf].set_scope(env.inp_scope)
            s[w_buf].set_scope(env.wgt_scope)
            s[y_gem].set_scope(env.acc_scope)
            s[y_shf].set_scope(env.acc_scope)
            s[y_max].set_scope(env.acc_scope)
            s[y_min].set_scope(env.acc_scope)
            abo, aco, abi, aci = s[y].op.axis
            abo1, abo2 = s[y].split(abo, nparts=2)
            s[y].bind(abo1, te.thread_axis("cthread"))
            s[y_gem].compute_at(s[y], abo1)
            s[y_shf].compute_at(s[y], abo1)
            s[y_max].compute_at(s[y], abo1)
            s[y_min].compute_at(s[y], abo1)
            s[y_gem].reorder(
                ko,
                s[y_gem].op.axis[0],
                s[y_gem].op.axis[1],
                s[y_gem].op.axis[2],
                s[y_gem].op.axis[3],
                ki,
            )
            s[y_gem].tensorize(s[y_gem].op.axis[2], env.gemm)
            s[y_shf].pragma(s[y_shf].op.axis[0], env.alu)
            s[y_max].pragma(s[y_max].op.axis[0], env.alu)
            s[y_min].pragma(s[y_min].op.axis[0], env.alu)
            s[x_buf].compute_at(s[y_gem], ko)
            s[x_buf].pragma(s[x_buf].op.axis[0], env.dma_copy)
            s[w_buf].compute_at(s[y_gem], ko)
            s[w_buf].pragma(s[w_buf].op.axis[0], env.dma_copy)
            s[y].pragma(abo2, env.dma_copy)
            verify(s, name="smt")

        test_schedule1()
        test_smt()

    vta.testing.run(_run)
def test_alu():
    """Exercise the VTA ALU with SHL/SHR/MAX/ADD, immediate and tensor-tensor."""

    def _run(env, remote):
        def check_alu(tvm_op, np_op=None, use_imm=False, test_name=None):
            """Test ALU"""
            m = 8
            n = 8
            # random immediate operand for the use_imm variants
            imm = np.random.randint(1, 5)
            # compute
            a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
            a_buf = te.compute(
                (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
            )  # DRAM->SRAM
            if use_imm:
                res_buf = te.compute(
                    (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm_op(a_buf(*i), imm), "res_buf"
                )  # compute
            else:
                b = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="b", dtype=env.acc_dtype)
                b_buf = te.compute(
                    (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: b(*i), "b_buf"
                )  # DRAM->SRAM
                res_buf = te.compute(
                    (m, n, env.BATCH, env.BLOCK_OUT),
                    lambda *i: tvm_op(a_buf(*i), b_buf(*i)),
                    "res_buf",
                )  # compute5B
            res = te.compute(
                (m, n, env.BATCH, env.BLOCK_OUT),
                lambda *i: res_buf(*i).astype(env.inp_dtype),
                "res",
            )  # SRAM->DRAM
            # schedule
            s = te.create_schedule(res.op)
            s[a_buf].set_scope(env.acc_scope)  # SRAM
            s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)  # DRAM->SRAM
            s[res_buf].set_scope(env.acc_scope)  # SRAM
            s[res_buf].pragma(res_buf.op.axis[0], env.alu)  # compute
            s[res].pragma(res.op.axis[0], env.dma_copy)  # SRAM->DRAM
            if not use_imm:
                s[b_buf].set_scope(env.acc_scope)  # SRAM
                s[b_buf].pragma(b_buf.op.axis[0], env.dma_copy)  # DRAM->SRAM
            if not remote:
                return
            # build; immediate variant takes one input tensor, tensor
            # variant takes two
            with vta.build_config():
                if use_imm:
                    mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
                else:
                    mod = vta.build(
                        s, [a, b, res], tvm.target.Target("ext_dev", host=env.target_host)
                    )
            temp = utils.tempdir()
            mod.save(temp.relpath("load_act.o"))
            remote.upload(temp.relpath("load_act.o"))
            f = remote.load_module("load_act.o")
            # verify against the NumPy counterpart (falls back to tvm_op for
            # plain-Python lambdas such as add)
            dev = remote.ext_dev(0)
            a_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
            if use_imm:
                res_np = np_op(a_np, imm) if np_op else tvm_op(a_np, imm)
            else:
                b_np = np.random.randint(-16, 16, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(
                    b.dtype
                )
                res_np = np_op(a_np, b_np) if np_op else tvm_op(a_np, b_np)
            res_np = res_np.astype(res.dtype)
            a_nd = tvm.nd.array(a_np, dev)
            res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
            if env.TARGET in ["sim", "tsim"]:
                simulator.clear_stats()
            if use_imm:
                f(a_nd, res_nd)
            else:
                b_nd = tvm.nd.array(b_np, dev)
                f(a_nd, b_nd, res_nd)
            np.testing.assert_equal(res_np, res_nd.numpy())
            if env.TARGET in ["sim", "tsim"]:
                sim_stats = simulator.stats()
                print("ALU {} execution statistics:".format(test_name))
                for k, v in sim_stats.items():
                    print("\t{:<16}: {:>16}".format(k, v))

        check_alu(lambda x, y: x << y, np.left_shift, use_imm=True, test_name="SHL")
        check_alu(tvm.te.max, np.maximum, use_imm=True, test_name="MAX")
        check_alu(tvm.te.max, np.maximum, test_name="MAX")
        check_alu(lambda x, y: x + y, use_imm=True, test_name="ADD")
        check_alu(lambda x, y: x + y, test_name="ADD")
        check_alu(lambda x, y: x >> y, np.right_shift, use_imm=True, test_name="SHR")

    vta.testing.run(_run)
def test_relu():
    """Test RELU on ALU"""

    def _run(env, remote):
        m = 8
        n = 10
        # compute
        # NOTE(review): the TE stage-name strings below are shuffled relative
        # to the Python variables (max stage is named "res_buf", min stage
        # "max_buf", output "min_buf"). They only label the generated ops, so
        # behavior is unaffected; left as-is to keep this change doc-only.
        a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
        a_buf = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
        )  # DRAM->SRAM
        max_buf = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: tvm.te.max(a_buf(*i), 0), "res_buf"
        )  # relu
        min_buf = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT),
            lambda *i: tvm.te.min(max_buf(*i), (1 << (env.INP_WIDTH - 1)) - 1),
            "max_buf",
        )  # relu
        res = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT),
            lambda *i: min_buf(*i).astype(env.inp_dtype),
            "min_buf",
        )  # SRAM->DRAM
        # schedule
        s = te.create_schedule(res.op)
        s[a_buf].set_scope(env.acc_scope)  # SRAM
        s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)  # DRAM->SRAM
        s[max_buf].set_scope(env.acc_scope)  # SRAM
        s[min_buf].set_scope(env.acc_scope)  # SRAM
        s[max_buf].pragma(max_buf.op.axis[0], env.alu)  # compute
        s[min_buf].pragma(min_buf.op.axis[0], env.alu)  # compute
        s[res].pragma(res.op.axis[0], env.dma_copy)  # SRAM->DRAM
        # build
        with vta.build_config():
            mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
        if not remote:
            return
        temp = utils.tempdir()
        mod.save(temp.relpath("load_act.o"))
        remote.upload(temp.relpath("load_act.o"))
        f = remote.load_module("load_act.o")
        # verify: reference is a plain clip to [0, 2^(INP_WIDTH-1)-1]
        dev = remote.ext_dev(0)
        a_np = np.random.randint(-256, 256, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
        res_np = np.clip(a_np, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(res.dtype)
        a_nd = tvm.nd.array(a_np, dev)
        res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
        if env.TARGET in ["sim", "tsim"]:
            simulator.clear_stats()
        f(a_nd, res_nd)
        np.testing.assert_equal(res_np, res_nd.numpy())
        if env.TARGET in ["sim", "tsim"]:
            sim_stats = simulator.stats()
            print("Relu execution statistics:")
            for k, v in sim_stats.items():
                print("\t{:<16}: {:>16}".format(k, v))

    vta.testing.run(_run)
def test_shift_and_scale():
    """Test shift and scale on ALU.

    Adds a random immediate then arithmetic-right-shifts by another random
    immediate, both as VTA ALU stages, and checks against numpy.
    Fix over previous revision: ``vta.build`` is now wrapped in
    ``with vta.build_config():`` for consistency with the other ALU tests in
    this file (see test_relu and the generic ALU checker above).
    """

    def _run(env, remote):
        m = 2
        n = 8
        imm_shift = np.random.randint(0, 8)
        imm_scale = np.random.randint(1, 5)
        # compute
        a = te.placeholder((m, n, env.BATCH, env.BLOCK_OUT), name="a", dtype=env.acc_dtype)
        a_buf = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a(*i), "a_buf"
        )  # DRAM->SRAM
        res_shift = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: a_buf(*i) + imm_shift, "res_shift"
        )  # compute: add immediate
        res_scale = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_shift(*i) >> imm_scale, "res_scale"
        )  # compute: arithmetic right shift by immediate
        res = te.compute(
            (m, n, env.BATCH, env.BLOCK_OUT), lambda *i: res_scale(*i).astype(env.inp_dtype), "res"
        )  # SRAM->DRAM
        # schedule
        s = te.create_schedule(res.op)
        s[a_buf].set_scope(env.acc_scope)  # SRAM
        s[res_shift].set_scope(env.acc_scope)  # SRAM
        s[res_scale].set_scope(env.acc_scope)  # SRAM
        s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy)  # DRAM->SRAM
        s[res_shift].pragma(res_shift.op.axis[0], env.alu)  # compute
        s[res_scale].pragma(res_scale.op.axis[0], env.alu)  # compute
        s[res].pragma(res.op.axis[0], env.dma_copy)  # SRAM->DRAM
        # build (wrapped in build_config for consistency with sibling tests)
        with vta.build_config():
            mod = vta.build(s, [a, res], tvm.target.Target("ext_dev", host=env.target_host))
        if not remote:
            return
        temp = utils.tempdir()
        mod.save(temp.relpath("load_act.o"))
        remote.upload(temp.relpath("load_act.o"))
        f = remote.load_module("load_act.o")
        # verify
        dev = remote.ext_dev(0)
        a_np = np.random.randint(-10, 10, size=(m, n, env.BATCH, env.BLOCK_OUT)).astype(a.dtype)
        res_np = np.right_shift((a_np + imm_shift), imm_scale)
        res_np = res_np.astype(res.dtype)
        a_nd = tvm.nd.array(a_np, dev)
        res_nd = tvm.nd.array(np.zeros((m, n, env.BATCH, env.BLOCK_OUT)).astype(res.dtype), dev)
        if env.TARGET in ["sim", "tsim"]:
            simulator.clear_stats()
        f(a_nd, res_nd)
        np.testing.assert_equal(res_np, res_nd.numpy())
        if env.TARGET in ["sim", "tsim"]:
            sim_stats = simulator.stats()
            print("Shift and scale execution statistics:")
            for k, v in sim_stats.items():
                print("\t{:<16}: {:>16}".format(k, v))

    vta.testing.run(_run)
def test_runtime_array():
    """Round-trip a random int8 tensor through the VTA device array API."""

    def _run(env, remote):
        size = 100
        dev = remote.ext_dev(0)
        host_data = np.random.randint(
            1, 10, size=(size, size, env.BATCH, env.BLOCK_OUT)
        ).astype("int8")
        device_array = tvm.nd.array(host_data, dev)
        # Copying back from the device must reproduce the host buffer exactly.
        np.testing.assert_equal(host_data, device_array.numpy())

    vta.testing.run(_run)
if __name__ == "__main__":
    # Run the VTA instruction unit tests directly (outside of pytest).
    test_runtime_array()
    test_save_load_out()
    test_padded_load()
    test_gemm()
    test_alu()
    test_relu()
    test_shift_and_scale()
| 22,211 | 37.968421 | 100 | py |
tvm | tvm-main/vta/tests/python/unittest/test_environment.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import vta
def test_env():
    """The mock environment must replace the ALU intrinsic with a skip marker."""
    mock_env = vta.get_env().mock
    assert mock_env.alu == "skip_alu"
def test_env_scope():
    """Entering vta.Environment overrides TARGET; leaving the scope restores it."""
    outer = vta.get_env()
    cfg = outer.cfg_dict
    cfg["TARGET"] = "xyz"
    with vta.Environment(cfg):
        # Inside the context the overridden target is visible.
        assert vta.get_env().TARGET == "xyz"
    # Outside, the previous environment is active again.
    assert vta.get_env().TARGET == outer.TARGET
if __name__ == "__main__":
    # Allow running this environment test file as a plain script.
    test_env()
    test_env_scope()
| 1,176 | 29.973684 | 62 | py |
tvm | tvm-main/vta/tests/python/integration/test_benchmark_gemm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib import utils
import vta.testing
from vta.testing import simulator
def test_gemm():
    """End-to-end packed GEMM benchmark on VTA.

    Builds a blocked matrix multiply followed by shift + clamp stages, runs it
    on the remote device, and prints GOPS figures for the full pipeline plus
    per-instruction-class "unit tests" that mock out all other stages.
    """

    def run_gemm_packed(env, remote, batch_size, channel, block):
        """Build, time and (shape-)check one batch_size x channel x channel GEMM.

        `block` selects an optional tiling factor; when falsy the workload is
        issued as a single untiled block.
        """
        data_shape = (batch_size // env.BATCH, channel // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
        weight_shape = (
            channel // env.BLOCK_OUT,
            channel // env.BLOCK_IN,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
        res_shape = (batch_size // env.BATCH, channel // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT)
        # To compute number of ops, use a x2 factor for FMA
        num_ops = 2 * channel * channel * batch_size
        ko = te.reduce_axis((0, channel // env.BLOCK_IN), name="ko")
        ki = te.reduce_axis((0, env.BLOCK_IN), name="ki")
        data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
        weight = te.placeholder(weight_shape, name="weight", dtype=env.wgt_dtype)
        data_buf = te.compute(data_shape, lambda *i: data(*i), "data_buf")
        weight_buf = te.compute(weight_shape, lambda *i: weight(*i), "weight_buf")
        res_gem = te.compute(
            res_shape,
            lambda bo, co, bi, ci: te.sum(
                data_buf[bo, ko, bi, ki].astype(env.acc_dtype)
                * weight_buf[co, ko, ci, ki].astype(env.acc_dtype),
                axis=[ko, ki],
            ),
            name="res_gem",
        )
        # Fixed-point rescale followed by a ReLU-style clamp into INP range.
        res_shf = te.compute(res_shape, lambda *i: res_gem(*i) >> 8, name="res_shf")
        res_max = te.compute(res_shape, lambda *i: tvm.te.max(res_shf(*i), 0), "res_max")  # relu
        res_min = te.compute(
            res_shape, lambda *i: tvm.te.min(res_max(*i), (1 << (env.INP_WIDTH - 1)) - 1), "res_min"
        )  # relu
        res = te.compute(res_shape, lambda *i: res_min(*i).astype(env.inp_dtype), name="res")

        def verify(s):
            """Build schedule `s`, run it remotely and return the timing result."""
            mod = vta.build(
                s,
                [data, weight, res],
                tvm.target.Target("ext_dev", host=env.target_host),
                name="gemm",
            )
            temp = utils.tempdir()
            mod.save(temp.relpath("gemm.o"))
            remote.upload(temp.relpath("gemm.o"))
            f = remote.load_module("gemm.o")
            # verify
            dev = remote.ext_dev(0)
            # Data in original format
            data_orig = np.random.randint(-128, 128, size=(batch_size, channel)).astype(data.dtype)
            weight_orig = np.random.randint(-128, 128, size=(channel, channel)).astype(weight.dtype)
            data_packed = data_orig.reshape(
                batch_size // env.BATCH, env.BATCH, channel // env.BLOCK_IN, env.BLOCK_IN
            ).transpose((0, 2, 1, 3))
            weight_packed = weight_orig.reshape(
                channel // env.BLOCK_OUT, env.BLOCK_OUT, channel // env.BLOCK_IN, env.BLOCK_IN
            ).transpose((0, 2, 1, 3))
            res_np = np.zeros(res_shape).astype(res.dtype)
            data_arr = tvm.nd.array(data_packed, dev)
            weight_arr = tvm.nd.array(weight_packed, dev)
            res_arr = tvm.nd.array(res_np, dev)
            # Host-side reference computed in the accumulator dtype.
            res_ref = np.zeros(res_shape).astype(env.acc_dtype)
            for b in range(batch_size // env.BATCH):
                for i in range(channel // env.BLOCK_OUT):
                    for j in range(channel // env.BLOCK_IN):
                        res_ref[b, i, :] += np.dot(
                            data_packed[b, j, :].astype(env.acc_dtype),
                            weight_packed[i, j].T.astype(env.acc_dtype),
                        )
            res_ref = np.right_shift(res_ref, 8)
            res_ref = np.clip(res_ref, 0, (1 << (env.INP_WIDTH - 1)) - 1).astype(res.dtype)
            time_f = f.time_evaluator("gemm", dev, number=20)
            if env.TARGET in ["sim", "tsim"]:
                simulator.clear_stats()
            cost = time_f(data_arr, weight_arr, res_arr)
            if env.TARGET in ["sim", "tsim"]:
                stats = simulator.stats()
                print("Execution statistics:")
                for k, v in stats.items():
                    print("\t{:<16}: {:>16}".format(k, v))
            # NOTE(review): res_unpack and res_ref are computed but never
            # compared here — this routine only reports timing; confirm whether
            # a correctness assert was intended.
            res_unpack = res_arr.numpy().reshape(
                batch_size // env.BATCH, channel // env.BLOCK_OUT, env.BATCH, env.BLOCK_OUT
            )
            return cost

        def run_schedule(load_inp, load_wgt, gemm, alu, store_out, print_ir):
            """Create the VTA schedule using the given pragmas/intrinsics and time it.

            Passing a stage's mock pragma (env.mock.*) skips that instruction
            class, which is how the per-stage unit tests below isolate cost.
            """
            s = te.create_schedule(res.op)
            s[data_buf].set_scope(env.inp_scope)
            s[weight_buf].set_scope(env.wgt_scope)
            s[res_gem].set_scope(env.acc_scope)
            s[res_shf].set_scope(env.acc_scope)
            s[res_min].set_scope(env.acc_scope)
            s[res_max].set_scope(env.acc_scope)
            if block:
                bblock = block // env.BATCH
                iblock = block // env.BLOCK_IN
                oblock = block // env.BLOCK_OUT
                xbo, xco, xbi, xci = s[res].op.axis
                xb1, xco1, xb2, xco2 = s[res].tile(xbo, xco, bblock, oblock)
                store_pt = xb2
                s[res_gem].compute_at(s[res], xco1)
                s[res_shf].compute_at(s[res], xco1)
                s[res_min].compute_at(s[res], xco1)
                s[res_max].compute_at(s[res], xco1)
                xbo, xco, xbi, xci = s[res_gem].op.axis
                # Compute one line at a time
                ko1, ko2 = s[res_gem].split(ko, iblock)
                s[res_gem].reorder(ko1, ko2, xbo, xco, xbi, xci, ki)
                s[data_buf].compute_at(s[res_gem], ko1)
                s[weight_buf].compute_at(s[res_gem], ko1)
                # Use VTA instructions
                s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
                s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
                s[res_gem].tensorize(xbi, gemm)
                s[res_shf].pragma(s[res_shf].op.axis[0], alu)
                s[res_min].pragma(s[res_min].op.axis[0], alu)
                s[res_max].pragma(s[res_max].op.axis[0], alu)
                s[res].pragma(store_pt, store_out)
            else:
                xbo, xco, xbi, xci = s[res_gem].op.axis
                s[res_gem].reorder(ko, xbo, xco, xbi, xci, ki)
                # Use VTA instructions
                s[data_buf].pragma(s[data_buf].op.axis[0], load_inp)
                s[weight_buf].pragma(s[weight_buf].op.axis[0], load_wgt)
                s[res_gem].tensorize(xbi, gemm)
                s[res_shf].pragma(s[res_shf].op.axis[0], alu)
                s[res_min].pragma(s[res_min].op.axis[0], alu)
                s[res_max].pragma(s[res_max].op.axis[0], alu)
                s[res].pragma(s[res].op.axis[0], store_out)
            if print_ir:
                print(tvm.lower(s, [data, weight, res], simple_mode=True))
            return verify(s)

        def gemm_normal(print_ir):
            """All stages real: end-to-end GOPS measurement."""
            mock = env.mock
            print("----- GEMM GOPS End-to-End Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    env.dma_copy,
                    env.dma_copy,
                    env.gemm,
                    env.alu,
                    env.dma_copy,
                    print_ir,
                )
                gops = (num_ops / cost.mean) / float(10**9)
                print(header)
                print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))

            with vta.build_config():
                run_test("NORMAL", print_ir)

        def gemm_unittest(print_ir):
            """Only the GEMM core is real; DMA and ALU stages are mocked."""
            mock = env.mock
            print("----- GEMM Unit Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    mock.dma_copy, mock.dma_copy, env.gemm, mock.alu, mock.dma_copy, print_ir
                )
                gops = (num_ops / cost.mean) / float(10**9)
                print(header)
                print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))

            with vta.build_config():
                run_test("NORMAL", print_ir)

        def alu_unittest(print_ir):
            """Only the ALU stages are real; everything else is mocked."""
            mock = env.mock
            print("----- ALU Unit Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    mock.dma_copy, mock.dma_copy, mock.gemm, env.alu, mock.dma_copy, print_ir
                )
                gops = (num_ops / cost.mean) / float(10**9)
                print(header)
                print("\tTime cost = %g sec/op, %g GOPS" % (cost.mean, gops))

            with vta.build_config():
                run_test("NORMAL", print_ir)
            print("")

        def load_inp_unittest(print_ir):
            """Only input DMA loads are real; measures input bandwidth."""
            mock = env.mock
            print("----- LoadInp Unit Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    env.dma_copy, mock.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir
                )
                gops = (num_ops / cost.mean) / float(10**9)
                bandwith = (batch_size * channel * env.INP_WIDTH / cost.mean) / float(10**9)
                print(header)
                print(
                    "\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
                    % (cost.mean, gops, bandwith)
                )

            with vta.build_config():
                run_test("NORMAL", print_ir)
            print("")

        def load_wgt_unittest(print_ir):
            """Only weight DMA loads are real; measures weight bandwidth."""
            mock = env.mock
            print("----- LoadWgt Unit Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    mock.dma_copy, env.dma_copy, mock.gemm, mock.alu, mock.dma_copy, print_ir
                )
                gops = (num_ops / cost.mean) / float(10**9)
                bandwith = (channel * channel * env.WGT_WIDTH / cost.mean) / float(10**9)
                print(header)
                print(
                    "\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
                    % (cost.mean, gops, bandwith)
                )

            with vta.build_config():
                run_test("NORMAL", print_ir)
            print("")

        def store_out_unittest(print_ir):
            """Only result DMA stores are real; measures output bandwidth."""
            mock = env.mock
            print("----- StoreOut Unit Test-------")

            def run_test(header, print_ir):
                cost = run_schedule(
                    mock.dma_copy, mock.dma_copy, mock.gemm, mock.alu, env.dma_copy, print_ir
                )
                gops = (num_ops / cost.mean) / float(10**9)
                bandwith = (batch_size * channel * env.OUT_WIDTH / cost.mean) / float(10**9)
                print(header)
                print(
                    "\tTime cost = %g sec/op, %g GOPS, bandwidth=%g Gbits"
                    % (cost.mean, gops, bandwith)
                )

            with vta.build_config():
                run_test("NORMAL", print_ir)
            print("")

        # NOTE: the load_inp/load_wgt/store_out unit tests above are defined
        # but not invoked here.
        gemm_normal(False)
        gemm_unittest(False)
        alu_unittest(False)

    def _run(env, remote):
        print("========GEMM 128=========")
        run_gemm_packed(env, remote, 128, 128, 128)

    vta.testing.run(_run)
if __name__ == "__main__":
    # Run the GEMM benchmark directly as a script.
    test_gemm()
| 12,050 | 40.84375 | 100 | py |
tvm | tvm-main/vta/tests/python/integration/test_benchmark_topi_dense.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi gemm operator for VTA"""
import os
import json
from collections import namedtuple
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Clamp x into [a_min, a_max] as two separate elementwise stages.

    topi's clip fuses both bounds into one stage; splitting into a min
    stage then a max stage keeps each op recognizable downstream (see the
    FIXME note above about pattern detection).
    """
    upper = tvm.tir.const(a_max, x.dtype)
    lower = tvm.tir.const(a_min, x.dtype)
    clipped = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), upper), name="clipA")
    clipped = te.compute(clipped.shape, lambda *i: tvm.te.max(clipped(*i), lower), name="clipB")
    return clipped
def run_gemm(
    env,
    remote,
    target,
    batch_size,
    in_feat,
    out_feat,
    check_correctness=True,
    print_ir=True,
    samples=4,
):
    """Build, run and time one dense (fully-connected) layer.

    Targets either the VTA accelerator (packed layout) or an ARM CPU
    (unpacked), compares against a numpy reference when requested, prints a
    PASSED/FAILED line with GOPS, and returns (correct, cost, stats).
    """
    # Perform packing only if we are targeting the accelerator
    # NOTE(review): if target is neither arm_cpu nor vta, data_pack is left
    # unbound and the code below raises NameError — callers must pass one of
    # the two supported targets.
    if "arm_cpu" in target.keys:
        data_pack = False
    elif "vta" in target.keys:
        data_pack = True
    # Derive shapes depending upon packing
    a_shape = (batch_size, in_feat)
    w_shape = (out_feat, in_feat)
    if data_pack:
        data_shape = (batch_size // env.BATCH, in_feat // env.BLOCK_IN, env.BATCH, env.BLOCK_IN)
        kernel_shape = (
            out_feat // env.BLOCK_OUT,
            in_feat // env.BLOCK_IN,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
        fcompute = vta.top.dense_packed
        fschedule = vta.top.schedule_dense_packed
    else:
        data_shape = a_shape
        kernel_shape = w_shape
        fcompute = topi.x86.dense_nopack
        fschedule = topi.x86.schedule_dense_nopack
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    # Define base computation schedule
    with target:
        res = fcompute(data, kernel, None, env.acc_dtype)
        # Fixed-point rescale then clamp into the output dtype's range.
        res = topi.right_shift(res, 8)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = fschedule([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, res], simple_mode=True))
    # Derive number of ops (x2 for multiply + accumulate)
    num_ops = 2 * batch_size * in_feat * out_feat
    # @memoize("vta.tests.test_benchmark_topi.dense.verify")
    def get_ref_data():
        """Random input/weight tensors plus a numpy reference product."""
        # derive min max for act, wgt types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
        r_np = np.dot(a_np.astype(env.acc_dtype), w_np.T.astype(env.acc_dtype)).astype(
            env.acc_dtype
        )
        return a_np, w_np, r_np

    # Data in original format
    data_np, kernel_np, res_ref = get_ref_data()
    if data_pack:
        # Repack host tensors into VTA's blocked layout.
        data_np = data_np.reshape(
            batch_size // env.BATCH, env.BATCH, in_feat // env.BLOCK_IN, env.BLOCK_IN
        ).transpose((0, 2, 1, 3))
        kernel_np = kernel_np.reshape(
            out_feat // env.BLOCK_OUT, env.BLOCK_OUT, in_feat // env.BLOCK_IN, env.BLOCK_IN
        ).transpose((0, 2, 1, 3))
    # Build
    if "vta" in target.keys:
        mod = vta.build(
            s,
            [data, kernel, res],
            target=tvm.target.Target(target, host=env.target_host),
            name="dense",
        )
    else:
        mod = tvm.build(
            s,
            [data, kernel, res],
            target=tvm.target.Target(target, host=env.target_host),
            name="dense",
        )
    temp = utils.tempdir()
    mod.save(temp.relpath("dense.o"))
    remote.upload(temp.relpath("dense.o"))
    f = remote.load_module("dense.o")
    dev = remote.device(str(target))
    res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, dev)
    kernel_arr = tvm.nd.array(kernel_np, dev)
    res_arr = tvm.nd.array(res_np, dev)
    time_f = f.time_evaluator("dense", dev, number=samples)
    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, res_arr)
    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.numpy()
        if data_pack:
            res_orig = res_orig.reshape(batch_size, out_feat)
        # Apply the same shift/clip/cast post-processing to the reference.
        res_ref = res_ref >> 8
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)
    gops = (num_ops / cost.mean) / float(10**9)
    status = "PASSED" if correct else "FAILED"
    if "arm_cpu" in target.keys:
        device = "CPU"
    elif "vta" in target.keys:
        device = "VTA"
    print("%s DENSE TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
    return correct, cost, stats
def test_gemm(device="vta", batch=128, in_feat=128, out_feat=128):
    """Benchmark one dense layer on the requested device ("vta" or "arm_cpu")."""

    def _run(env, remote):
        if device == "arm_cpu":
            target = env.target_vta_cpu
        elif device == "vta":
            target = env.target
            if env.TARGET not in ["sim", "tsim"]:
                # Real hardware: flash the bitstream and reconfigure over RPC.
                assert tvm.runtime.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        # Pre-tuned schedule parameters are pulled from tophub for the target.
        with autotvm.tophub.context(target):
            run_gemm(env, remote, target, batch, in_feat, out_feat)

    vta.testing.run(_run)
if __name__ == "__main__":
    # Benchmark a single (16 x 512 x 1008) dense layer on VTA when run as a script.
    test_gemm("vta", 16, 512, 1008)
| 7,640 | 34.375 | 97 | py |
tvm | tvm-main/vta/tests/python/integration/test_benchmark_topi_group_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi group conv2d operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
# Descriptor for one grouped-conv2d layer: input geometry, channel counts,
# group count, kernel size, padding and strides.
Workload = namedtuple(
    "GroupConv2DWorkload",
    [
        "batch",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "groups",
        "hkernel",
        "wkernel",
        "hpad",
        "wpad",
        "hstride",
        "wstride",
    ],
)
# Get batch info from env
env = vta.get_env()
# Mobilenet (grouped variant) workloads
mobilenet_wkls = [
    ("mobilenet.D1", Workload(env.BATCH, 112, 112, 32, 32, 2, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D2", Workload(env.BATCH, 112, 112, 64, 64, 4, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D3", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D4", Workload(env.BATCH, 56, 56, 128, 128, 8, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D5", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D6", Workload(env.BATCH, 28, 28, 256, 256, 16, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D7", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 1, 1)),
    ("mobilenet.D8", Workload(env.BATCH, 14, 14, 512, 512, 32, 3, 3, 1, 1, 2, 2)),
    ("mobilenet.D9", Workload(env.BATCH, 7, 7, 1024, 1024, 64, 3, 3, 1, 1, 1, 1)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Two-stage clamp of x into [a_min, a_max] (min stage, then max stage).

    Kept as two te.compute stages instead of topi.clip so each bound remains
    a separate elementwise op (see the FIXME note above).
    """
    hi_bound = tvm.tir.const(a_max, x.dtype)
    lo_bound = tvm.tir.const(a_min, x.dtype)
    stage = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), hi_bound), name="clipA")
    stage = te.compute(stage.shape, lambda *i: tvm.te.max(stage(*i), lo_bound), name="clipB")
    return stage
def run_group_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):
    """Build, run and time one grouped conv2d workload `wl`.

    Targets either the VTA accelerator (packed layout) or an ARM CPU
    (plain NCHW), optionally checks against a numpy reference, prints a
    PASSED/FAILED line with GOPS, and returns (correct, cost, stats).
    """
    # Workload assertions
    assert wl.hpad == wl.wpad
    # Perform packing only if we are targeting the accelerator
    if "arm_cpu" in target.keys:
        data_pack = False
        layout = "NCHW"
        fcompute = topi.nn.group_conv2d_nchw
        fschedule = topi.generic.schedule_group_conv2d_nchw
    elif "vta" in target.keys:
        data_pack = True
        layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
        fcompute = vta.top.group_conv2d_packed
        fschedule = vta.top.schedule_group_conv2d_packed
    # Derive shapes depending upon packing
    CI_G = wl.in_filter // wl.groups  # input channels per group
    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
    w_shape = (wl.out_filter, CI_G, wl.hkernel, wl.wkernel)
    b_shape = (wl.batch, wl.out_filter, 1, 1)
    if data_pack:
        data_shape = (
            wl.batch // env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            wl.height,
            wl.width,
            env.BATCH,
            env.BLOCK_IN,
        )
        kernel_shape = (
            wl.out_filter // env.BLOCK_OUT,
            CI_G // env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
        bias_shape = (
            wl.batch // env.BATCH,
            wl.out_filter // env.BLOCK_OUT,
            1,
            1,
            env.BATCH,
            env.BLOCK_OUT,
        )
    else:
        data_shape = a_shape
        kernel_shape = w_shape
        bias_shape = b_shape
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
    # Define base computation schedule
    with target:
        res = fcompute(
            data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), wl.groups, env.acc_dtype
        )
        # Fixed-point rescale, bias add, then clamp/cast into the output dtype.
        res = topi.right_shift(res, 8)
        res = topi.add(res, bias)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = fschedule([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))
    # Derive number of ops
    fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
    fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
    num_ops = (
        2
        * wl.batch
        * fout_height
        * fout_width
        * wl.hkernel
        * wl.wkernel
        * wl.out_filter
        * wl.in_filter
        // wl.groups
    )

    def get_ref_data():
        """Random input/kernel/bias tensors plus a numpy grouped-conv reference."""
        # derive min max for act, wgt, and bias types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (
            env.INP_WIDTH + env.WGT_WIDTH - 2
        )
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
        b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
        r_np = tvm.topi.testing.conv2d_nchw_python(
            a_np.astype(env.acc_dtype),
            w_np.astype(env.acc_dtype),
            (wl.hstride, wl.wstride),
            wl.hpad,
            wl.groups,
        ).astype(env.acc_dtype)
        return a_np, w_np, b_np, r_np

    # Data in original format
    data_np, kernel_np, bias_np, res_ref = get_ref_data()
    if data_pack:
        # Repack host tensors into VTA's blocked layout.
        data_np = data_np.reshape(
            wl.batch // env.BATCH,
            env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.height,
            wl.width,
        ).transpose((0, 2, 4, 5, 1, 3))
        kernel_np = kernel_np.reshape(
            wl.out_filter // env.BLOCK_OUT,
            env.BLOCK_OUT,
            CI_G // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
        ).transpose((0, 2, 4, 5, 1, 3))
        bias_np = bias_np.reshape(
            wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT
        )
    # Build
    if "vta" in target.keys:
        with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
            mod = vta.build(
                s,
                [data, kernel, bias, res],
                target=tvm.target.Target(target, host=env.target_host),
                name="conv2d",
            )
    else:
        mod = tvm.build(
            s,
            [data, kernel, bias, res],
            target=tvm.target.Target(target, host=env.target_host),
            name="conv2d",
        )
    temp = utils.tempdir()
    mod.save(temp.relpath("conv2d.o"))
    remote.upload(temp.relpath("conv2d.o"))
    f = remote.load_module("conv2d.o")
    dev = remote.device(str(target))
    res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, dev)
    kernel_arr = tvm.nd.array(kernel_np, dev)
    bias_arr = tvm.nd.array(bias_np, dev)
    res_arr = tvm.nd.array(res_np, dev)
    time_f = f.time_evaluator("conv2d", dev, number=samples)
    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.numpy()
        if data_pack:
            # Unpack the blocked result/bias back to plain NCHW for comparison.
            res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
                wl.batch, wl.out_filter, fout_height, fout_width
            )
            bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
        res_ref = res_ref >> env.WGT_WIDTH
        res_ref += bias_np
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)
    gops = (num_ops / cost.mean) / float(10**9)
    status = "PASSED" if correct else "FAILED"
    if "arm_cpu" in target.keys:
        device = "CPU"
    elif "vta" in target.keys:
        device = "VTA"
    print(
        "%s GROUP CONV2D TEST %s: Time cost = %g sec/op, %g GOPS"
        % (device, status, cost.mean, gops)
    )
    return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d(device):
    """Run every mobilenet grouped-conv workload on the requested device."""

    def _run(env, remote):
        if device == "arm_cpu":
            target = env.target_vta_cpu
        elif device == "vta":
            target = env.target
            if env.TARGET not in ["sim", "tsim"]:
                # Real hardware: flash the bitstream and reconfigure over RPC.
                assert tvm.runtime.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        # Pre-tuned schedule parameters are pulled from tophub for the target.
        with autotvm.tophub.context(target):
            for _, workload in mobilenet_wkls:
                print(workload)
                run_group_conv2d(env, remote, workload, target)

    vta.testing.run(_run)
if __name__ == "__main__":
    # When run as a script, exercise both the CPU and VTA backends.
    test_conv2d(device="arm_cpu")
    test_conv2d(device="vta")
| 11,109 | 34.158228 | 98 | py |
tvm | tvm-main/vta/tests/python/integration/test_benchmark_topi_conv2d_transpose.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d_transpose operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
# Descriptor for one transposed-conv2d layer: input geometry, channel counts,
# kernel size, padding, strides, and extra output padding (o_hpad/o_wpad).
Workload = namedtuple(
    "Conv2DTransposeWorkload",
    [
        "batch",
        "height",
        "width",
        "in_filter",
        "out_filter",
        "hkernel",
        "wkernel",
        "hpad",
        "wpad",
        "hstride",
        "wstride",
        "o_hpad",
        "o_wpad",
    ],
)
# Get batch info from env
env = vta.get_env()
# DCGAN workloads
dcgan_wklds = [
    # dcgan
    ("DCGAN.CT1", Workload(env.BATCH, 4, 4, 1024, 512, 4, 4, 1, 1, 2, 2, 0, 0)),
    ("DCGAN.CT2", Workload(env.BATCH, 8, 8, 512, 256, 4, 4, 1, 1, 2, 2, 0, 0)),
    ("DCGAN.CT3", Workload(env.BATCH, 16, 16, 256, 128, 4, 4, 1, 1, 2, 2, 0, 0)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Clip ``x`` into ``[a_min, a_max]`` as two separate elementwise stages.

    topi's builtin clip fuses both bounds into a single stage; splitting
    them works around the pattern-detection limitation noted in the FIXME
    at module level.
    """
    lower = tvm.tir.const(a_min, x.dtype)
    upper = tvm.tir.const(a_max, x.dtype)
    capped = te.compute(x.shape, lambda *idx: tvm.te.min(x(*idx), upper), name="clipA")
    floored = te.compute(x.shape, lambda *idx: tvm.te.max(capped(*idx), lower), name="clipB")
    return floored
# Helper function to get factors
def _find_factors(n):
factors = []
for f in range(1, n + 1):
if n % f == 0:
factors.append(f)
return factors
def run_conv2d_transpose(
    env, remote, wl, target, check_correctness=True, print_ir=False, samples=4
):
    """Build, upload, run, and benchmark one conv2d_transpose workload.

    Parameters
    ----------
    env : vta.Environment
        VTA configuration (BATCH/BLOCK sizes, dtypes, TARGET, target_host).
    remote : RPC session
        Session used to upload and execute the compiled module.
    wl : Conv2DTransposeWorkload
        Workload description (shapes, kernel, padding, strides, output pad).
    target : tvm.target.Target
        Either the ARM CPU target or the VTA accelerator target.
    check_correctness : bool
        When True, compare the device result against a numpy reference.
    print_ir : bool
        When True, dump the lowered TIR of the schedule.
    samples : int
        Number of timed runs for the time evaluator.

    Returns
    -------
    (correct, cost, stats) : tuple
        Correctness flag, timing result, and simulator statistics
        (empty dict when not running on the sim/tsim targets).
    """
    # Workload assertions
    assert wl.hpad == wl.wpad
    # Perform packing only if we are targeting the accelerator
    if "arm_cpu" in target.keys:
        data_pack = False
        fcompute = topi.arm_cpu.conv2d_transpose_nchw
        fschedule = topi.arm_cpu.schedule_conv2d_transpose_nchw
    elif "vta" in target.keys:
        data_pack = True
        fcompute = vta.top.conv2d_transpose_packed
        fschedule = vta.top.schedule_conv2d_transpose_packed
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError("Unsupported target %s: expected 'arm_cpu' or 'vta'" % target)
    # Derive shapes depending upon packing
    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
    w_shape = (wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
    if data_pack:
        data_shape = (
            wl.batch // env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            wl.height,
            wl.width,
            env.BATCH,
            env.BLOCK_IN,
        )
        kernel_shape = (
            wl.out_filter // env.BLOCK_OUT,
            wl.in_filter // env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
    else:
        data_shape = a_shape
        kernel_shape = w_shape
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
    # Define base computation schedule
    with target:
        res = fcompute(
            data, kernel, (wl.hstride, wl.wstride), padding, env.acc_dtype, (wl.o_hpad, wl.o_wpad)
        )
        # Requantize: shift down, clip to the output range, and narrow the dtype.
        res = topi.right_shift(res, env.WGT_WIDTH)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = fschedule([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, res], simple_mode=True))
    # Derive number of ops (2 = one multiply + one add per MAC)
    fout_height = (wl.height - 1) * wl.hstride - 2 * wl.hpad + wl.hkernel + wl.o_hpad
    fout_width = (wl.width - 1) * wl.wstride - 2 * wl.wpad + wl.wkernel + wl.o_wpad
    num_ops = (
        2
        * wl.batch
        * fout_height
        * fout_width
        * wl.hkernel
        * wl.wkernel
        * wl.out_filter
        * wl.in_filter
    )
    # @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
    def get_ref_data():
        # derive min max for act and wgt types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(
            w_min, w_max, size=(wl.in_filter, wl.out_filter, wl.hkernel, wl.wkernel)
        ).astype(kernel.dtype)
        r_np = tvm.topi.testing.conv2d_transpose_nchw_python(
            a_np.astype(env.acc_dtype),
            w_np.astype(env.acc_dtype),
            (wl.hstride, wl.wstride),
            wl.hpad,
            (wl.o_hpad, wl.o_wpad),
        ).astype(env.acc_dtype)
        return a_np, w_np, r_np
    # Data in original format
    data_np, kernel_np, res_ref = get_ref_data()
    if data_pack:
        # Repack NCHW data/kernel into VTA's blocked layouts.
        data_np = data_np.reshape(
            wl.batch // env.BATCH,
            env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.height,
            wl.width,
        ).transpose((0, 2, 4, 5, 1, 3))
        kernel_np = kernel_np.reshape(
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.out_filter // env.BLOCK_OUT,
            env.BLOCK_OUT,
            wl.hkernel,
            wl.wkernel,
        ).transpose((2, 0, 4, 5, 3, 1))
        # The packed transposed-conv kernel expects spatially flipped weights.
        kernel_np = np.flip(kernel_np, 2)
        kernel_np = np.flip(kernel_np, 3)
    # Build
    if "vta" in target.keys:
        with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
            mod = vta.build(
                s,
                [data, kernel, res],
                target=target,
                target_host=env.target_host,
                name="conv2d_transpose",
            )
    else:
        mod = tvm.build(
            s,
            [data, kernel, res],
            target=target,
            target_host=env.target_host,
            name="conv2d_transpose",
        )
    temp = utils.tempdir()
    mod.save(temp.relpath("conv2d_transpose.o"))
    remote.upload(temp.relpath("conv2d_transpose.o"))
    f = remote.load_module("conv2d_transpose.o")
    dev = remote.device(str(target))
    res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, dev)
    kernel_arr = tvm.nd.array(kernel_np, dev)
    res_arr = tvm.nd.array(res_np, dev)
    time_f = f.time_evaluator("conv2d_transpose", dev, number=samples)
    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, res_arr)
    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.numpy()
        if data_pack:
            # Unpack the blocked layout back to NCHW for comparison.
            res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
                wl.batch, wl.out_filter, fout_height, fout_width
            )
        # Apply the same requantization chain to the numpy reference.
        res_ref = res_ref >> env.WGT_WIDTH
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)
    gops = (num_ops / cost.mean) / float(10**9)
    status = "PASSED" if correct else "FAILED"
    # Only two targets reach this point thanks to the check at the top.
    device = "CPU" if "arm_cpu" in target.keys else "VTA"
    print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
    return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d_transpose(device):
    """Benchmark every DCGAN conv2d_transpose workload on the given device."""

    def _benchmark(env, remote):
        if device == "vta":
            target = env.target
            # Real hardware needs the FPGA programmed and the runtime
            # reconfigured before the first run; simulators do not.
            if env.TARGET not in ["sim", "tsim"]:
                assert tvm.runtime.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        elif device == "arm_cpu":
            target = env.target_vta_cpu
        # Load pre-tuned schedule parameters before benchmarking.
        with autotvm.tophub.context(target):
            for _, workload in dcgan_wklds:
                print(workload)
                run_conv2d_transpose(env, remote, workload, target)

    vta.testing.run(_benchmark)
# Allow running this file directly (outside of pytest) on both devices.
if __name__ == "__main__":
    test_conv2d_transpose(device="arm_cpu")
    test_conv2d_transpose(device="vta")
| 10,223 | 32.411765 | 98 | py |
tvm | tvm-main/vta/tests/python/integration/test_benchmark_topi_conv2d.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d operator for VTA"""
import json
import os
import pytest
import numpy as np
from collections import namedtuple
import tvm
from tvm import te
from tvm import relay
from tvm import autotvm
from tvm.contrib import utils
from tvm.contrib.pickle_memoize import memoize
from tvm import topi
import tvm.topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
Workload = namedtuple(
"Conv2DWorkload",
[
"batch",
"height",
"width",
"in_filter",
"out_filter",
"hkernel",
"wkernel",
"hpad",
"wpad",
"hstride",
"wstride",
],
)
# Get batch info from env
env = vta.get_env()
# ResNet18 workloads
resnet_wkls = [
# Workloads of resnet18 on imagenet
# ('resnet-18.C1', Workload(env.BATCH, 224, 224, 3, 64, 7, 7, 3, 3, 2, 2)),
("resnet-18.C2", Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
("resnet-18.C3", Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
("resnet-18.C4", Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
("resnet-18.C5", Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
("resnet-18.C6", Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
("resnet-18.C7", Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
("resnet-18.C8", Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
("resnet-18.C9", Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
("resnet-18.C10", Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
("resnet-18.C11", Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Two-stage clip: clamp to the upper bound, then to the lower bound.

    topi's clip would emit a single fused stage, which runs into the
    pattern-detection limitation mentioned in the FIXME at module level,
    so the two bounds are applied in separate compute stages.
    """
    hi = tvm.tir.const(a_max, x.dtype)
    lo = tvm.tir.const(a_min, x.dtype)
    clip_hi = te.compute(x.shape, lambda *coords: tvm.te.min(x(*coords), hi), name="clipA")
    clip_lo = te.compute(x.shape, lambda *coords: tvm.te.max(clip_hi(*coords), lo), name="clipB")
    return clip_lo
def run_conv2d(env, remote, wl, target, check_correctness=True, print_ir=False, samples=4):
    """Build, upload, run, and benchmark one conv2d (+bias) workload.

    Parameters
    ----------
    env : vta.Environment
        VTA configuration (BATCH/BLOCK sizes, dtypes, TARGET, target_host).
    remote : RPC session
        Session used to upload and execute the compiled module.
    wl : Conv2DWorkload
        Workload description (shapes, kernel, padding, strides).
    target : tvm.target.Target
        Either the ARM CPU target or the VTA accelerator target.
    check_correctness : bool
        When True, compare the device result against a numpy reference.
    print_ir : bool
        When True, dump the lowered TIR of the schedule.
    samples : int
        Number of timed runs for the time evaluator.

    Returns
    -------
    (correct, cost, stats) : tuple
        Correctness flag, timing result, and simulator statistics
        (empty dict when not running on the sim/tsim targets).
    """
    # Workload assertions
    assert wl.hpad == wl.wpad
    # Perform packing only if we are targeting the accelerator
    if "arm_cpu" in target.keys:
        data_pack = False
        layout = "NCHW"
        conv2d_fcompute = topi.arm_cpu.conv2d_nchw_spatial_pack
        conv2d_fschedule = topi.arm_cpu.schedule_conv2d_nchw_spatial_pack
    elif "vta" in target.keys:
        data_pack = True
        layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
        conv2d_fcompute = vta.top.conv2d_packed
        conv2d_fschedule = vta.top.schedule_conv2d_packed
    else:
        # Fail fast with a clear message instead of a NameError further down.
        raise ValueError("Unsupported target %s: expected 'arm_cpu' or 'vta'" % target)
    # Derive shapes depending upon packing
    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
    w_shape = (wl.out_filter, wl.in_filter, wl.hkernel, wl.wkernel)
    b_shape = (wl.batch, wl.out_filter, 1, 1)
    if data_pack:
        data_shape = (
            wl.batch // env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            wl.height,
            wl.width,
            env.BATCH,
            env.BLOCK_IN,
        )
        kernel_shape = (
            wl.out_filter // env.BLOCK_OUT,
            wl.in_filter // env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
            env.BLOCK_OUT,
            env.BLOCK_IN,
        )
        bias_shape = (
            wl.batch // env.BATCH,
            wl.out_filter // env.BLOCK_OUT,
            1,
            1,
            env.BATCH,
            env.BLOCK_OUT,
        )
    else:
        data_shape = a_shape
        kernel_shape = w_shape
        bias_shape = b_shape
    data = te.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = te.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    padding = relay.nn.get_pad_tuple2d((wl.hpad, wl.wpad))
    # Define base computation schedule
    with target:
        if data_pack:
            res = conv2d_fcompute(
                data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), layout, env.acc_dtype
            )
        else:
            res = conv2d_fcompute(
                data, kernel, (wl.hstride, wl.wstride), padding, (1, 1), env.acc_dtype
            )
        # Requantize: shift down, add bias, clip to range, narrow the dtype.
        res = topi.right_shift(res, 8)
        res = topi.add(res, bias)
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = conv2d_fschedule([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))
    # Derive number of ops (2 = one multiply + one add per MAC)
    fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
    fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
    num_ops = (
        2
        * wl.batch
        * fout_height
        * fout_width
        * wl.hkernel
        * wl.wkernel
        * wl.out_filter
        * wl.in_filter
    )
    # @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
    def get_ref_data():
        # derive min max for act, wgt, and bias types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (
            env.INP_WIDTH + env.WGT_WIDTH - 2
        )
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
        b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
        r_np = tvm.topi.testing.conv2d_nchw_python(
            a_np.astype(env.acc_dtype),
            w_np.astype(env.acc_dtype),
            (wl.hstride, wl.wstride),
            wl.hpad,
        ).astype(env.acc_dtype)
        return a_np, w_np, b_np, r_np
    # Data in original format
    data_np, kernel_np, bias_np, res_ref = get_ref_data()
    if data_pack:
        # Repack NCHW data/kernel/bias into VTA's blocked layouts.
        data_np = data_np.reshape(
            wl.batch // env.BATCH,
            env.BATCH,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.height,
            wl.width,
        ).transpose((0, 2, 4, 5, 1, 3))
        kernel_np = kernel_np.reshape(
            wl.out_filter // env.BLOCK_OUT,
            env.BLOCK_OUT,
            wl.in_filter // env.BLOCK_IN,
            env.BLOCK_IN,
            wl.hkernel,
            wl.wkernel,
        ).transpose((0, 2, 4, 5, 1, 3))
        bias_np = bias_np.reshape(
            wl.batch // env.BATCH, wl.out_filter // env.BLOCK_OUT, 1, 1, env.BATCH, env.BLOCK_OUT
        )
    # Build
    if "vta" in target.keys:
        with vta.build_config(disabled_pass={"tir.CommonSubexprElimTIR"}):
            mod = vta.build(
                s,
                [data, kernel, bias, res],
                target=tvm.target.Target(target, host=env.target_host),
                name="conv2d",
            )
    else:
        mod = tvm.build(
            s,
            [data, kernel, bias, res],
            target=tvm.target.Target(target, host=env.target_host),
            name="conv2d",
        )
    temp = utils.tempdir()
    mod.save(temp.relpath("conv2d.o"))
    remote.upload(temp.relpath("conv2d.o"))
    f = remote.load_module("conv2d.o")
    dev = remote.device(str(target))
    res_np = np.zeros(topi.utils.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, dev)
    kernel_arr = tvm.nd.array(kernel_np, dev)
    bias_arr = tvm.nd.array(bias_np, dev)
    res_arr = tvm.nd.array(res_np, dev)
    time_f = f.time_evaluator("conv2d", dev, number=samples)
    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.numpy()
        if data_pack:
            # Unpack the blocked layouts back to NCHW for comparison.
            res_orig = res_orig.transpose((0, 4, 1, 5, 2, 3)).reshape(
                wl.batch, wl.out_filter, fout_height, fout_width
            )
            bias_np = bias_np.transpose((0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
        # Apply the same requantization chain to the numpy reference.
        res_ref = res_ref >> env.WGT_WIDTH
        res_ref += bias_np
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)
    gops = (num_ops / cost.mean) / float(10**9)
    status = "PASSED" if correct else "FAILED"
    # Only two targets reach this point thanks to the check at the top.
    device = "CPU" if "arm_cpu" in target.keys else "VTA"
    print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
    return correct, cost, stats
@pytest.mark.parametrize("device", ["vta", "arm_cpu"])
def test_conv2d(device):
    """Benchmark every ResNet-18 conv2d workload on the given device."""

    def _benchmark(env, remote):
        if device == "vta":
            target = env.target
            # Real hardware needs the FPGA programmed and the runtime
            # reconfigured first; sim/tsim/intelfocl targets do not.
            if env.TARGET not in ["sim", "tsim", "intelfocl"]:
                assert tvm.runtime.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        elif device == "arm_cpu":
            target = env.target_vta_cpu
        # Load pre-tuned schedule parameters before benchmarking.
        with autotvm.tophub.context(target):
            for _, workload in resnet_wkls:
                print(workload)
                run_conv2d(env, remote, workload, target)

    vta.testing.run(_benchmark)
# Allow running this file directly (outside of pytest) on both devices.
if __name__ == "__main__":
    test_conv2d(device="arm_cpu")
    test_conv2d(device="vta")
| 11,482 | 34.996865 | 98 | py |
tvm | tvm-main/vta/tests/python/pynq/test_program_rpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import tvm
from tvm import te
from tvm import rpc
from vta import get_bitstream_path, download_bitstream, program_fpga, reconfig_runtime
host = os.environ.get("VTA_RPC_HOST", "pynq")
port = int(os.environ.get("VTA_RPC_PORT", "9091"))
def program_rpc_bitstream(path=None):
    """Program the FPGA attached to the remote RPC server.

    Parameters
    ----------
    path : str, optional
        Path to the bitstream file; forwarded unchanged to
        ``program_fpga`` (may be None).
    """
    assert tvm.runtime.enabled("rpc")
    session = rpc.connect(host, port)
    program_fpga(session, path)
def reconfig_rpc_runtime():
    """Reconfigure the VTA runtime on the remote RPC server."""
    assert tvm.runtime.enabled("rpc")
    session = rpc.connect(host, port)
    reconfig_runtime(session)
# Guard the hardware side effects behind __main__: previously merely
# importing this module (e.g. during pytest collection) would program the
# FPGA and reconfigure the runtime over RPC.
if __name__ == "__main__":
    program_rpc_bitstream()
    reconfig_rpc_runtime()
| 1,522 | 30.729167 | 86 | py |
tvm | tvm-main/docs/legacy_redirect.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from string import Template
import json
import os
legacy_redirects = [
["dev/benchmark.html", "../arch/benchmark.html"],
["dev/convert_layout.html", "../arch/convert_layout.html"],
["dev/debugger.html", "../arch/debugger.html"],
["dev/device_target_interactions.html", "../arch/device_target_interactions.html"],
["dev/frontend/tensorflow.html", "../../arch/frontend/tensorflow.html"],
["dev/hybrid_script.html", "../arch/hybrid_script.html"],
["dev/index.html", "../arch/index.html"],
["dev/inferbound.html", "../arch/inferbound.html"],
[
"dev/introduction_to_module_serialization.html",
"../arch/introduction_to_module_serialization.html",
],
["dev/microtvm_design.html", "../arch/microtvm_design.html"],
["dev/model_library_format.html", "../arch/model_library_format.html"],
["dev/pass_infra.html", "../arch/pass_infra.html"],
["dev/relay_intro.html", "../arch/relay_intro.html"],
["dev/relay_op_strategy.html", "../arch/relay_op_strategy.html"],
["dev/runtime.html", "../arch/runtime.html"],
["dev/runtimes/vulkan.html", "../../arch/runtimes/vulkan.html"],
["dev/security.html", "../arch/security.html"],
["dev/virtual_machine.html", "../arch/virtual_machine.html"],
["dev/how_to.html", "index.html"],
["dev/pytest_target_parametrization.html", "how_to/pytest_target_parametrization.html"],
["dev/relay_add_op.html", "how_to/relay_add_op.html"],
["dev/relay_add_pass.html", "how_to/relay_add_pass.html"],
["dev/relay_bring_your_own_codegen.html", "how_to/relay_bring_your_own_codegen.html"],
["dev/codebase_walkthrough.html", "tutorial/codebase_walkthrough.html"],
["deploy/android.html", "../how_to/deploy/android.html"],
["deploy/arm_compute_lib.html", "../how_to/deploy/arm_compute_lib.html"],
["deploy/bnns.html", "../how_to/deploy/bnns.html"],
["deploy/cpp_deploy.html", "../how_to/deploy/cpp_deploy.html"],
["deploy/hls.html", "../how_to/deploy/hls.html"],
["deploy/index.html", "../how_to/deploy/index.html"],
["deploy/integrate.html", "../how_to/deploy/integrate.html"],
["deploy/tensorrt.html", "../how_to/deploy/tensorrt.html"],
["deploy/vitis_ai.html", "../how_to/deploy/vitis_ai.html"],
["profiling/index.html", "../how_to/profile/index.html"],
["profiling/papi.html", "../how_to/profile/papi.html"],
["api/links.html", "../reference/api/links.html"],
["api/python/auto_scheduler.html", "../../reference/api/python/auto_scheduler.html"],
["api/python/autotvm.html", "../../reference/api/python/autotvm.html"],
["api/python/contrib.html", "../../reference/api/python/contrib.html"],
["api/python/driver.html", "../../reference/api/python/driver.html"],
["api/python/error.html", "../../reference/api/python/error.html"],
["api/python/graph_executor.html", "../../reference/api/python/graph_executor.html"],
["api/python/index.html", "../../reference/api/python/index.html"],
["api/python/ir.html", "../../reference/api/python/ir.html"],
["api/python/micro.html", "../../reference/api/python/micro.html"],
["api/python/ndarray.html", "../../reference/api/python/ndarray.html"],
["api/python/relay/analysis.html", "../../../reference/api/python/relay/analysis.html"],
["api/python/relay/backend.html", "../../../reference/api/python/relay/backend.html"],
[
"api/python/relay/dataflow_pattern.html",
"../../../reference/api/python/relay/dataflow_pattern.html",
],
["api/python/relay/frontend.html", "../../../reference/api/python/relay/frontend.html"],
["api/python/relay/image.html", "../../../reference/api/python/relay/image.html"],
["api/python/relay/index.html", "../../../reference/api/python/relay/index.html"],
["api/python/relay/nn.html", "../../../reference/api/python/relay/nn.html"],
["api/python/relay/testing.html", "../../../reference/api/python/relay/testing.html"],
["api/python/relay/transform.html", "../../../reference/api/python/relay/transform.html"],
["api/python/relay/vision.html", "../../../reference/api/python/relay/vision.html"],
["api/python/rpc.html", "../../reference/api/python/rpc.html"],
["api/python/runtime.html", "../../reference/api/python/runtime.html"],
["api/python/target.html", "../../reference/api/python/target.html"],
["api/python/te.html", "../../reference/api/python/te.html"],
["api/python/tir.html", "../../reference/api/python/tir.html"],
["api/python/topi.html", "../../reference/api/python/topi.html"],
["api/python/vta/index.html", "../../../reference/api/python/vta/index.html"],
["langref/hybrid_script.html", "../reference/langref/hybrid_script.html"],
["langref/index.html", "../reference/langref/index.html"],
["langref/relay_adt.html", "../reference/langref/relay_adt.html"],
["langref/relay_expr.html", "../reference/langref/relay_expr.html"],
["langref/relay_op.html", "../reference/langref/relay_op.html"],
["langref/relay_pattern.html", "../reference/langref/relay_pattern.html"],
["langref/relay_type.html", "../reference/langref/relay_type.html"],
["microtvm/index.html", "../topic/microtvm/index.html"],
["vta/dev/config.html", "../../topic/vta/dev/config.html"],
["vta/dev/hardware.html", "../../topic/vta/dev/hardware.html"],
["vta/dev/index.html", "../../topic/vta/dev/index.html"],
["vta/index.html", "../topic/vta/index.html"],
["vta/install.html", "../topic/vta/install.html"],
["tutorials/index.html", "../tutorial/index.html"],
["tutorials/frontend/from_caffe2.html", "../../how_to/compile_models/from_caffe2.html"],
["tutorials/frontend/from_coreml.html", "../../how_to/compile_models/from_coreml.html"],
["tutorials/frontend/from_darknet.html", "../../how_to/compile_models/from_darknet.html"],
["tutorials/frontend/from_keras.html", "../../how_to/compile_models/from_keras.html"],
["tutorials/frontend/from_mxnet.html", "../../how_to/compile_models/from_mxnet.html"],
["tutorials/frontend/from_onnx.html", "../../how_to/compile_models/from_onnx.html"],
["tutorials/frontend/from_paddle.html", "../../how_to/compile_models/from_paddle.html"],
["tutorials/frontend/from_pytorch.html", "../../how_to/compile_models/from_pytorch.html"],
["tutorials/frontend/from_tensorflow.html", "../../how_to/compile_models/from_tensorflow.html"],
["tutorials/frontend/from_tflite.html", "../../how_to/compile_models/from_tflite.html"],
[
"tutorials/frontend/deploy_model_on_android.html",
"../../how_to/deploy_models/deploy_model_on_android.html",
],
[
"tutorials/frontend/deploy_model_on_rasp.html",
"../../how_to/deploy_models/deploy_model_on_rasp.html",
],
[
"tutorials/frontend/deploy_object_detection_pytorch.html",
"../../how_to/deploy_models/deploy_object_detection_pytorch.html",
],
[
"tutorials/frontend/deploy_prequantized.html",
"../../how_to/deploy_models/deploy_prequantized.html",
],
[
"tutorials/frontend/deploy_prequantized_tflite.html",
"../../how_to/deploy_models/deploy_prequantized_tflite.html",
],
[
"tutorials/frontend/deploy_quantized.html",
"../../how_to/deploy_models/deploy_quantized.html",
],
["tutorials/frontend/deploy_sparse.html", "../../how_to/deploy_models/deploy_sparse.html"],
[
"tutorials/dev/bring_your_own_datatypes.html",
"../../how_to/extend_tvm/bring_your_own_datatypes.html",
],
[
"tutorials/dev/low_level_custom_pass.html",
"../../how_to/extend_tvm/low_level_custom_pass.html",
],
["tutorials/dev/use_pass_infra.html", "../../how_to/extend_tvm/use_pass_infra.html"],
["tutorials/dev/use_pass_instrument.html", "../../how_to/extend_tvm/use_pass_instrument.html"],
["tutorials/optimize/opt_conv_cuda.html", "../../how_to/optimize_operators/opt_conv_cuda.html"],
[
"tutorials/optimize/opt_conv_tensorcore.html",
"../../how_to/optimize_operators/opt_conv_tensorcore.html",
],
["tutorials/optimize/opt_gemm.html", "../../how_to/optimize_operators/opt_gemm.html"],
[
"tutorials/auto_scheduler/tune_conv2d_layer_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_arm.html",
"../../how_to/tune_with_autoscheduler/tune_network_arm.html",
],
[
"tutorials/auto_scheduler/tune_network_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_network_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_mali.html",
"../../how_to/tune_with_autoscheduler/tune_network_mali.html",
],
[
"tutorials/auto_scheduler/tune_network_x86.html",
"../../how_to/tune_with_autoscheduler/tune_network_x86.html",
],
[
"tutorials/auto_scheduler/tune_sparse_x86.html",
"../../how_to/tune_with_autoscheduler/tune_sparse_x86.html",
],
[
"tutorials/autotvm/tune_conv2d_cuda.html",
"../../how_to/tune_with_autotvm/tune_conv2d_cuda.html",
],
["tutorials/autotvm/tune_relay_arm.html", "../../how_to/tune_with_autotvm/tune_relay_arm.html"],
[
"tutorials/autotvm/tune_relay_cuda.html",
"../../how_to/tune_with_autotvm/tune_relay_cuda.html",
],
[
"tutorials/autotvm/tune_relay_mobile_gpu.html",
"../../how_to/tune_with_autotvm/tune_relay_mobile_gpu.html",
],
["tutorials/autotvm/tune_relay_x86.html", "../../how_to/tune_with_autotvm/tune_relay_x86.html"],
["tutorials/micro/micro_autotune.html", "../../how_to/work_with_microtvm/micro_autotune.html"],
[
"tutorials/micro/micro_reference_vm.html",
"../../how_to/work_with_microtvm/micro_reference_vm.html",
],
["tutorials/micro/micro_tflite.html", "../../how_to/work_with_microtvm/micro_tflite.html"],
["tutorials/frontend/build_gcn.html", "../../how_to/work_with_relay/build_gcn.html"],
[
"tutorials/frontend/using_external_lib.html",
"../../how_to/work_with_relay/using_external_lib.html",
],
["tutorials/language/extern_op.html", "../../how_to/work_with_schedules/extern_op.html"],
["tutorials/language/intrin_math.html", "../../how_to/work_with_schedules/intrin_math.html"],
["tutorials/language/reduction.html", "../../how_to/work_with_schedules/reduction.html"],
["tutorials/language/scan.html", "../../how_to/work_with_schedules/scan.html"],
[
"tutorials/language/schedule_primitives.html",
"../../how_to/work_with_schedules/schedule_primitives.html",
],
["tutorials/language/tedd.html", "../../how_to/work_with_schedules/tedd.html"],
["tutorials/language/tensorize.html", "../../how_to/work_with_schedules/tensorize.html"],
["tutorials/language/tuple_inputs.html", "../../how_to/work_with_schedules/tuple_inputs.html"],
[
"tutorials/get_started/auto_scheduler_matmul_x86.html",
"../../tutorial/auto_scheduler_matmul_x86.html",
],
["tutorials/get_started/autotvm_matmul_x86.html", "../../tutorial/autotvm_matmul_x86.html"],
["tutorials/get_started/autotvm_relay_x86.html", "../../tutorial/autotvm_relay_x86.html"],
[
"tutorials/get_started/cross_compilation_and_rpc.html",
"../../tutorial/cross_compilation_and_rpc.html",
],
["tutorials/get_started/install.html", "../../tutorial/install.html"],
["tutorials/topi/intro_topi.html", "../../tutorial/intro_topi.html"],
["tutorials/get_started/introduction.html", "../../tutorial/introduction.html"],
["tutorials/get_started/relay_quick_start.html", "../../tutorial/relay_quick_start.html"],
[
"tutorials/get_started/tensor_expr_get_started.html",
"../../tutorial/tensor_expr_get_started.html",
],
[
"tutorials/get_started/tvmc_command_line_driver.html",
"../../tutorial/tvmc_command_line_driver.html",
],
[
"tutorials/get_started/tvmc_python.html",
"../../tutorial/tvmc_python.html",
],
]
# HTML stub written at each legacy page location.  "$to" is filled in via
# string.Template with the relative path of the page's new location; the
# meta refresh acts as a no-JavaScript fallback for the script redirect.
redirect_template = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="1; url=$to" />
<script>
window.location.href = "$to"
</script>
</head>
</html>
"""
def build_legacy_redirect(tvm_path):
    """Return a Sphinx hook that writes redirect stubs for moved pages.

    For every ``(old, new)`` pair in ``legacy_redirects`` the returned
    callable drops a small HTML page at the old location (under
    ``docs/_build/html``) that forwards the browser to the new location.
    Only the html builder is handled; other builders are a no-op.
    """

    def legacy_redirect(app, docname):  # Sphinx expects two arguments
        if app.builder.name != "html":
            return
        template = Template(redirect_template)
        html_root = tvm_path.resolve() / "docs" / "_build" / "html"
        for old_page, new_page in legacy_redirects:
            stub_path = html_root / old_page
            os.makedirs(os.path.dirname(stub_path), exist_ok=True)
            with open(stub_path, "w") as f:
                f.write(template.substitute({"to": new_page}))

    return legacy_redirect
| 13,700 | 49.00365 | 100 | py |
tvm | tvm-main/docs/script_convert.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pathlib
BASH = "# bash"
BASH_IGNORE = "# bash-ignore"
BASH_MULTILINE_COMMENT = "# bash-comment"
def bash_to_python(src_path: pathlib.Path, dest_path: pathlib.Path):
    """Convert a bash script file to a Python format compatible with Sphinx doc."""
    # Line-oriented state machine with three mutually exclusive modes, each
    # opened and closed by the same sentinel comment line in the source:
    #   * BASH ("# bash")                        -> emit a reST bash code-block
    #   * BASH_IGNORE ("# bash-ignore")          -> drop the lines entirely
    #   * BASH_MULTILINE_COMMENT ("# bash-comment") -> strip "# " and emit text
    # Lines outside any region are copied through verbatim.
    with open(src_path, "r") as src_f:
        with open(dest_path, "w") as dest_f:
            line = src_f.readline()
            # Lines collected for the code-block currently being built.
            bash_block = []
            bash_detected = False
            bash_ignore_detected = False
            # When True, a "\n" is written before the next emitted chunk.
            new_line_required = False
            bash_multiline_comment_detected = False
            while line:
                line = line.strip("\n").strip("\r")
                if bash_detected:
                    if line == BASH:
                        # Closing sentinel: write the collected block as a
                        # commented-out reST "code-block:: bash" directive.
                        if new_line_required:
                            dest_f.write("\n")
                        python_code = "# .. code-block:: bash\n#\n"
                        for bash_line in bash_block:
                            python_code += f"# \t {bash_line}\n"
                        python_code += "#"
                        dest_f.write(python_code)
                        bash_detected = False
                        bash_block = []
                        new_line_required = True
                    else:
                        # add new bash command line
                        bash_block.append(line)
                elif bash_ignore_detected:
                    if line == BASH_IGNORE:
                        bash_ignore_detected = False
                    else:
                        # Ignored line: emit nothing and suppress the pending
                        # newline.
                        new_line_required = False
                        pass
                elif bash_multiline_comment_detected:
                    if line == BASH_MULTILINE_COMMENT:
                        bash_multiline_comment_detected = False
                    else:
                        # Strip the leading "# " so the comment becomes plain
                        # text; a bare "#" contributes nothing.
                        if line != "#":
                            assert len(line) > 2, "Detected empty line."
                            dest_f.write(line[2:])
                        new_line_required = True
                else:
                    # Outside any region: either an opening sentinel switches
                    # the mode, or the line is copied through unchanged.
                    if line == BASH:
                        bash_detected = True
                    elif line == BASH_IGNORE:
                        bash_ignore_detected = True
                    elif line == BASH_MULTILINE_COMMENT:
                        bash_multiline_comment_detected = True
                    else:
                        if new_line_required:
                            dest_f.write("\n")
                        dest_f.write(f"{line}")
                        new_line_required = True
                line = src_f.readline()
            # Flush the trailing newline for the last emitted chunk, if any.
            if new_line_required:
                dest_f.write("\n")
                new_line_required = False
def main():
    """Parse the command line and convert the given bash script to Python."""
    arg_parser = argparse.ArgumentParser(description="Convert tutorial script to Python.")
    arg_parser.add_argument("script", type=str, help="Path to script file.")
    options = arg_parser.parse_args()
    source = pathlib.Path(options.script)
    # The converted file lives next to the source, with a .py extension.
    destination = source.parent / (source.stem + ".py")
    bash_to_python(source, destination)


if __name__ == "__main__":
    main()
| 3,961 | 38.227723 | 86 | py |
tvm | tvm-main/docs/conf.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from functools import partial
import gc
from importlib import import_module
import inspect
from hashlib import md5
import os
from pathlib import Path
import re
import sys
from textwrap import dedent, indent
from unittest.mock import patch
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path.resolve() / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "vta" / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# -- General configuration ------------------------------------------------
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2023, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
    """Get git describe version.

    Executes ``version.py`` from the TVM source tree (without importing it)
    and returns the git-describe-based version it reports.

    Parameters
    ----------
    original_version : str
        The version currently reported by the installed tvm package.

    Returns
    -------
    str
        The git-describe-derived version (may equal *original_version*).
    """
    ver_py = tvm_path.joinpath("version.py")
    # Run version.py in an isolated namespace; __file__ is supplied since the
    # script presumably uses it to locate the repo root -- confirm if changed.
    libver = {"__file__": ver_py}
    exec(compile(open(ver_py, "rb").read(), ver_py, "exec"), libver, libver)
    _, gd_version = libver["git_describe_version"]()
    if gd_version != original_version:
        print("Use git describe based version %s" % gd_version)
    return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
def monkey_patch(module_name, func_name):
    """Decorator factory for monkey-patching library functions.

    Used to modify a few sphinx-gallery behaviors to make the "Open in Colab"
    button work correctly. The decorated function replaces
    ``module_name.func_name`` and receives the original implementation via the
    ``real_func`` keyword argument. Note this behaves differently from
    unittest's @mock.patch: the decorator is placed on the *new* version of
    the function.
    """
    target_module = import_module(module_name)
    wrapped = getattr(target_module, func_name)

    def decorator(replacement):
        patched = partial(replacement, real_func=wrapped)
        setattr(target_module, func_name, patched)
        return patched

    return decorator
CURRENT_FILE_CONF = None
@monkey_patch("sphinx_gallery.py_source_parser", "split_code_and_text_blocks")
def split_code_and_text_blocks(source_file, return_node, real_func):
    """Monkey-patch split_code_and_text_blocks to capture sphinx-gallery's
    file-level config.

    It's kinda gross, but we need access to file_conf to detect the
    requires_cuda flag.
    """
    global CURRENT_FILE_CONF
    conf, code_blocks, ast_node = real_func(source_file, return_node)
    CURRENT_FILE_CONF = conf
    return (conf, code_blocks, ast_node)
# This header replaces the default sphinx-gallery one in sphinx_gallery/gen_rst.py.
COLAB_HTML_HEADER = """
.. DO NOT EDIT. THIS FILE WAS AUTOMATICALLY GENERATED BY
.. TVM'S MONKEY-PATCHED VERSION OF SPHINX-GALLERY. TO MAKE
.. CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "{python_file}"
.. only:: html
.. note::
:class: sphx-glr-download-link-note
This tutorial can be used interactively with Google Colab! You can also click
:ref:`here <sphx_glr_download_{ref_name}>` to run the Jupyter notebook locally.
.. image:: {button_svg}
:align: center
:target: {colab_url}
:width: 300px
.. rst-class:: sphx-glr-example-title
.. _sphx_glr_{ref_name}:
"""
# Google Colab allows opening .ipynb files on GitHub by appending a GitHub path to this base URL.
COLAB_URL_BASE = "https://colab.research.google.com/github"
# The GitHub path where the site is automatically deployed by tvm-bot.
IPYTHON_GITHUB_BASE = "apache/tvm-site/blob/asf-site/docs/_downloads/"
# The SVG image of the "Open in Colab" button.
BUTTON = (
"https://raw.githubusercontent.com/tlc-pack/web-data/main/images/utilities/colab_button.svg"
)
@monkey_patch("sphinx_gallery.gen_rst", "save_rst_example")
def save_rst_example(example_rst, example_file, time_elapsed, memory_used, gallery_conf, real_func):
    """Monkey-patch save_rst_example to include the "Open in Colab" button."""
    rel_path = os.path.relpath(example_file, gallery_conf["src_dir"])
    ref_name = rel_path.replace(os.path.sep, "_")
    # Colab addresses the notebook by the md5 hash of its path.
    notebook_name = rel_path[:-2] + "ipynb"
    notebook_digest = md5(notebook_name.encode()).hexdigest()
    # Fixed documentation versions must link to different (earlier) .ipynb notebooks.
    colab_url = "/".join([COLAB_URL_BASE, IPYTHON_GITHUB_BASE])
    if "dev" not in version:
        colab_url = colab_url + version + "/"
    colab_url = colab_url + notebook_digest + "/" + os.path.basename(notebook_name)
    header = COLAB_HTML_HEADER.format(
        python_file=rel_path, ref_name=ref_name, colab_url=colab_url, button_svg=BUTTON
    )
    # Swap in our header only for the duration of the real writer.
    with patch("sphinx_gallery.gen_rst.EXAMPLE_HEADER", header):
        real_func(example_rst, example_file, time_elapsed, memory_used, gallery_conf)
INCLUDE_DIRECTIVE_RE = re.compile(r"^([ \t]*)\.\. include::\s*(.+)\n", flags=re.M)
COMMENT_DIRECTIVE_RE = re.compile(r"^\.\.(?: .*)?\n(?:(?: .*)?\n)*", flags=re.M)
ADMONITION_DIRECTIVE_RE = re.compile(rf"^\.\. admonition:: *(.*)\n((?:(?: .*)?\n)*)\n", flags=re.M)
@monkey_patch("sphinx_gallery.notebook", "rst2md")
def rst2md(text, gallery_conf, target_dir, heading_levels, real_func):
    """Monkey-patch rst2md to support comments and some include directives.

    Currently, only include directives without any parameters are supported.
    Also, note that in reStructuredText any unrecognized explicit markup block
    is treated as a comment (see
    https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#comments).

    For callouts, we only replace generic "admonition" directives; all other
    admonitions (e.g. notes) are handled by sphinx-gallery's own rst2md. The
    "alert"/"alert-info" tags are supported in most IPython notebooks but
    render kinda funky on Colab.
    """

    def expand_include(match):
        include_path = os.path.join(target_dir, match.group(2))
        with open(include_path) as included:
            contents = included.read()
        return indent(contents, match.group(1)) + "\n"

    text = INCLUDE_DIRECTIVE_RE.sub(expand_include, text)

    # Replace generic, titled admonitions with an HTML callout box.
    def admonition_to_html(match):
        title, body = match.groups()
        stripped = dedent(body).strip()
        return '<div class="alert alert-info"><h4>%s</h4><p>%s</p></div>' % (title, stripped)

    text = ADMONITION_DIRECTIVE_RE.sub(admonition_to_html, text)

    # Call the real converter, then strip any remaining directives (which at
    # this point are comments).
    text = real_func(text, gallery_conf, target_dir, heading_levels)
    return COMMENT_DIRECTIVE_RE.sub("", text)
def install_request_hook(gallery_conf, fname):
    """Sphinx-gallery reset hook: install TVM's network request hook for an example.

    depth=3 is presumably tuned to the call-frame depth sphinx-gallery adds
    around the executed example -- confirm before changing.
    """
    testing.utils.install_request_hook(depth=3)
# Install-cell snippets injected as the first cell of every generated Jupyter
# notebook (see jupyter_notebook() below). The variant is chosen based on
# release-vs-dev and CPU-vs-CUDA. The duplicated "see see" typo in the two
# CUDA snippets has been fixed.
INSTALL_TVM_DEV = f"""\
%%shell
# Installs the latest dev build of TVM from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm --pre"""

INSTALL_TVM_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm=={version}"""

INSTALL_TVM_CUDA_DEV = f"""\
%%shell
# Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels"""

INSTALL_TVM_CUDA_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels"""
@monkey_patch("sphinx_gallery.gen_rst", "jupyter_notebook")
def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func):
    """Monkey-patch sphinx-gallery to prepend a TVM install cell to each notebook.

    If we had only one import block, we could skip the patching and just set
    first_notebook_cell. However, how we import TVM depends on whether this is
    a fixed or dev version and whether the GPU is needed. Tutorials requiring
    a CUDA-enabled build of TVM should use the flag:

        # sphinx_gallery_requires_cuda = True
    """
    needs_cuda = CURRENT_FILE_CONF.get("requires_cuda", False)
    is_fixed_release = "dev" not in version
    if needs_cuda:
        header_cell = INSTALL_TVM_CUDA_FIXED if is_fixed_release else INSTALL_TVM_CUDA_DEV
    else:
        header_cell = INSTALL_TVM_FIXED if is_fixed_release else INSTALL_TVM_DEV
    patched_conf = dict(gallery_conf)
    patched_conf["first_notebook_cell"] = header_cell
    return real_func(script_blocks, patched_conf, target_dir)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The main toctree document.
main_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
# "numpy": ("https://numpy.org/doc/stable", None),
# "scipy": ("https://docs.scipy.org/doc/scipy", None),
# "matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [
tvm_path.joinpath("gallery", "tutorial"),
tvm_path.joinpath("gallery", "how_to", "compile_models"),
tvm_path.joinpath("gallery", "how_to", "deploy_models"),
tvm_path.joinpath("gallery", "how_to", "work_with_relay"),
tvm_path.joinpath("gallery", "how_to", "work_with_schedules"),
tvm_path.joinpath("gallery", "how_to", "optimize_operators"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autotvm"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"),
tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"),
tvm_path.joinpath("gallery", "how_to", "extend_tvm"),
tvm_path.joinpath("vta", "tutorials"),
]
gallery_dirs = [
"tutorial",
"how_to/compile_models",
"how_to/deploy_models",
"how_to/work_with_relay",
"how_to/work_with_schedules",
"how_to/optimize_operators",
"how_to/tune_with_autotvm",
"how_to/tune_with_autoscheduler",
"how_to/work_with_microtvm",
"how_to/extend_tvm",
"topic/vta/tutorials",
]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
    "tutorial": [
        "introduction.py",
        "install.py",
        "tvmc_command_line_driver.py",
        "tvmc_python.py",
        "autotvm_relay_x86.py",
        "tensor_expr_get_started.py",
        "autotvm_matmul_x86.py",
        "auto_scheduler_matmul_x86.py",
        "tensor_ir_blitz_course.py",
        # Fixed typo: was "topi.pi", which matches no tutorial filename and
        # silently left the TOPI tutorial sorted by name instead of position.
        "topi.py",
        "cross_compilation_and_rpc.py",
        "relay_quick_start.py",
        "uma.py",
    ],
    "compile_models": [
        "from_pytorch.py",
        "from_tensorflow.py",
        "from_mxnet.py",
        "from_onnx.py",
        "from_keras.py",
        "from_tflite.py",
        "from_coreml.py",
        "from_darknet.py",
        "from_caffe2.py",
        "from_paddle.py",
    ],
    "work_with_schedules": [
        "schedule_primitives.py",
        "reduction.py",
        "intrin_math.py",
        "scan.py",
        "extern_op.py",
        "tensorize.py",
        "tuple_inputs.py",
        "tedd.py",
    ],
    "optimize_operators": [
        "opt_gemm.py",
        "opt_conv_cuda.py",
        "opt_conv_tensorcore.py",
    ],
    "tune_with_autotvm": [
        "tune_conv2d_cuda.py",
        "tune_relay_cuda.py",
        "tune_relay_x86.py",
        "tune_relay_arm.py",
        "tune_relay_mobile_gpu.py",
    ],
    "tune_with_autoscheduler": [
        "tune_matmul_x86.py",
        "tune_conv2d_layer_cuda.py",
        "tune_network_x86.py",
        "tune_network_cuda.py",
    ],
    "extend_tvm": [
        "low_level_custom_pass.py",
        "use_pass_infra.py",
        "use_pass_instrument.py",
        "bring_your_own_datatypes.py",
    ],
    "work_with_microtvm": [
        "micro_tvmc.py",
        "micro_tflite.py",
        "micro_aot.py",
        "micro_pytorch.py",
        "micro_train.py",
        "micro_autotune.py",
        "micro_ethosu.py",
        "micro_mlperftiny.py",
    ],
}
class WithinSubsectionOrder:
    """Sort key callable ordering gallery files per within_subsection_order.

    Files listed for the subsection sort first, in their listed position;
    unlisted files follow, sorted by filename.
    """

    def __init__(self, src_dir):
        self.src_dir = src_dir.split("/")[-1]

    def __call__(self, filename):
        ordering = within_subsection_order.get(self.src_dir)
        if ordering is not None and filename in ordering:
            # Listed files sort by index; "\0" makes them precede any name.
            position = ordering.index(filename)
            assert position < 1e10
            return "\0%010d" % position
        # Otherwise, sort by filename
        return filename
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
    """Sphinx-gallery reset hook: force a GC pass after each example.

    Collecting TVM packed-function closures promptly releases any device
    memory they hold, which matters on machines with constrained GPU RAM.
    """
    gc.collect()
# Skips certain files to avoid dependency issues
filename_pattern_default = "^(?!.*micro_mlperftiny.py).*$"
sphinx_gallery_conf = {
    "backreferences_dir": "gen_modules/backreferences",
    "doc_module": ("tvm", "numpy"),
    "reference_url": {
        "tvm": None,
        # "matplotlib": "https://matplotlib.org/",
        # "numpy": "https://numpy.org/doc/stable",
    },
    "examples_dirs": examples_dirs,
    "within_subsection_order": WithinSubsectionOrder,
    "gallery_dirs": gallery_dirs,
    "subsection_order": subsection_order,
    "filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", filename_pattern_default),
    "download_all_examples": False,
    "min_reported_time": 60,
    "expected_failing_examples": [],
    # All reset callables must live in ONE tuple. The original dict had two
    # "reset_modules" keys, so the later bare install_request_hook entry
    # silently replaced the matplotlib/seaborn resets and force_gc.
    "reset_modules": ("matplotlib", "seaborn", force_gc, install_request_hook),
    "promote_jupyter_magic": True,
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2023 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
Copyright © 2023 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
def fixup_tutorials(original_url: str) -> str:
    """Map a generated tutorial .rst URL back to its true source file.

    Tutorials' true sources are Python or .txt files, but Sphinx only sees
    the generated .rst files; this rewrites the edit links accordingly.
    Non-tutorial URLs are returned unchanged.
    """
    if "docs/tutorial" not in original_url:
        # Normal .rst pages already point at their real source.
        return original_url
    if original_url.endswith("index.rst"):
        # Gallery index pages are generated from README.txt files.
        return re.sub(
            r"docs/tutorial/(.*)index\.rst", "gallery/tutorial/\\1README.txt", original_url
        )
    # Tutorial pages are generated from the gallery Python scripts.
    return re.sub(r"docs/tutorial/(.*)\.rst", "gallery/tutorial/\\1.py", original_url)
html_context = {
"footer_copyright": footer_copyright,
"footer_note": footer_note,
"header_links": header_links,
"header_dropdown": header_dropdown,
"header_logo": header_logo,
"header_logo_link": header_logo_link,
"version_prefixes": ["main", "v0.8.0/", "v0.9.0/", "v0.10.0/", "v0.11.0/", "v0.12.0/"],
"display_github": True,
"github_user": "apache",
"github_repo": "tvm",
"github_version": "main/docs/",
"theme_vcs_pageview_mode": "edit",
"edit_link_hook_fn": fixup_tutorials,
}
# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
    """Append alias information to the docstring of re-exported objects.

    If *obj*, documented under *name*, is actually defined in one of the
    alias modules listed in ``tvm_alias_check_map``, a ``.. rubric::`` line
    pointing at the canonical location is appended.

    Parameters
    ----------
    name : str
        The full name of the object in the doc.
    obj : object
        The original object.
    lines : list
        The docstring lines, need to be modified inplace.
    """
    parts = name.rsplit(".", 1)
    if len(parts) != 2:
        return
    doc_mod, symbol = parts
    if doc_mod not in tvm_alias_check_map:
        return
    if not hasattr(obj, "__module__"):
        return
    origin_mod = obj.__module__
    for alias_mod in tvm_alias_check_map[doc_mod]:
        if origin_mod.startswith(alias_mod) and hasattr(sys.modules[alias_mod], symbol):
            role = ":py:func" if callable(obj) else ":py:class"
            lines.append(".. rubric:: Alias of %s:`%s.%s`" % (role, alias_mod, symbol))
def process_docstring(app, what, name, obj, options, lines):
    """Sphinx callback to process docstring"""
    # Only callables and classes can be aliases of symbols in other modules.
    if callable(obj) or inspect.isclass(obj):
        update_alias_docstring(name, obj, lines)
from legacy_redirect import build_legacy_redirect
def strip_ipython_magic(app, docname, source):
    """Remove IPython %% magic lines from the page sources, in place.

    Prevents the magic commands from being rendered in HTML files.
    TODO rework this function to remove IPython magic commands from include
    directives too.
    """
    magic_re = re.compile(r"%%.*\n\s*")
    for idx, chunk in enumerate(source):
        source[idx] = magic_re.sub("", chunk)
def setup(app):
    """Sphinx extension entry point: register this conf.py's event hooks."""
    # Strip IPython magic before sources are parsed.
    app.connect("source-read", strip_ipython_magic)
    # Append alias rubrics to autodoc docstrings.
    app.connect("autodoc-process-docstring", process_docstring)
    # Emit the legacy URL redirect pages once the HTML build has finished.
    app.connect("build-finished", build_legacy_redirect(tvm_path))
| 24,952 | 33.323246 | 100 | py |
tvm | tvm-main/rust/tvm/examples/resnet/src/build_resnet.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import csv
import logging
from os import path as osp
import sys
import shutil
import numpy as np
import tvm
from tvm import te
from tvm import relay, runtime
from tvm.relay import testing
from tvm.contrib import graph_executor, cc
from PIL import Image
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
logging.basicConfig(
level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description="Resnet build example")
aa = parser.add_argument
aa("--build-dir", type=str, required=True, help="directory to put the build artifacts")
aa("--batch-size", type=int, default=1, help="input image batch size")
aa(
"--opt-level",
type=int,
default=3,
help="level of optimization. 0 is unoptimized and 3 is the highest level",
)
aa("--target", type=str, default="llvm", help="target for compilation")
aa("--image-shape", type=str, default="3,224,224", help="input image dimensions")
aa("--image-name", type=str, default="cat.png", help="name of input image to download")
args = parser.parse_args()
build_dir = args.build_dir
batch_size = args.batch_size
opt_level = args.opt_level
target = tvm.target.create(args.target)
image_shape = tuple(map(int, args.image_shape.split(",")))
data_shape = (batch_size,) + image_shape
def build(target_dir):
    """Compile ResNet-18 with TVM and write the artifacts into *target_dir*.

    Writes deploy_lib.o / deploy_lib.so / deploy_graph.json /
    deploy_param.params.

    Fix: the function previously hard-coded ``target = "llvm"`` (shadowing the
    module-level target built from ``--target``) and ``opt_level=3``, making
    the ``--target``, ``--opt-level`` and ``--batch-size`` CLI flags dead.
    It now honors the module-level ``target``, ``opt_level`` and
    ``data_shape``; defaults reproduce the old behavior exactly.
    """
    # Download the pretrained model in MxNet's format.
    block = get_model("resnet18_v1", pretrained=True)
    # Honor --batch-size / --image-shape (defaults keep the old 1x3x224x224).
    shape_dict = {"data": data_shape}
    mod, params = relay.frontend.from_mxnet(block, shape_dict)
    # Add softmax to do classification in last layer.
    func = mod["main"]
    func = relay.Function(
        func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
    )
    # Use the CLI-provided target and optimization level.
    with tvm.transform.PassContext(opt_level=opt_level):
        graph, lib, params = relay.build(func, target, params=params)
    # save the model artifacts
    deploy_lib = osp.join(target_dir, "deploy_lib.o")
    lib.save(deploy_lib)
    cc.create_shared(osp.join(target_dir, "deploy_lib.so"), [osp.join(target_dir, "deploy_lib.o")])
    with open(osp.join(target_dir, "deploy_graph.json"), "w") as fo:
        fo.write(graph)
    with open(osp.join(target_dir, "deploy_param.params"), "wb") as fo:
        fo.write(runtime.save_param_dict(params))
def download_img_labels():
    """Download the ImageNet-1k class labels and write them to ``synset.txt``.

    Returns
    -------
    dict
        Mapping of class id to human-readable label.
    """
    import ast

    synset_url = "".join(
        [
            "https://gist.githubusercontent.com/zhreshold/",
            "4d0b62f3d01426887599d4f7ede23ee5/raw/",
            "596b27d23537e5a1b5751d2b0481ef172f58b539/",
            "imagenet1000_clsid_to_human.txt",
        ]
    )
    synset_name = "synset.txt"
    synset_path = download_testdata(synset_url, synset_name + ".raw", module="data", overwrite=True)
    with open(synset_path) as fin:
        data = fin.read()
    # The file is a Python dict literal. Parse it with ast.literal_eval
    # instead of eval(): never eval() downloaded content.
    synset = ast.literal_eval(data)
    with open(synset_name, "w") as f:
        for key in synset:
            f.write(synset[key])
            f.write("\n")
    print(synset_path)
    print(synset_name)
    return synset
def transform_image(image):
    """Normalize an HWC RGB image and return it as a (1, C, H, W) float array."""
    mean = np.array([123.0, 117.0, 104.0])
    std = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - mean) / std
    # HWC -> CHW, then add the batch dimension.
    chw = normalized.transpose((2, 0, 1))
    return chw[np.newaxis, :]
def get_cat_image():
    """Download the test cat image and return it preprocessed as NCHW floats.

    Side effect: copies the raw image into the working directory as cat.png.
    """
    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    img_path = download_testdata(img_url, "cat.png", module="data")
    # Keep a local copy next to the build artifacts.
    shutil.copyfile(img_path, "cat.png")
    img = Image.open(img_path).resize((224, 224))
    return transform_image(img)
def test_build(build_dir):
    """Sanity check with the cat image we download."""
    graph_json = open(osp.join(build_dir, "deploy_graph.json")).read()
    lib = tvm.runtime.load_module(osp.join(build_dir, "deploy_lib.so"))
    param_bytes = bytearray(open(osp.join(build_dir, "deploy_param.params"), "rb").read())
    dev = tvm.cpu()
    module = graph_executor.create(graph_json, lib, dev)
    module.load_params(param_bytes)
    module.run(data=get_cat_image())
    scores = module.get_output(0).numpy()
    top1 = np.argmax(scores[0])
    labels = download_img_labels()
    print("TVM prediction top-1:", top1, labels[top1])
if __name__ == "__main__":
    logger.info("Compiling the model to graph executor.")
    build(build_dir)
    # Fixed typo in the log message: "predication" -> "prediction".
    logger.info("Testing the model's prediction on test data.")
    test_build(build_dir)
| 5,428 | 32.720497 | 100 | py |
tvm | tvm-main/rust/tvm/tests/basics/src/tvm_add.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path as osp
import sys
import tvm
from tvm import te
from tvm.contrib import cc
def main(target, out_dir):
    """Build an elementwise-add kernel for *target* and save it under *out_dir*.

    Saves test_add.o (and test_add.ptx for cuda), then links test_add.so.
    """
    length = te.var("n")
    lhs = te.placeholder((length,), name="A")
    rhs = te.placeholder((length,), name="B")
    out = te.compute(lhs.shape, lambda i: lhs[i] + rhs[i], name="C")
    schedule = te.create_schedule(out.op)
    is_cuda = target == "cuda"
    if is_cuda:
        # Distribute the elementwise loop over CUDA blocks/threads.
        block_axis, thread_axis = schedule[out].split(out.op.axis[0], factor=64)
        schedule[out].bind(block_axis, te.thread_axis("blockIdx.x"))
        schedule[out].bind(thread_axis, te.thread_axis("threadIdx.x"))
    fadd = tvm.build(schedule, [lhs, rhs, out], tvm.target.Target(target, host="llvm"), name="myadd")
    fadd.save(osp.join(out_dir, "test_add.o"))
    if is_cuda:
        fadd.imported_modules[0].save(osp.join(out_dir, "test_add.ptx"))
    cc.create_shared(osp.join(out_dir, "test_add.so"), [osp.join(out_dir, "test_add.o")])


if __name__ == "__main__":
    main(sys.argv[1], sys.argv[2])
| 1,694 | 34.3125 | 89 | py |
tvm | tvm-main/rust/tvm-graph-rt/tests/build_model.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple graph for testing."""
from os import path as osp
import numpy as np
import tvm
from tvm import te
from tvm import relay, runtime
from tvm.relay import testing
CWD = osp.dirname(osp.abspath(osp.expanduser(__file__)))
def _get_model(dshape):
    """Build a small Relay graph: dense + bias, split in halves, +/- 1 each.

    Returns a Tuple of (left + 1, right - 1, dense output).
    """
    inp = relay.var("data", shape=dshape)
    dense_out = relay.nn.dense(inp, relay.var("dense_weight"), units=dshape[-1] * 2)
    dense_out = relay.nn.bias_add(dense_out, relay.var("dense_bias"))
    lhs, rhs = relay.split(dense_out, indices_or_sections=2, axis=1)
    unit = relay.const(1, dtype="float32")
    return relay.Tuple([lhs + unit, rhs - unit, dense_out])
def main():
    """Compile the test model for LLVM and dump its graph JSON and
    serialized parameters next to this script."""
    shape = (32, 16)
    mod, params = testing.create_workload(_get_model(shape))
    graph, _lib, params = relay.build(mod, "llvm", params=params)
    graph_path = osp.join(CWD, "graph.json")
    params_path = osp.join(CWD, "graph.params")
    with open(graph_path, "w") as graph_file:
        graph_file.write(graph)
    with open(params_path, "wb") as params_file:
        params_file.write(runtime.save_param_dict(params))
# Script entry point: builds the graph and writes the artifacts when run directly.
if __name__ == "__main__":
    main()
| 1,845 | 32.563636 | 78 | py |
tvm | tvm-main/rust/tvm-graph-rt/tests/test_tvm_basic/src/build_test_lib.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Prepares a simple TVM library for testing."""
from os import path as osp
import sys
import tvm
from tvm.relay.backend import Runtime
from tvm import te
def main():
    """Compile an element-wise vector-add TE kernel for the host LLVM
    target and save the object file as ``<argv[1]>/test.o``."""
    length = te.var("n")
    a = te.placeholder((length,), name="A")
    b = te.placeholder((length,), name="B")
    c = te.compute(a.shape, lambda *idx: a(*idx) + b(*idx), name="C")
    sched = tvm.te.create_schedule(c.op)
    # Parallelize the single data axis across host threads.
    sched[c].parallel(sched[c].op.axis[0])
    rt = Runtime("cpp", {"system-lib": True})
    print(tvm.lower(sched, [a, b, c], simple_mode=True))
    built = tvm.build(sched, [a, b, c], "llvm", runtime=rt)
    built.save(osp.join(sys.argv[1], "test.o"))
# Script entry point: expects the output directory as the first CLI argument.
if __name__ == "__main__":
    main()
| 1,449 | 32.72093 | 90 | py |
tvm | tvm-main/rust/tvm-graph-rt/tests/test_wasm32/src/build_test_lib.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Prepares a simple TVM library for testing."""
from os import path as osp
import sys
import tvm
from tvm import te
from tvm.relay.backend import Runtime
def main():
    """Cross-compile an element-wise vector-add TE kernel to wasm32 and
    write the object file to ``<argv[1]>/test.o``."""
    size = te.var("n")
    lhs = te.placeholder((size,), name="A")
    rhs = te.placeholder((size,), name="B")
    out = te.compute(lhs.shape, lambda *ax: lhs(*ax) + rhs(*ax), name="C")
    sched = tvm.te.create_schedule(out.op)
    # Parallelize the single data axis.
    sched[out].parallel(sched[out].op.axis[0])
    print(tvm.lower(sched, [lhs, rhs, out], simple_mode=True))
    rt = Runtime("cpp", {"system-lib": True})
    built = tvm.build(
        sched, [lhs, rhs, out], "llvm -mtriple=wasm32-unknown-unknown", runtime=rt
    )
    built.save(osp.join(sys.argv[1], "test.o"))
# Script entry point: expects the output directory as the first CLI argument.
if __name__ == "__main__":
    main()
| 1,495 | 32.244444 | 90 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.